repo_name (string, lengths 6–130) | hexsha (list) | file_path (list) | code (list) | apis (list) | possible_versions (list) |
---|---|---|---|---|---|
MingtaoGuo/RetinaNet_TensorFlow
|
[
"e46a8db9aef3da77de5daa32663c6c5dbb661d11"
] |
[
"test.py"
] |
[
"from networks import backbone\r\nimport tensorflow as tf\r\nimport numpy as np\r\nfrom PIL import Image\r\nimport cv2\r\n\r\nfrom utils import generate_anchors, draw_bbox, recover_ImgAndBbox_scale\r\nfrom config import IMG_H, IMG_W, CLASSES, K\r\nfrom ops import top_k_score_bbox, sigmoid, offset2bbox\r\n\r\nanchors_p3 = generate_anchors(area=32, stride=8)\r\nanchors_p4 = generate_anchors(area=64, stride=16)\r\nanchors_p5 = generate_anchors(area=128, stride=32)\r\nanchors_p6 = generate_anchors(area=256, stride=64)\r\nanchors_p7 = generate_anchors(area=512, stride=128)\r\nanchors = np.concatenate((anchors_p3, anchors_p4, anchors_p5, anchors_p6, anchors_p7), axis=0)\r\n\r\nclass Inference():\r\n def __init__(self):\r\n self.inputs = tf.placeholder(tf.float32, [None, IMG_H, IMG_W, 3])\r\n self.is_training = tf.placeholder(tf.bool)\r\n _, _, class_logits_dict, box_logits_dict = backbone(self.inputs, self.is_training)\r\n\r\n class_logits_dict[\"P3\"], class_logits_dict[\"P4\"], class_logits_dict[\"P5\"], class_logits_dict[\"P6\"], class_logits_dict[\"P7\"] = \\\r\n tf.reshape(class_logits_dict[\"P3\"], [-1, K]), tf.reshape(class_logits_dict[\"P4\"], [-1, K]), tf.reshape(class_logits_dict[\"P5\"], [-1, K]), \\\r\n tf.reshape(class_logits_dict[\"P6\"], [-1, K]), tf.reshape(class_logits_dict[\"P7\"], [-1, K])\r\n\r\n box_logits_dict[\"P3\"], box_logits_dict[\"P4\"], box_logits_dict[\"P5\"], box_logits_dict[\"P6\"], box_logits_dict[\"P7\"] = \\\r\n tf.reshape(box_logits_dict[\"P3\"], [-1, 4]), tf.reshape(box_logits_dict[\"P4\"], [-1, 4]), tf.reshape(box_logits_dict[\"P5\"], [-1, 4]), \\\r\n tf.reshape(box_logits_dict[\"P6\"], [-1, 4]), tf.reshape(box_logits_dict[\"P7\"], [-1, 4])\r\n\r\n P3_class_pred, P4_class_pred, P5_class_pred, P6_class_pred, P7_class_pred = \\\r\n sigmoid(class_logits_dict[\"P3\"]), sigmoid(class_logits_dict[\"P4\"]), sigmoid(class_logits_dict[\"P5\"]), sigmoid(class_logits_dict[\"P6\"]), sigmoid(class_logits_dict[\"P7\"])\r\n\r\n P3_bbox_pred, P4_bbox_pred, P5_bbox_pred, P6_bbox_pred, P7_bbox_pred = \\\r\n box_logits_dict[\"P3\"], box_logits_dict[\"P4\"], box_logits_dict[\"P5\"], box_logits_dict[\"P6\"], box_logits_dict[\"P7\"]\r\n # thresholding confidence at 0.05 at most 1000 top k score\r\n P3_topK_score, P3_topK_bbox, P3_topK_anchors, P3_topK_class = top_k_score_bbox(P3_class_pred, P3_bbox_pred, anchors_p3, threshold=0.05, k=1000)\r\n P4_topK_score, P4_topK_bbox, P4_topK_anchors, P4_topK_class = top_k_score_bbox(P4_class_pred, P4_bbox_pred, anchors_p4, threshold=0.05, k=1000)\r\n P5_topK_score, P5_topK_bbox, P5_topK_anchors, P5_topK_class = top_k_score_bbox(P5_class_pred, P5_bbox_pred, anchors_p5, threshold=0.05, k=1000)\r\n P6_topK_score, P6_topK_bbox, P6_topK_anchors, P6_topK_class = top_k_score_bbox(P6_class_pred, P6_bbox_pred, anchors_p6, threshold=0.05, k=1000)\r\n P7_topK_score, P7_topK_bbox, P7_topK_anchors, P7_topK_class = top_k_score_bbox(P7_class_pred, P7_bbox_pred, anchors_p7, threshold=0.05, k=1000)\r\n\r\n self.topK_score = tf.concat([P3_topK_score, P4_topK_score, P5_topK_score, P6_topK_score, P7_topK_score], axis=0)\r\n self.topK_bbox = tf.concat([P3_topK_bbox, P4_topK_bbox, P5_topK_bbox, P6_topK_bbox, P7_topK_bbox], axis=0)\r\n self.topK_anchors = tf.concat([P3_topK_anchors, P4_topK_anchors, P5_topK_anchors, P6_topK_anchors, P7_topK_anchors], axis=0)\r\n self.topK_class = tf.concat([P3_topK_class, P4_topK_class, P5_topK_class, P6_topK_class, P7_topK_class], axis=0)\r\n\r\n self.bbox = offset2bbox(self.topK_anchors, self.topK_bbox)\r\n self.nms_idx = 
tf.image.non_max_suppression(self.bbox, self.topK_score, max_output_size=300)\r\n\r\n self.sess = tf.Session()\r\n self.sess.run(tf.global_variables_initializer())\r\n\r\n saver = tf.train.Saver()\r\n saver.restore(self.sess, \"./model/model.ckpt\")\r\n\r\n def __call__(self, IMG):\r\n IMG_ = np.array(Image.fromarray(IMG).resize([IMG_W, IMG_H]))\r\n [NMS_IDX, BBOX, TOPK_CLASS, TOPK_SCORE] = self.sess.run([self.nms_idx, self.bbox, self.topK_class, self.topK_score], feed_dict={self.inputs: IMG_[np.newaxis] / 127.5 - 1.0, self.is_training: True})\r\n for i in NMS_IDX:\r\n if TOPK_SCORE[i] > 0.5:\r\n IMG = draw_bbox(IMG, recover_ImgAndBbox_scale(IMG, BBOX[i]), CLASSES[TOPK_CLASS[i]])\r\n # IMG_ = draw_bbox(IMG_, np.int32(BBOX[i]), CLASSES[TOPK_CLASS[i]])\r\n return IMG\r\n\r\ndef detect_video(vid_path, inference):\r\n cap = cv2.VideoCapture(vid_path)\r\n while cap.isOpened():\r\n ret, frame = cap.read()\r\n if ret == True:\r\n frame = np.array(frame)\r\n frame = np.array(Image.fromarray(frame).rotate(270))\r\n frame = inference(frame)\r\n cv2.imshow(\"Frame\", np.uint8((frame)))\r\n if cv2.waitKey(25) & 0xFF == ord(\"q\"):\r\n break\r\n else:\r\n break\r\n cap.release()\r\n cv2.destroyAllWindows()\r\n\r\nif __name__ == \"__main__\":\r\n IMG_PATH = \"C:/Users/gmt/Desktop/cats/65.jpg\"\r\n inference = Inference()\r\n # VIDEO_PATH = \"C:/Users/gmt/Desktop/test2.mp4\"\r\n # detect_video(VIDEO_PATH, inference)\r\n IMG = np.array(Image.open(IMG_PATH))\r\n IMG = inference(IMG)\r\n Image.fromarray(IMG).show()\r\n Image.fromarray(IMG).save(\"1.jpg\")\r\n"
] |
[
[
"tensorflow.concat",
"tensorflow.image.non_max_suppression",
"numpy.uint8",
"tensorflow.reshape",
"tensorflow.placeholder",
"numpy.concatenate",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"tensorflow.train.Saver",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
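Each row appears to store parallel per-file lists: entry *i* of hexsha, file_path, code, apis, and possible_versions all describe the same file of the repository named in repo_name. A minimal sketch of consuming one such record, assuming it is exposed as a JSON object with exactly these field names (an assumption about the serialization; the literal below is a truncated, hypothetical stand-in for the row above):

```python
import json

# Hypothetical, truncated stand-in for the RetinaNet_TensorFlow row shown above.
# Field names follow the table header; the exact on-disk serialization of the
# dataset is an assumption made for illustration only.
record_json = """
{
  "repo_name": "MingtaoGuo/RetinaNet_TensorFlow",
  "hexsha": ["e46a8db9aef3da77de5daa32663c6c5dbb661d11"],
  "file_path": ["test.py"],
  "code": ["from networks import backbone\\nimport tensorflow as tf\\n..."],
  "apis": [["tensorflow.placeholder", "tensorflow.Session", "numpy.array"]],
  "possible_versions": [{"tensorflow": ["1.10"], "numpy": []}]
}
"""

record = json.loads(record_json)

# file_path, apis, and possible_versions are parallel lists:
# index i describes the i-th file of the repository.
for path, api_list, versions in zip(
    record["file_path"], record["apis"], record["possible_versions"]
):
    # Keep only libraries that actually carry a version constraint.
    pinned = {lib: v for lib, v in versions.items() if v}
    print(f"{record['repo_name']}:{path}")
    print(f"  APIs called: {', '.join(api_list)}")
    print(f"  version constraints: {pinned or 'none'}")
```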
jmark/turbubox
|
[
"17fd3214ad4cb0c360bdb628d7bd270e8b00aadc",
"17fd3214ad4cb0c360bdb628d7bd270e8b00aadc",
"17fd3214ad4cb0c360bdb628d7bd270e8b00aadc",
"17fd3214ad4cb0c360bdb628d7bd270e8b00aadc"
] |
[
"tools/lib/hopr.py",
"tools/lib/flexi.py",
"sandbox/plot/dens-mach.py",
"tools/lib/gausslobatto.py"
] |
[
"import h5\nimport numpy as np\n\nclass MeshFile:\n def __init__(self,fpath, mode='r'):\n self.h5file = h5.File(fpath,mode)\n self.elemInfo = self.get('ElemInfo')\n self.nodeCoords = self.get('NodeCoords')\n\n self.domain = np.array([\n [self.nodeCoords[:,i].min() for i in range(0,3)],\n [self.nodeCoords[:,i].max() for i in range(0,3)]\n ])\n\n self.domainsize = np.abs(self.domain[1]-self.domain[0])\n\n def get(self,dname, copy=True):\n return self.h5file.get(dname)\n\n def close(self):\n self.h5file.close()\n\nclass CartesianMeshFile(MeshFile):\n def __init__(self,fpath, mode='r'):\n super().__init__(fpath, mode)\n\n self.elemTypes = np.unique(self.elemInfo[:,0])\n if len(self.elemTypes) > 1:\n raise AssertionError('multiple element types detected: %s' % self.elemTypes)\n if self.elemTypes[0] != 108:\n raise AssertionError(\"type of all elements must be '108 aka. cube'\")\n\n self.cellsize = np.abs(self.nodeCoords[7]-self.nodeCoords[0]) # just take first element\n self.gridsize = (self.domainsize // self.cellsize).astype(np.int)\n self.nrelems = len(self.elemInfo)\n\n # better alias\n self.elemsize = self.cellsize\n self.meshshape = self.gridsize\n\n self.elemcoords = (self.nodeCoords[:-7:8,:], self.nodeCoords[7::8,:])\n \n def get_cell_coords(self):\n return (self.nodeCoords[:-7:8,:], self.nodeCoords[7::8,:])\n",
"import h5\nimport numpy as np\nimport ulz\nimport interpolate\nimport gausslobatto\nimport hopr\nimport os\n\nclass File:\n def __init__(self, fpath, mesh, mode='r'):\n self.h5file = h5.File(fpath, mode)\n self.data = self.h5file.get('DG_Solution')\n self.mesh = self.hopr = mesh\n self.attr = self.h5file.get(\"/\").attrs\n self.nodetype = self.attr['NodeType'][0].decode('utf-8').lower()\n self.Npoly = self.attr['N'][0]\n self.Nout = len(self.data[0,:,0,0,0])\n self.time = self.attr['Time'][0]\n\n self.var2idx = dict((k,v) for v,k in enumerate('dens momx momy momz ener magx magy magz'.split()))\n\n self.domain = self.mesh.domain\n self.domainsize = self.mesh.domainsize \n self.cellsize = self.mesh.cellsize\n self.cellvolume = np.prod(self.cellsize)\n \n def as_box(self, iVar, Nvisu=None):\n if isinstance(iVar, str): iVar = self.var2idx[iVar]\n if Nvisu is None: Nvisu = self.Nout\n\n xs = gausslobatto.mk_nodes(self.Nout-1, self.nodetype)\n Xs = ulz.mk_body_centered_linspace(-1,1, Nvisu)\n\n elems = interpolate.change_grid_space(self.data[:,:,:,:,iVar].transpose(0,3,2,1),xs,Xs)\n return interpolate.elements_to_box(elems, self.mesh)\n\n def as_box_fv(self, iVar, Nvisu=None):\n \"\"\"This routine works the same as 'as_box' but recognizes finite-volume cells and\n treats them appropiately.\"\"\"\n\n if Nvisu is None:\n Nvisu = self.Nout\n\n # Which elements are FV cells?\n elemData = self.h5file.get('ElemData')\n FVs = elemData[:,2].astype(np.int32)\n \n xs = gausslobatto.mk_nodes(self.Nout-1, self.nodetype)\n Xs = ulz.mk_body_centered_linspace(-1,1, Nvisu)\n\n elems = interpolate.change_grid_space_dg_fv(self.data[:,:,:,:,iVar].transpose(0,3,2,1),xs,Xs,FVs)\n return interpolate.elements_to_box(elems, self.mesh)\n\n def get_prims(self, Nvisu=None, cons2prim=ulz.navier_conservative_to_primitive, gamma=5/3):\n if Nvisu is None:\n Nvisu = self.Nout\n\n # Which elements are FV cells?\n elemData = self.h5file.get('ElemData')\n FVs = elemData[:,2]\n \n xs = gausslobatto.mk_nodes(self.Nout-1, self.nodetype)\n Xs = ulz.mk_body_centered_linspace(-1,1, Nvisu)\n\n cons = [self.data[:,:,:,:,i] for i in range(0,len(self.varnames))] \n prims = cons2prim(cons, gamma)\n\n retval = []\n for iVar in range(0,len(self.varnames)):\n elems = interpolate.change_grid_space_dg_fv(prims[iVar].transpose(0,3,2,1),xs,Xs,FVs)\n retval.append(interpolate.elements_to_box(elems, self.mesh))\n\n return retval\n\n def get_cons(self, Nvisu=None):\n if Nvisu is None:\n Nvisu = self.Nout\n\n # Which elements are FV cells?\n elemData = self.h5file.get('ElemData')\n FVs = elemData[:,2]\n \n xs = ulz.mk_body_centered_linspace(-1,1, Nvisu)\n Xs = gausslobatto.mk_nodes(self.Nout-1, self.nodetype)\n\n cons = [self.data[:,:,:,:,i] for i in range(0,len(self.varnames))] \n\n retval = []\n for iVar in range(0,len(self.varnames)):\n elems = interpolate.change_grid_space_fv_dg(cons[iVar].transpose(0,3,2,1),xs,Xs,FVs)\n retval.append(interpolate.elements_to_box(elems, self.mesh))\n\n return retval\n\n def close(self):\n self.h5file.close()\n\n # provide context manager interface\n def __enter__(self):\n return self\n\n def __exit__(self, type, value, traceback):\n self.close()\n if isinstance(value,Exception):\n raise\n\n # convenience methods\n def flexi_to_box(self, iVar, Nvisu=None):\n return self.as_box(iVar, Nvisu)\n\n def get_cons(self, Nvisu=None):\n return [self.as_box_fv(i, Nvisu) for i in range(0,len(self.varnames))]\n",
"#!/usr/bin/env pyturbubox\n\nimport os\nimport box as dbox\nimport sys\nimport numpy as np\nimport periodicbox, ulz # jmark\nimport pickle\nimport pathlib as pl\nfrom collections import namedtuple\nimport multiprocessing as mpr\nimport matplotlib\nmatplotlib.use('Agg')\nmatplotlib.rcParams.update({'font.size': 20})\nfrom matplotlib import pyplot as plt\n\n## ========================================================================= ##\n## process commandline arguments\n\nimport argparse\n\npp = argparse.ArgumentParser(description = 'FLEXI Batch Plotter')\n\npp.add_argument(\n '--destdir',\n help='path to store: <dir>/%%03d.png',\n type=pl.Path, required=True,\n)\n\npp.add_argument(\n '--title',\n type=str,\n)\n\npp.add_argument(\n '--cachedir',\n help='path to cache min max calculations',\n type=pl.Path,\n)\n\npp.add_argument(\n '--parallel',\n help='enable parallel processes: 0 --> max. n procs, > 0 --> set n procs',\n type=int,\n default=-1,\n)\n\npp.add_argument(\n '--crosstime',\n help='crossing time scale: 1.0 (default)',\n type=float,\n default=1.0,\n)\n\npp.add_argument(\n '--gather_min_max',\n help='flag to gather min max values beforehand',\n action='store_true',\n)\n\npp.add_argument(\n '--ntasks',\n help='number of individual tasks',\n type=int,\n)\n\npp.add_argument(\n '--crange',\n help='color range in the format \"cdens = (-1,1), cmach = (-2,2)\"',\n)\n\npp.add_argument(\n 'snapshots',\n help='list of snapshot files',\n type=pl.Path,nargs='*',\n)\n\nARGV = pp.parse_args()\n\n## ========================================================================= ##\n## define tasks \n\n# interfaces used over subroutines below\nData = namedtuple('Data', 'taskID time dyntime cdens cmach')\nCRange = namedtuple('CRange', 'cdens cico cipc cmach')\n\ndef calc_data(srcfp):\n box = periodicbox.File(srcfp.as_posix(), mode='r')\n\n dens, velx, vely, velz, pres = box.get_prims()\n\n ico = box.get_data('ih2 ')\n icp = box.get_data('tdus')\n\n dens[dens < 0] = 1e-5\n pres[pres < 0] = 1e-5\n\n mach = np.sqrt(velx**2+vely**2+velz**2)/np.sqrt(pres/dens)\n\n ax = 2\n return dbox.Box(\n taskID = -1,\n time = box.time,\n dyntime = box.time / ARGV.crosstime,\n cdens = np.log10(np.sum(dens,axis=ax)),\n cmach = np.log10(np.mean(mach,axis=ax)),\n #cico = np.log10(np.sum(ico,axis=ax)),\n #cicp = np.log10(np.sum(ico,axis=ax)),\n cico = ico[:,:,64],\n cicp = icp[:,:,64],\n )\n\ndef min_max(taskID, srcfp):\n data = calc_data(srcfp)\n\n result = Data(\n taskID = taskID, time = data.time, dyntime = data.dyntime,\n cdens = ( np.min(data.cdens), np.max(data.cdens) ),\n cmach = ( np.min(data.cmach), np.max(data.cmach) ),\n )\n\n print(ulz.flatten_dict(result._asdict()), flush=True)\n \n return result\n\ndef mkplot(taskID, srcfp, crange):\n proc = mpr.current_process()\n data = calc_data(srcfp)\n print('Finnished: ', srcfp, flush=True)\n\n subplt = [1,2,0]\n fig = plt.figure(figsize=(20, 9.5))\n\n frameid = taskID+1\n nframes = ARGV.ntasks if ARGV.ntasks else len(ARGV.snapshots)\n\n if ARGV.title is None:\n title = \"simtime: {:1.4e} | frame: {:03d}/{:03d}\".format(data.time, frameid, nframes)\n #title = \"dyntime: % 2.4f | frame: %03d/%03d\" % (data.dyntime/ ARGV.crosstime, frameid, nframes)\n else:\n title = \"%s (dyntime: % 2.4f | frame: %03d/%03d)\" % (ARGV.title, data.dyntime, frameid, nframes)\n\n plt.suptitle(title, fontsize='x-large').set_y(1.01)\n\n def plot(data, crange, title):\n subplt[2] += 1\n ax = fig.add_subplot(*subplt)\n ax.set_title(title)\n ax.set_xlabel('x index')\n ax.set_ylabel('y index')\n\n img 
= ax.imshow(data,\n #vmin = crange[0],\n #vmax = crange[1],\n interpolation = 'none',\n cmap = plt.get_cmap('cubehelix'),\n )\n\n plt.colorbar(img,fraction=0.0456, pad=0.04, format='%1.2f')\n\n #plot(data.cdens, crange.cdens, title='column sum density (log10)')\n plot(data.cico, crange.cico, title='column sum ico (log10)')\n plot(data.cico, crange.cico, title='column sum icp (log10)')\n #plot(data.cmach, crange.cmach, title='column mean mach number (log10)')\n\n fig.tight_layout()\n\n plotfp = ARGV.destdir / srcfp.with_suffix('.png').name\n plt.savefig(str(plotfp), bbox_inches='tight')\n plt.close()\n print('Finnished: ', str(plotfp), flush=True)\n\n## ========================================================================= ##\n## activate caching\n\nif ARGV.cachedir:\n class mask: pass\n\n mask.calc_data = calc_data\n def calc_data(srcfp):\n cachefp = ARGV.cachedir / srcfp.with_suffix('.cdata.cache.pickle').name\n print('Processing: ', cachefp, flush=True)\n return ulz.cache(srcfp, cachefp, mask.calc_data, srcfp)\n\n mask.min_max = min_max\n def min_max(taskID, srcfp):\n cachefp = ARGV.cachedir / srcfp.with_suffix('.minmax.cache.pickle').name\n print('Processing: ', cachefp, flush=True)\n retval = ulz.cache(srcfp.as_posix(), cachefp.as_posix(), mask.min_max, taskID, srcfp)\n print('Finnished: ', cachefp, flush=True)\n return retval\n\n mask.mkplot = mkplot\n def mkplot(taskID, srcfp, crange):\n plotfp = ARGV.destdir / srcfp.with_suffix('.png').name\n print('Processing: ', plotfp, flush=True)\n if plotfp.exists() and plotfp.stat().st_mtime > srcfp.stat().st_mtime:\n return\n return mask.mkplot(taskID, srcfp, crange)\n\n## ========================================================================= ##\n## set color range defaults\n\ncrange = {k: (None,None) for k in CRange._fields}\n\nif ARGV.crange:\n # DANGER: using 'eval' on tainted data poses a security risk\n crange.update(eval('dict(%s)' % ARGV.crange))\n\ncrange = CRange(**crange)\n\n## ========================================================================= ##\n## gather minimun and maximum values\n\nif ARGV.gather_min_max:\n if ARGV.parallel >= 0:\n def task(args):\n return min_max(*args)\n nprocs = None if ARGV.parallel == 0 else ARGV.parallel\n tmp = mp.Pool(nprocs).map(task,enumerate(ARGV.snapshots))\n else:\n tmp = [min_max(i,x) for i,x in enumerate(ARGV.snapshots)]\n\n def sanitize(dname, i):\n return [x for x in (getattr(X,dname)[i] for X in tmp) if not np.isnan(x) or np.isinf(x)]\n\n crange = CRange(\n cdens = ( np.min(sanitize('cdens',0)), np.max(sanitize('cdens',1)) ),\n cmach = ( np.min(sanitize('cmach',0)), np.max(sanitize('cmach',1)) ),\n )\n\n## ========================================================================= ##\n## do plotting\n\nif ARGV.parallel >= 0:\n def task(args):\n return mkplot(args[0], args[1], crange)\n nprocs = None if ARGV.parallel == 0 else ARGV.parallel\n mpr.Pool(nprocs,maxtasksperchild=1).map(task,enumerate(ARGV.snapshots))\nelse:\n [mkplot(i,x,crange) for i,x in enumerate(ARGV.snapshots)]\n",
"import numpy as np\nimport math\nimport itertools\n\neps = np.finfo(float).eps\nnit,TOL = 4,4*eps\n\ndef shiftMatrix(nrows,ncols,inmat):\n exmat = np.zeros(inmat.shape)\n for i in range(nrows,inmat.shape[0]):\n for j in range(ncols,inmat.shape[1]):\n exmat[i,j] = inmat[i-nrows,j-ncols]\n return exmat\n\ndef upAndDownScaleMat(N):\n tempMat = np.zeros((N,N))\n tempMat[0,0] = 1.0\n tempMat[1,0] = 1.0\n\n rfaceMat = np.zeros((2,N,N))\n for f in range(2):\n for i in range(N//2):\n rfaceMat[f] = rfaceMat[f] + shiftMatrix(2*i,f*N//2+i,tempMat)\n \n cfaceMat = np.zeros((2,N,N))\n for f in range(2):\n cfaceMat[f] = 0.5*np.transpose(rfaceMat[f])\n\ndef mk_body_centered_linspace(infimum, supremum, nNodes, withBoundaryNodes=False):\n \"\"\"\n Make regular body centered linear space w/ or w/o neighboring boundary nodes.\n \"\"\"\n\n domsize = np.abs(supremum - infimum)\n offset = domsize / nNodes / 2\n\n if withBoundaryNodes:\n nNodes = nNodes + 2\n infimum = infimum - offset\n supremum = supremum + offset\n else:\n infimum = infimum + offset\n supremum = supremum - offset\n\n return np.linspace(infimum, supremum, nNodes, endpoint=True)\n\ndef BarycentricWeight(xs,j):\n acc = 1\n for i in range(len(xs)):\n if i==j: continue\n acc *= xs[j]-xs[i]\n return 1/acc\n\ndef BarycentricWeights(xs):\n n = len(xs)\n ws = np.empty(n)\n for j in range(n):\n ws[j] = BarycentricWeight(xs,j) \n return ws\n\ndef LagrangePolynomial(xs,j,x):\n acc = 1\n for i in range(len(xs)):\n if i==j: continue\n acc *= (x-xs[i])/(xs[j]-xs[i])\n return acc\n\ndef BarycentricPolynomial(xs,ws,fs,x):\n numerator,denominator = 0,0\n for j in range(len(xs)):\n diff = x-xs[j]\n if abs(diff) <= eps: return fs[j]\n numerator += fs[j]*ws[j]/diff\n denominator += ws[j]/diff\n return numerator/denominator\n\ndef DiffMatrix(xs):\n ws = BarycentricWeights(xs)\n n = len(xs)\n M = np.zeros([n,n])\n for i in range(n):\n for j in range(n):\n if i != j:\n M[i,j] = ws[j]/(ws[i]*(xs[i]-xs[j]))\n else:\n acc = 0\n for k in range(n):\n if i == k: continue\n acc += ws[k]/(xs[i]-xs[k]) \n M[i,i] = -acc/ws[i]\n return M\n\ndef MassMatrix(ws):\n return np.diagflat(ws)\n\ndef LagrangePolynomialDerivative(xs,j,x):\n ACC = 0\n for i in range(len(xs)):\n if i == j: continue\n acc = 1\n for m in range(len(xs)):\n if m == i or m == j: continue\n acc *= (x-xs[m])/(xs[j]-xs[m])\n ACC += acc/(xs[j]-xs[i])\n return ACC\n\n# =========================================================================== #\n\ndef LegendrePolynomialAndDerivative(N,x,doNormalize=False):\n if N == 0:\n LN, LND = 1, 0\n elif N == 1:\n LN, LND = x, 1\n else:\n LN_2, LN_1 = 1, x\n LND_2, LND_1 = 0, 1\n\n for k in range(2,N+1):\n LN = (2*k-1)/k * x * LN_1 - (k-1)/k * LN_2\n LND = LND_2 + (2*k-1) * LN_1\n LN_2, LN_1 = LN_1, LN\n LND_2, LND_1 = LND_1, LND\n\n if doNormalize:\n LN *= np.sqrt(N + 0.5)\n LND *= np.sqrt(N + 0.5)\n\n return (LN,LND)\n\ndef LegendreGaussNodesAndWeights(N):\n \"\"\" Compute the nodes (roots of the Legendre Polynomial) and weights for\n the Legendre-Gauss-Quadrature. 
\"\"\"\n if N == 0: return (np.array([0]),np.array([2]))\n if N == 1: return (np.array([-np.sqrt(1/3),np.sqrt(1/3)]) , np.array([1,1]) )\n\n # nodes and weights\n xs = np.zeros(N+1)\n ws = np.zeros(N+1)\n \n for j in range(math.floor((N+1)/2)):\n # make initial guess for the jth's node\n xs[j] = -math.cos((2*j+1)/(2*N+2) * math.pi) \n for k in range(0,nit):\n # Newton's method for finding the root\n LN1,LND1 = LegendrePolynomialAndDerivative(N+1,xs[j])\n Delta = -LN1/LND1\n xs[j] += Delta\n if abs(Delta) <= TOL * abs(xs[j]): break\n\n # get final optimal values for Legendre Polynomial\n LN1,LND1 = LegendrePolynomialAndDerivative(N+1,xs[j])\n ws[j] = 2/(1-xs[j]**2)/LND1**2\n # utilize symmetry\n xs[N-j] = -xs[j]\n ws[N-j] = ws[j]\n \n # consider the middle point if there is one (always zero)\n if N % 2 == 0:\n LN1,LND1 = LegendrePolynomialAndDerivative(N+1,0.0)\n xs[N//2] = 0\n ws[N//2] = 2/LND1**2\n\n return (xs,ws)\n\n# =========================================================================== #\n\ndef qAndLEvaluation(N,x):\n LN_2,LN_1,LND_2,LND_1 = 1,x,0,1\n\n for k in range(2,N+1):\n LN = (2*k-1)/k * x * LN_1 - (k-1)/k * LN_2\n LND = LND_2 + (2*k-1) * LN_1\n LN_2,LN_1 = LN_1,LN\n LND_2,LND_1 = LND_1,LND\n\n k = N+1 \n LN1 = (2*k-1)/k * x * LN - (k-1)/k * LN_2\n LND1 = LND_2 + (2*k-1) * LN_1\n q = LN1 - LN_2\n qD = LND1 - LND_2\n\n return (q,qD,LN)\n\ndef LegendreGaussLobattoNodesAndWeights(N):\n if N == 1: return (np.array([-1,1]), np.array([1,1]))\n\n xs,ws = np.empty(N+1),np.empty(N+1)\n\n xs[0],xs[N] = -1,1\n ws[0],ws[N] = (2/N/(N+1),) * 2\n\n for j in range(math.floor((N+1)/2)):\n xs[j] = - math.cos((j+1/4)*math.pi/N - 3/8/N/math.pi/(j+1/4))\n \n for k in range(nit+1):\n q,qD,LN = qAndLEvaluation(N,xs[j])\n Delta = -q/qD\n xs[j] += Delta\n if abs(Delta) <= TOL*abs(xs[j]): break\n\n q,qD,LN = qAndLEvaluation(N,xs[j])\n xs[N-j] = -xs[j]\n ws[N-j] = ws[j] = 2/N/(N+1)/LN**2\n\n if N % 2 == 0:\n q,qD,LN = qAndLEvaluation(N,0.0)\n xs[N//2] = 0\n ws[N//2] = 2/N/(N+1)/LN**2\n\n return (xs,ws)\n\n# =========================================================================== #\n\ndef integrate(xs,ws,f):\n acc = 0\n for j in range(len(xs)):\n acc += ws[j] * f(xs[j]) \n return acc\n\n# =========================================================================== #\n\ndef mk_mass_matrix(xs,ws):\n n = len(xs)\n M = np.empty([n,n])\n for i in range(n):\n for j in range(n):\n f = lambda x: LagrangePolynomial(xs,i,x) * LagrangePolynomial(xs,j,x)\n M[i,j] = integrate(xs,ws,f)\n return M\n\ndef mk_visual_matrix(Xs,xs):\n M = np.empty([len(Xs),len(xs)])\n for i in range(len(Xs)):\n for j in range(len(xs)):\n M[i,j] = LagrangePolynomial(xs,j,Xs[i])\n return M\n\ndef mk_corner_matrix(N):\n B = np.zeros([N,N])\n B[0,0] = -1\n B[-1,-1] = 1\n\n return B\n\n# =========================================================================== #\n\ndef f1(N=None):\n return lambda x: np.cos(x)\n\ndef f2(N=None):\n return lambda x: 1/(1+x**2)\n\ndef f3(N=None):\n return lambda x: x**(2*N-2)\n\ndef f4(N=None):\n return lambda x: x**(2*N)\n\ndef f5(N=None):\n return lambda x: x**(2*N+2)\n\n# =========================================================================== #\n\ndef mk_exponent_vector(ndim,npoly):\n #return (x for x in itertools.product(range(npoly+1),repeat=ndim) if sum(x) <= npoly)\n return itertools.product(range(npoly+1),repeat=ndim)\n\ndef mk_polynome_vector(x,npoly):\n return [np.prod(np.power(x,e)) for e in mk_exponent_vector(x.shape[-1], npoly)]\n\ndef mk_polynome_matrix(xs, npoly):\n return 
np.array([mk_polynome_vector(x,npoly) for x in xs])\n\ndef put_row(M,nrow,row):\n tmp = M.copy()\n tmp[nrow] = row\n return tmp\n\ndef mk_interpol_matrix(xs,Xs,npoly):\n M = mk_polynome_matrix(xs,npoly)\n\n return np.array([\n [np.linalg.det(put_row(M,nrow,mk_polynome_vector(X,npoly)))\n for nrow in range(len(M))] for X in Xs])/np.linalg.det(M)\n\ndef mk_polynomial_interpolator(xs,Xs,npoly):\n IM = mk_interpol_matrix(xs, Xs,npoly)\n def closure(fs, domain):\n return np.dot(IM,fs)\n return closure \n\n# =========================================================================== #\n\ndef mk_lagrange_vector(xs,x):\n return np.array([LagrangePolynomial(xs,j,x) for j in range(len(xs))])\n\ndef mk_vandermonde_matrix(xs,ys):\n return np.array([mk_lagrange_vector(xs,x) for x in ys])\n\n# =========================================================================== #\n\ndef mk_lagrange_interpolator_2d(xs,ys, Xs):\n def polyv(nodes,x):\n return np.array([LagrangePolynomial(nodes,j,x) for j in range(len(nodes))])\n\n def polyouter(x,y):\n return np.einsum('i,j->ij',polyv(xs,x),polyv(ys,y))\n \n tensors = [polyouter(*X) for X in Xs]\n \n def interpolate(fs):\n return np.array([np.sum(fs*t) for t in tensors])\n \n return interpolate\n\ndef mk_lagrange_interpolator_3d(xs,ys,zs,Xs):\n def polyv(nodes,x):\n return np.array([LagrangePolynomial(nodes,j,x) for j in range(len(nodes))])\n\n def polyouter(x,y,z):\n return np.einsum('i,j,k->ijk',polyv(xs,x),polyv(ys,y),polyv(zs,z))\n \n tensors = [polyouter(*X) for X in Xs]\n\n def interpolate(fs):\n return np.array([np.sum(fs*t) for t in tensors])\n \n return interpolate\n\ndef mk_nodes(npoly, ntype='gauss'):\n if ntype == 'gauss':\n fun = LegendreGaussNodesAndWeights\n elif ntype == 'gauss-lobatto':\n fun = LegendreGaussLobattoNodesAndWeights\n elif ntype == 'cell-centered':\n fun = lambda npoly: (mk_body_centered_linspace(-1,1,npoly+1),None)\n else:\n raise KeyError(\"unknown node type: '%s'\" % ntype)\n\n nodes, _ = fun(npoly)\n return np.array(nodes)\n\ndef mk_nodes_from_to(x0,x1,npoly,ntype='gauss'):\n return x0 + (x1-x0) * (mk_nodes(npoly,ntype)+1)/2\n\ndef mk_LegendreVandermondeMatrix(xs,doNormalize=False):\n return np.array([[\n LegendrePolynomialAndDerivative(j,xs[i],doNormalize=doNormalize)[0]\n for j in range(0,len(xs))\n ] for i in range(0,len(xs))\n ])\n\ndef mk_dg2fvMat(xs,ws,M):\n \"\"\"\n Args:\n xs: array of quadrature nodes\n ws: array of quadrature weights\n\n Returns:\n (M x len(xs)) projection matrix\n \"\"\"\n\n xL = -0.5*np.sum(ws)\n xR = 0.5*np.sum(ws)\n\n # interpolation nodes within each finite volume\n Ys = [mu + xs/M for mu in mk_body_centered_linspace(xL,xR,M)]\n\n # projection matrix\n return np.array([np.dot(ws,mk_vandermonde_matrix(xs,ys)) for ys in Ys])\n\ndef mk_galerkinMat(xs,ws,ys,vs,M):\n \"\"\"\n Args:\n xs: array of sub-element quadrature nodes\n ws: array of sub-element quadrature weights\n\n ys: array of main element quadrature nodes\n vs: array of main element quadrature weights\n\n M: number of (xs,ws)-elements\n\n Returns:\n tuple of M projection (len(ys) x len(xs)) matrices\n \"\"\"\n\n xL = -0.5*np.sum(vs)\n xR = 0.5*np.sum(vs)\n\n # interpolation nodes within each sub-element of master element\n Ys = [mu + xs/M for mu in mk_body_centered_linspace(xL,xR,M)]\n\n return tuple(\n np.array([[LagrangePolynomial(ys,i,Ys[m][j])*ws[j]/vs[i]/M for j in range(len(xs))] for i in range(len(ys))])\n for m in range(M))\n\ndef mk_dg2dgMat(xs,ws,ys,vs):\n return mk_galerkinMat(xs,ws,ys,vs,M=1)\n"
] |
[
[
"numpy.abs",
"numpy.unique"
],
[
"numpy.prod"
],
[
"numpy.sqrt",
"numpy.isinf",
"numpy.min",
"numpy.isnan",
"matplotlib.use",
"matplotlib.pyplot.get_cmap",
"matplotlib.pyplot.colorbar",
"numpy.max",
"matplotlib.rcParams.update",
"numpy.mean",
"matplotlib.pyplot.close",
"matplotlib.pyplot.suptitle",
"numpy.sum",
"matplotlib.pyplot.figure"
],
[
"numpy.dot",
"numpy.abs",
"numpy.linspace",
"numpy.sqrt",
"numpy.diagflat",
"numpy.power",
"numpy.cos",
"numpy.finfo",
"numpy.linalg.det",
"numpy.transpose",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.empty"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
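The dotted names in the apis column begin with the top-level library they belong to, which is the same library set tracked in the possible_versions column. A small sketch (an illustration, not part of any dataset tooling) that maps a file's API list back to those tracked libraries:

```python
# Libraries tracked by the possible_versions column in the rows above.
TRACKED = {"matplotlib", "numpy", "pandas", "scipy", "tensorflow"}

def libraries_used(api_names):
    """Map dotted API names such as 'numpy.linalg.det' to their top-level library."""
    return sorted({name.split(".", 1)[0] for name in api_names} & TRACKED)

# Example taken from the tools/lib/gausslobatto.py entry of jmark/turbubox:
apis = ["numpy.dot", "numpy.abs", "numpy.linspace", "numpy.linalg.det"]
print(libraries_used(apis))  # ['numpy']
```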
5A5H/PyFEMP
|
[
"94ebf58a52230680fd87b699f295ccb3efa6c46a",
"94ebf58a52230680fd87b699f295ccb3efa6c46a"
] |
[
"examples_1d/Example_3_Postprocessing.py",
"examples_1d/Example_1_dynamic_bamo_bench.py"
] |
[
"# TEST for FiniteElement in coupled problems\n# postprocessing of a truss under gravity\n# seperately solving laplace -> temperature in the middle and on one side set\n\nimport matplotlib.pyplot as plt\n\nimport PyFEMP\nimport PyFEMP.elements.Elmt_BaMo_BaEn_Coupled_1D as ELEMENT\n\n\n# Create FEM Instance\nFEM = PyFEMP.FEM_Simulation(ELEMENT)\nXI, ELEM = PyFEMP.msh_line(0, 10.0, 20)\nFEM.Add_Mesh(XI, ELEM)\nFEM.Add_Material([5,1.2,0,0,1,0],\"All\")\nFEM.Add_EBC(\"x==0\",\"U\",0)\nFEM.Add_EBC(\"x==10\",\"T\",0)\nFEM.Add_EBC(\"x==5\",\"T\",3)\nfor node in range(FEM.NoNodes):\n FEM.Add_NBC(node, \"U\", 0.1)\n\nFEM.Analysis()\n\n# Analysis of a static problem\nFEM.NextStep(1,1)\nFEM.NewtonIteration()\nFEM.NewtonIteration()\n\n#Plot Accelerations,Stresses over 1D domain\nplt.figure(1,figsize=[20,8])\n\nXI = FEM.XI\nUI = FEM.DI[0::2]\nplt.subplot(221)\nplt.plot(XI,UI)\nplt.xlabel('x')\nplt.ylabel('$u$')\n\nXI = FEM.XI\nTI = FEM.DI[1::2]\nplt.subplot(222)\nplt.plot(XI,TI)\nplt.xlabel('x')\nplt.ylabel('$T$')\n\nXI, SigI = FEM.PostProcessing(\"Sig\")\nplt.subplot(223)\nplt.plot(XI,SigI)\nplt.xlabel('x')\nplt.ylabel('$\\sigma$')\n\nXI, A = FEM.PostProcessing(\"q\")\nplt.subplot(224)\nplt.plot(XI,A)\nplt.xlabel('x')\nplt.ylabel('q')\n\nplt.show()",
"# TEST for FiniteElement in coupled problems\n# for the dynamic terms including inertia and damping \n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport PyFEMP\nimport PyFEMP.elements.Elmt_BaMo_BaEn_Coupled_1D as ELEMENT\n\n\n# Create FEM Instance\nFEM = PyFEMP.FEM_Simulation(ELEMENT)\nXI, ELEM = PyFEMP.msh_line(0, 9.0, 10)\nFEM.Add_Mesh(XI, ELEM)\nFEM.Add_Material([5, 1.2, 10, 1, 0, 0],\"All\")\nFEM.Add_EBC(\"x==0\", \"U\", 0)\nFEM.Add_EBC(\"x>-1\", \"T\", 0)\nFEM.Add_NBC(\"x==9\", 0 , 1)\nFEM.Analysis()\n\n# define a loading function\ndef load(time):\n lam = 0.0\n if time <= 10:\n lam = (time/10)\n if time > 10:\n lam = 1.0\n if time > 60:\n lam = 0.0\n return lam\n\n# Lets prepare a time loop, with recoding the time and displacement\nrec_t = []\nrec_u = []\nrec_tu = []\nnStep, time, dt = 100 ,0.0, 1.0\nfor step in range(nStep):\n time += dt\n FEM.NextStep(time,load(time))\n print( FEM.NewtonIteration() )\n print( FEM.NewtonIteration() )\n u = FEM.NodalDof(\"x==9\",\"U\")\n rec_t.append(time)\n rec_u.append(u)\n\n\nplt.plot(rec_t,rec_u)\nplt.xlabel('t')\nplt.ylabel('u')\nplt.show()\n\n\n#Plot Accelerations,Stresses over 1D domain\nplt.figure(1,figsize=[20,5])\n\nXI, SigI = FEM.PostProcessing(\"Sig\")\nplt.subplot(121)\nplt.plot(XI,SigI)\nplt.xlabel('x')\nplt.ylabel('$\\sigma$')\n\nXI, A = FEM.PostProcessing(\"A\")\nplt.subplot(122)\nplt.plot(XI,A)\nplt.xlabel('x')\nplt.ylabel('a')\n\nplt.show()"
] |
[
[
"matplotlib.pyplot.figure",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
],
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ryuwd/scikit-learn
|
[
"da562b4fa58bdce4a7f3470f733f33d728747a66",
"da562b4fa58bdce4a7f3470f733f33d728747a66",
"da562b4fa58bdce4a7f3470f733f33d728747a66",
"da562b4fa58bdce4a7f3470f733f33d728747a66",
"da562b4fa58bdce4a7f3470f733f33d728747a66"
] |
[
"sklearn/ensemble/tests/test_weight_boosting.py",
"sklearn/metrics/_ranking.py",
"sklearn/cluster/tests/test_optics.py",
"examples/preprocessing/plot_map_data_to_normal.py",
"sklearn/decomposition/tests/test_nmf.py"
] |
[
"\"\"\"Testing for the boost module (sklearn.ensemble.boost).\"\"\"\n\nimport numpy as np\nimport pytest\n\nfrom scipy.sparse import csc_matrix\nfrom scipy.sparse import csr_matrix\nfrom scipy.sparse import coo_matrix\nfrom scipy.sparse import dok_matrix\nfrom scipy.sparse import lil_matrix\n\nfrom sklearn.utils._testing import assert_array_equal, assert_array_less\nfrom sklearn.utils._testing import assert_array_almost_equal\nfrom sklearn.utils._testing import assert_raises, assert_raises_regexp\n\nfrom sklearn.base import BaseEstimator\nfrom sklearn.base import clone\nfrom sklearn.dummy import DummyClassifier, DummyRegressor\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.ensemble import AdaBoostRegressor\nfrom sklearn.ensemble._weight_boosting import _samme_proba\nfrom sklearn.svm import SVC, SVR\nfrom sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor\nfrom sklearn.utils import shuffle\nfrom sklearn.utils._mocking import NoSampleWeightWrapper\nfrom sklearn import datasets\n\n\n# Common random state\nrng = np.random.RandomState(0)\n\n# Toy sample\nX = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]\ny_class = [\"foo\", \"foo\", \"foo\", 1, 1, 1] # test string class labels\ny_regr = [-1, -1, -1, 1, 1, 1]\nT = [[-1, -1], [2, 2], [3, 2]]\ny_t_class = [\"foo\", 1, 1]\ny_t_regr = [-1, 1, 1]\n\n# Load the iris dataset and randomly permute it\niris = datasets.load_iris()\nperm = rng.permutation(iris.target.size)\niris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)\n\n# Load the diabetes dataset and randomly permute it\ndiabetes = datasets.load_diabetes()\ndiabetes.data, diabetes.target = shuffle(diabetes.data, diabetes.target,\n random_state=rng)\n\n\ndef test_samme_proba():\n # Test the `_samme_proba` helper function.\n\n # Define some example (bad) `predict_proba` output.\n probs = np.array([[1, 1e-6, 0],\n [0.19, 0.6, 0.2],\n [-999, 0.51, 0.5],\n [1e-6, 1, 1e-9]])\n probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]\n\n # _samme_proba calls estimator.predict_proba.\n # Make a mock object so I can control what gets returned.\n class MockEstimator:\n def predict_proba(self, X):\n assert_array_equal(X.shape, probs.shape)\n return probs\n mock = MockEstimator()\n\n samme_proba = _samme_proba(mock, 3, np.ones_like(probs))\n\n assert_array_equal(samme_proba.shape, probs.shape)\n assert np.isfinite(samme_proba).all()\n\n # Make sure that the correct elements come out as smallest --\n # `_samme_proba` should preserve the ordering in each example.\n assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])\n assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])\n\n\ndef test_oneclass_adaboost_proba():\n # Test predict_proba robustness for one class label input.\n # In response to issue #7501\n # https://github.com/scikit-learn/scikit-learn/issues/7501\n y_t = np.ones(len(X))\n clf = AdaBoostClassifier().fit(X, y_t)\n assert_array_almost_equal(clf.predict_proba(X), np.ones((len(X), 1)))\n\n\[email protected](\"algorithm\", [\"SAMME\", \"SAMME.R\"])\ndef test_classification_toy(algorithm):\n # Check classification on a toy dataset.\n clf = AdaBoostClassifier(algorithm=algorithm, random_state=0)\n clf.fit(X, y_class)\n assert_array_equal(clf.predict(T), y_t_class)\n assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)\n assert clf.predict_proba(T).shape == 
(len(T), 2)\n assert clf.decision_function(T).shape == (len(T),)\n\n\ndef test_regression_toy():\n # Check classification on a toy dataset.\n clf = AdaBoostRegressor(random_state=0)\n clf.fit(X, y_regr)\n assert_array_equal(clf.predict(T), y_t_regr)\n\n\ndef test_iris():\n # Check consistency on dataset iris.\n classes = np.unique(iris.target)\n clf_samme = prob_samme = None\n\n for alg in ['SAMME', 'SAMME.R']:\n clf = AdaBoostClassifier(algorithm=alg)\n clf.fit(iris.data, iris.target)\n\n assert_array_equal(classes, clf.classes_)\n proba = clf.predict_proba(iris.data)\n if alg == \"SAMME\":\n clf_samme = clf\n prob_samme = proba\n assert proba.shape[1] == len(classes)\n assert clf.decision_function(iris.data).shape[1] == len(classes)\n\n score = clf.score(iris.data, iris.target)\n assert score > 0.9, \"Failed with algorithm %s and score = %f\" % \\\n (alg, score)\n\n # Check we used multiple estimators\n assert len(clf.estimators_) > 1\n # Check for distinct random states (see issue #7408)\n assert (len(set(est.random_state for est in clf.estimators_)) ==\n len(clf.estimators_))\n\n # Somewhat hacky regression test: prior to\n # ae7adc880d624615a34bafdb1d75ef67051b8200,\n # predict_proba returned SAMME.R values for SAMME.\n clf_samme.algorithm = \"SAMME.R\"\n assert_array_less(0,\n np.abs(clf_samme.predict_proba(iris.data) - prob_samme))\n\n\[email protected]('loss', ['linear', 'square', 'exponential'])\ndef test_diabetes(loss):\n # Check consistency on dataset diabetes.\n reg = AdaBoostRegressor(loss=loss, random_state=0)\n reg.fit(diabetes.data, diabetes.target)\n score = reg.score(diabetes.data, diabetes.target)\n assert score > 0.6\n\n # Check we used multiple estimators\n assert len(reg.estimators_) > 1\n # Check for distinct random states (see issue #7408)\n assert (len(set(est.random_state for est in reg.estimators_)) ==\n len(reg.estimators_))\n\n\[email protected](\"algorithm\", [\"SAMME\", \"SAMME.R\"])\ndef test_staged_predict(algorithm):\n # Check staged predictions.\n rng = np.random.RandomState(0)\n iris_weights = rng.randint(10, size=iris.target.shape)\n diabetes_weights = rng.randint(10, size=diabetes.target.shape)\n\n clf = AdaBoostClassifier(algorithm=algorithm, n_estimators=10)\n clf.fit(iris.data, iris.target, sample_weight=iris_weights)\n\n predictions = clf.predict(iris.data)\n staged_predictions = [p for p in clf.staged_predict(iris.data)]\n proba = clf.predict_proba(iris.data)\n staged_probas = [p for p in clf.staged_predict_proba(iris.data)]\n score = clf.score(iris.data, iris.target, sample_weight=iris_weights)\n staged_scores = [\n s for s in clf.staged_score(\n iris.data, iris.target, sample_weight=iris_weights)]\n\n assert len(staged_predictions) == 10\n assert_array_almost_equal(predictions, staged_predictions[-1])\n assert len(staged_probas) == 10\n assert_array_almost_equal(proba, staged_probas[-1])\n assert len(staged_scores) == 10\n assert_array_almost_equal(score, staged_scores[-1])\n\n # AdaBoost regression\n clf = AdaBoostRegressor(n_estimators=10, random_state=0)\n clf.fit(diabetes.data, diabetes.target, sample_weight=diabetes_weights)\n\n predictions = clf.predict(diabetes.data)\n staged_predictions = [p for p in clf.staged_predict(diabetes.data)]\n score = clf.score(diabetes.data, diabetes.target,\n sample_weight=diabetes_weights)\n staged_scores = [\n s for s in clf.staged_score(\n diabetes.data, diabetes.target, sample_weight=diabetes_weights)]\n\n assert len(staged_predictions) == 10\n assert_array_almost_equal(predictions, 
staged_predictions[-1])\n assert len(staged_scores) == 10\n assert_array_almost_equal(score, staged_scores[-1])\n\n\ndef test_gridsearch():\n # Check that base trees can be grid-searched.\n # AdaBoost classification\n boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())\n parameters = {'n_estimators': (1, 2),\n 'base_estimator__max_depth': (1, 2),\n 'algorithm': ('SAMME', 'SAMME.R')}\n clf = GridSearchCV(boost, parameters)\n clf.fit(iris.data, iris.target)\n\n # AdaBoost regression\n boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),\n random_state=0)\n parameters = {'n_estimators': (1, 2),\n 'base_estimator__max_depth': (1, 2)}\n clf = GridSearchCV(boost, parameters)\n clf.fit(diabetes.data, diabetes.target)\n\n\ndef test_pickle():\n # Check pickability.\n import pickle\n\n # Adaboost classifier\n for alg in ['SAMME', 'SAMME.R']:\n obj = AdaBoostClassifier(algorithm=alg)\n obj.fit(iris.data, iris.target)\n score = obj.score(iris.data, iris.target)\n s = pickle.dumps(obj)\n\n obj2 = pickle.loads(s)\n assert type(obj2) == obj.__class__\n score2 = obj2.score(iris.data, iris.target)\n assert score == score2\n\n # Adaboost regressor\n obj = AdaBoostRegressor(random_state=0)\n obj.fit(diabetes.data, diabetes.target)\n score = obj.score(diabetes.data, diabetes.target)\n s = pickle.dumps(obj)\n\n obj2 = pickle.loads(s)\n assert type(obj2) == obj.__class__\n score2 = obj2.score(diabetes.data, diabetes.target)\n assert score == score2\n\n\ndef test_importances():\n # Check variable importances.\n X, y = datasets.make_classification(n_samples=2000,\n n_features=10,\n n_informative=3,\n n_redundant=0,\n n_repeated=0,\n shuffle=False,\n random_state=1)\n\n for alg in ['SAMME', 'SAMME.R']:\n clf = AdaBoostClassifier(algorithm=alg)\n\n clf.fit(X, y)\n importances = clf.feature_importances_\n\n assert importances.shape[0] == 10\n assert (importances[:3, np.newaxis] >= importances[3:]).all()\n\n\ndef test_error():\n # Test that it gives proper exception on deficient input.\n assert_raises(ValueError,\n AdaBoostClassifier(learning_rate=-1).fit,\n X, y_class)\n\n assert_raises(ValueError,\n AdaBoostClassifier(algorithm=\"foo\").fit,\n X, y_class)\n\n assert_raises(ValueError,\n AdaBoostClassifier().fit,\n X, y_class, sample_weight=np.asarray([-1]))\n\n\ndef test_base_estimator():\n # Test different base estimators.\n from sklearn.ensemble import RandomForestClassifier\n\n # XXX doesn't work with y_class because RF doesn't support classes_\n # Shouldn't AdaBoost run a LabelBinarizer?\n clf = AdaBoostClassifier(RandomForestClassifier())\n clf.fit(X, y_regr)\n\n clf = AdaBoostClassifier(SVC(), algorithm=\"SAMME\")\n clf.fit(X, y_class)\n\n from sklearn.ensemble import RandomForestRegressor\n\n clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)\n clf.fit(X, y_regr)\n\n clf = AdaBoostRegressor(SVR(), random_state=0)\n clf.fit(X, y_regr)\n\n # Check that an empty discrete ensemble fails in fit, not predict.\n X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]\n y_fail = [\"foo\", \"bar\", 1, 2]\n clf = AdaBoostClassifier(SVC(), algorithm=\"SAMME\")\n assert_raises_regexp(ValueError, \"worse than random\",\n clf.fit, X_fail, y_fail)\n\n\ndef test_sparse_classification():\n # Check classification with sparse input.\n\n class CustomSVC(SVC):\n \"\"\"SVC variant that records the nature of the training set.\"\"\"\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"Modification on fit caries data type for later verification.\"\"\"\n super().fit(X, y, sample_weight=sample_weight)\n 
self.data_type_ = type(X)\n return self\n\n X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,\n n_features=5,\n random_state=42)\n # Flatten y to a 1d array\n y = np.ravel(y)\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)\n\n for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,\n dok_matrix]:\n X_train_sparse = sparse_format(X_train)\n X_test_sparse = sparse_format(X_test)\n\n # Trained on sparse format\n sparse_classifier = AdaBoostClassifier(\n base_estimator=CustomSVC(probability=True),\n random_state=1,\n algorithm=\"SAMME\"\n ).fit(X_train_sparse, y_train)\n\n # Trained on dense format\n dense_classifier = AdaBoostClassifier(\n base_estimator=CustomSVC(probability=True),\n random_state=1,\n algorithm=\"SAMME\"\n ).fit(X_train, y_train)\n\n # predict\n sparse_results = sparse_classifier.predict(X_test_sparse)\n dense_results = dense_classifier.predict(X_test)\n assert_array_equal(sparse_results, dense_results)\n\n # decision_function\n sparse_results = sparse_classifier.decision_function(X_test_sparse)\n dense_results = dense_classifier.decision_function(X_test)\n assert_array_almost_equal(sparse_results, dense_results)\n\n # predict_log_proba\n sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)\n dense_results = dense_classifier.predict_log_proba(X_test)\n assert_array_almost_equal(sparse_results, dense_results)\n\n # predict_proba\n sparse_results = sparse_classifier.predict_proba(X_test_sparse)\n dense_results = dense_classifier.predict_proba(X_test)\n assert_array_almost_equal(sparse_results, dense_results)\n\n # score\n sparse_results = sparse_classifier.score(X_test_sparse, y_test)\n dense_results = dense_classifier.score(X_test, y_test)\n assert_array_almost_equal(sparse_results, dense_results)\n\n # staged_decision_function\n sparse_results = sparse_classifier.staged_decision_function(\n X_test_sparse)\n dense_results = dense_classifier.staged_decision_function(X_test)\n for sprase_res, dense_res in zip(sparse_results, dense_results):\n assert_array_almost_equal(sprase_res, dense_res)\n\n # staged_predict\n sparse_results = sparse_classifier.staged_predict(X_test_sparse)\n dense_results = dense_classifier.staged_predict(X_test)\n for sprase_res, dense_res in zip(sparse_results, dense_results):\n assert_array_equal(sprase_res, dense_res)\n\n # staged_predict_proba\n sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)\n dense_results = dense_classifier.staged_predict_proba(X_test)\n for sprase_res, dense_res in zip(sparse_results, dense_results):\n assert_array_almost_equal(sprase_res, dense_res)\n\n # staged_score\n sparse_results = sparse_classifier.staged_score(X_test_sparse,\n y_test)\n dense_results = dense_classifier.staged_score(X_test, y_test)\n for sprase_res, dense_res in zip(sparse_results, dense_results):\n assert_array_equal(sprase_res, dense_res)\n\n # Verify sparsity of data is maintained during training\n types = [i.data_type_ for i in sparse_classifier.estimators_]\n\n assert all([(t == csc_matrix or t == csr_matrix)\n for t in types])\n\n\ndef test_sparse_regression():\n # Check regression with sparse input.\n\n class CustomSVR(SVR):\n \"\"\"SVR variant that records the nature of the training set.\"\"\"\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"Modification on fit caries data type for later verification.\"\"\"\n super().fit(X, y, sample_weight=sample_weight)\n self.data_type_ = type(X)\n return self\n\n X, y = 
datasets.make_regression(n_samples=15, n_features=50, n_targets=1,\n random_state=42)\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)\n\n for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,\n dok_matrix]:\n X_train_sparse = sparse_format(X_train)\n X_test_sparse = sparse_format(X_test)\n\n # Trained on sparse format\n sparse_classifier = AdaBoostRegressor(\n base_estimator=CustomSVR(),\n random_state=1\n ).fit(X_train_sparse, y_train)\n\n # Trained on dense format\n dense_classifier = dense_results = AdaBoostRegressor(\n base_estimator=CustomSVR(),\n random_state=1\n ).fit(X_train, y_train)\n\n # predict\n sparse_results = sparse_classifier.predict(X_test_sparse)\n dense_results = dense_classifier.predict(X_test)\n assert_array_almost_equal(sparse_results, dense_results)\n\n # staged_predict\n sparse_results = sparse_classifier.staged_predict(X_test_sparse)\n dense_results = dense_classifier.staged_predict(X_test)\n for sprase_res, dense_res in zip(sparse_results, dense_results):\n assert_array_almost_equal(sprase_res, dense_res)\n\n types = [i.data_type_ for i in sparse_classifier.estimators_]\n\n assert all([(t == csc_matrix or t == csr_matrix)\n for t in types])\n\n\ndef test_sample_weight_adaboost_regressor():\n \"\"\"\n AdaBoostRegressor should work without sample_weights in the base estimator\n The random weighted sampling is done internally in the _boost method in\n AdaBoostRegressor.\n \"\"\"\n class DummyEstimator(BaseEstimator):\n\n def fit(self, X, y):\n pass\n\n def predict(self, X):\n return np.zeros(X.shape[0])\n\n boost = AdaBoostRegressor(DummyEstimator(), n_estimators=3)\n boost.fit(X, y_regr)\n assert len(boost.estimator_weights_) == len(boost.estimator_errors_)\n\n\ndef test_multidimensional_X():\n \"\"\"\n Check that the AdaBoost estimators can work with n-dimensional\n data matrix\n \"\"\"\n rng = np.random.RandomState(0)\n\n X = rng.randn(50, 3, 3)\n yc = rng.choice([0, 1], 50)\n yr = rng.randn(50)\n\n boost = AdaBoostClassifier(DummyClassifier(strategy='most_frequent'))\n boost.fit(X, yc)\n boost.predict(X)\n boost.predict_proba(X)\n\n boost = AdaBoostRegressor(DummyRegressor())\n boost.fit(X, yr)\n boost.predict(X)\n\n\[email protected](\"algorithm\", ['SAMME', 'SAMME.R'])\ndef test_adaboostclassifier_without_sample_weight(algorithm):\n X, y = iris.data, iris.target\n base_estimator = NoSampleWeightWrapper(DummyClassifier())\n clf = AdaBoostClassifier(\n base_estimator=base_estimator, algorithm=algorithm\n )\n err_msg = (\"{} doesn't support sample_weight\"\n .format(base_estimator.__class__.__name__))\n with pytest.raises(ValueError, match=err_msg):\n clf.fit(X, y)\n\n\ndef test_adaboostregressor_sample_weight():\n # check that giving weight will have an influence on the error computed\n # for a weak learner\n rng = np.random.RandomState(42)\n X = np.linspace(0, 100, num=1000)\n y = (.8 * X + 0.2) + (rng.rand(X.shape[0]) * 0.0001)\n X = X.reshape(-1, 1)\n\n # add an arbitrary outlier\n X[-1] *= 10\n y[-1] = 10000\n\n # random_state=0 ensure that the underlying bootstrap will use the outlier\n regr_no_outlier = AdaBoostRegressor(\n base_estimator=LinearRegression(), n_estimators=1, random_state=0\n )\n regr_with_weight = clone(regr_no_outlier)\n regr_with_outlier = clone(regr_no_outlier)\n\n # fit 3 models:\n # - a model containing the outlier\n # - a model without the outlier\n # - a model containing the outlier but with a null sample-weight\n regr_with_outlier.fit(X, y)\n regr_no_outlier.fit(X[:-1], y[:-1])\n 
sample_weight = np.ones_like(y)\n sample_weight[-1] = 0\n regr_with_weight.fit(X, y, sample_weight=sample_weight)\n\n score_with_outlier = regr_with_outlier.score(X[:-1], y[:-1])\n score_no_outlier = regr_no_outlier.score(X[:-1], y[:-1])\n score_with_weight = regr_with_weight.score(X[:-1], y[:-1])\n\n assert score_with_outlier < score_no_outlier\n assert score_with_outlier < score_with_weight\n assert score_no_outlier == pytest.approx(score_with_weight)\n\[email protected](\"algorithm\", [\"SAMME\", \"SAMME.R\"])\ndef test_adaboost_consistent_predict(algorithm):\n # check that predict_proba and predict give consistent results\n # regression test for:\n # https://github.com/scikit-learn/scikit-learn/issues/14084\n X_train, X_test, y_train, y_test = train_test_split(\n *datasets.load_digits(return_X_y=True), random_state=42\n )\n model = AdaBoostClassifier(algorithm=algorithm, random_state=42)\n model.fit(X_train, y_train)\n\n assert_array_equal(\n np.argmax(model.predict_proba(X_test), axis=1),\n model.predict(X_test)\n )\n\n\[email protected](\n 'model, X, y',\n [(AdaBoostClassifier(), iris.data, iris.target),\n (AdaBoostRegressor(), diabetes.data, diabetes.target)]\n)\ndef test_adaboost_negative_weight_error(model, X, y):\n sample_weight = np.ones_like(y)\n sample_weight[-1] = -10\n\n err_msg = \"sample_weight cannot contain negative weight\"\n with pytest.raises(ValueError, match=err_msg):\n model.fit(X, y, sample_weight=sample_weight)\n",
"\"\"\"Metrics to assess performance on classification task given scores.\n\nFunctions named as ``*_score`` return a scalar value to maximize: the higher\nthe better.\n\nFunction named as ``*_error`` or ``*_loss`` return a scalar value to minimize:\nthe lower the better.\n\"\"\"\n\n# Authors: Alexandre Gramfort <[email protected]>\n# Mathieu Blondel <[email protected]>\n# Olivier Grisel <[email protected]>\n# Arnaud Joly <[email protected]>\n# Jochen Wersdorfer <[email protected]>\n# Lars Buitinck\n# Joel Nothman <[email protected]>\n# Noel Dawe <[email protected]>\n# License: BSD 3 clause\n\n\nimport warnings\nfrom functools import partial\n\nimport numpy as np\nfrom scipy.sparse import csr_matrix\nfrom scipy.stats import rankdata\n\nfrom ..utils import assert_all_finite\nfrom ..utils import check_consistent_length\nfrom ..utils import column_or_1d, check_array\nfrom ..utils.multiclass import type_of_target\nfrom ..utils.extmath import stable_cumsum\nfrom ..utils.sparsefuncs import count_nonzero\nfrom ..utils.validation import _deprecate_positional_args\nfrom ..exceptions import UndefinedMetricWarning\nfrom ..preprocessing import label_binarize\nfrom ..utils._encode import _encode, _unique\n\nfrom ._base import (\n _average_binary_score,\n _average_multiclass_ovo_score,\n _check_pos_label_consistency,\n)\n\n\ndef auc(x, y):\n \"\"\"Compute Area Under the Curve (AUC) using the trapezoidal rule.\n\n This is a general function, given points on a curve. For computing the\n area under the ROC-curve, see :func:`roc_auc_score`. For an alternative\n way to summarize a precision-recall curve, see\n :func:`average_precision_score`.\n\n Parameters\n ----------\n x : ndarray of shape (n,)\n x coordinates. These must be either monotonic increasing or monotonic\n decreasing.\n y : ndarray of shape, (n,)\n y coordinates.\n\n Returns\n -------\n auc : float\n\n See Also\n --------\n roc_auc_score : Compute the area under the ROC curve.\n average_precision_score : Compute average precision from prediction scores.\n precision_recall_curve : Compute precision-recall pairs for different\n probability thresholds.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn import metrics\n >>> y = np.array([1, 1, 2, 2])\n >>> pred = np.array([0.1, 0.4, 0.35, 0.8])\n >>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)\n >>> metrics.auc(fpr, tpr)\n 0.75\n \"\"\"\n check_consistent_length(x, y)\n x = column_or_1d(x)\n y = column_or_1d(y)\n\n if x.shape[0] < 2:\n raise ValueError('At least 2 points are needed to compute'\n ' area under curve, but x.shape = %s' % x.shape)\n\n direction = 1\n dx = np.diff(x)\n if np.any(dx < 0):\n if np.all(dx <= 0):\n direction = -1\n else:\n raise ValueError(\"x is neither increasing nor decreasing \"\n \": {}.\".format(x))\n\n area = direction * np.trapz(y, x)\n if isinstance(area, np.memmap):\n # Reductions such as .sum used internally in np.trapz do not return a\n # scalar by default for numpy.memmap instances contrary to\n # regular numpy.ndarray instances.\n area = area.dtype.type(area)\n return area\n\n\n@_deprecate_positional_args\ndef average_precision_score(y_true, y_score, *, average=\"macro\", pos_label=1,\n sample_weight=None):\n \"\"\"Compute average precision (AP) from prediction scores.\n\n AP summarizes a precision-recall curve as the weighted mean of precisions\n achieved at each threshold, with the increase in recall from the previous\n threshold used as the weight:\n\n .. 
math::\n \\\\text{AP} = \\\\sum_n (R_n - R_{n-1}) P_n\n\n where :math:`P_n` and :math:`R_n` are the precision and recall at the nth\n threshold [1]_. This implementation is not interpolated and is different\n from computing the area under the precision-recall curve with the\n trapezoidal rule, which uses linear interpolation and can be too\n optimistic.\n\n Note: this implementation is restricted to the binary classification task\n or multilabel classification task.\n\n Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.\n\n Parameters\n ----------\n y_true : ndarray of shape (n_samples,) or (n_samples, n_classes)\n True binary labels or binary label indicators.\n\n y_score : ndarray of shape (n_samples,) or (n_samples, n_classes)\n Target scores, can either be probability estimates of the positive\n class, confidence values, or non-thresholded measure of decisions\n (as returned by :term:`decision_function` on some classifiers).\n\n average : {'micro', 'samples', 'weighted', 'macro'} or None, \\\n default='macro'\n If ``None``, the scores for each class are returned. Otherwise,\n this determines the type of averaging performed on the data:\n\n ``'micro'``:\n Calculate metrics globally by considering each element of the label\n indicator matrix as a label.\n ``'macro'``:\n Calculate metrics for each label, and find their unweighted\n mean. This does not take label imbalance into account.\n ``'weighted'``:\n Calculate metrics for each label, and find their average, weighted\n by support (the number of true instances for each label).\n ``'samples'``:\n Calculate metrics for each instance, and find their average.\n\n Will be ignored when ``y_true`` is binary.\n\n pos_label : int or str, default=1\n The label of the positive class. Only applied to binary ``y_true``.\n For multilabel-indicator ``y_true``, ``pos_label`` is fixed to 1.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n Returns\n -------\n average_precision : float\n\n See Also\n --------\n roc_auc_score : Compute the area under the ROC curve.\n precision_recall_curve : Compute precision-recall pairs for different\n probability thresholds.\n\n Notes\n -----\n .. versionchanged:: 0.19\n Instead of linearly interpolating between operating points, precisions\n are weighted by the change in recall since the last operating point.\n\n References\n ----------\n .. [1] `Wikipedia entry for the Average precision\n <https://en.wikipedia.org/w/index.php?title=Information_retrieval&\n oldid=793358396#Average_precision>`_\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.metrics import average_precision_score\n >>> y_true = np.array([0, 0, 1, 1])\n >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])\n >>> average_precision_score(y_true, y_scores)\n 0.83...\n \"\"\"\n def _binary_uninterpolated_average_precision(\n y_true, y_score, pos_label=1, sample_weight=None):\n precision, recall, _ = precision_recall_curve(\n y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)\n # Return the step function integral\n # The following works because the last entry of precision is\n # guaranteed to be 1, as returned by precision_recall_curve\n return -np.sum(np.diff(recall) * np.array(precision)[:-1])\n\n y_type = type_of_target(y_true)\n if y_type == \"multilabel-indicator\" and pos_label != 1:\n raise ValueError(\"Parameter pos_label is fixed to 1 for \"\n \"multilabel-indicator y_true. 
Do not set \"\n \"pos_label or set pos_label to 1.\")\n elif y_type == \"binary\":\n # Convert to Python primitive type to avoid NumPy type / Python str\n # comparison. See https://github.com/numpy/numpy/issues/6784\n present_labels = np.unique(y_true).tolist()\n if len(present_labels) == 2 and pos_label not in present_labels:\n raise ValueError(\n f\"pos_label={pos_label} is not a valid label. It should be \"\n f\"one of {present_labels}\"\n )\n average_precision = partial(_binary_uninterpolated_average_precision,\n pos_label=pos_label)\n return _average_binary_score(average_precision, y_true, y_score,\n average, sample_weight=sample_weight)\n\n\ndef det_curve(y_true, y_score, pos_label=None, sample_weight=None):\n \"\"\"Compute error rates for different probability thresholds.\n\n .. note::\n This metric is used for evaluation of ranking and error tradeoffs of\n a binary classification task.\n\n Read more in the :ref:`User Guide <det_curve>`.\n\n .. versionadded:: 0.24\n\n Parameters\n ----------\n y_true : ndarray of shape (n_samples,)\n True binary labels. If labels are not either {-1, 1} or {0, 1}, then\n pos_label should be explicitly given.\n\n y_score : ndarray of shape of (n_samples,)\n Target scores, can either be probability estimates of the positive\n class, confidence values, or non-thresholded measure of decisions\n (as returned by \"decision_function\" on some classifiers).\n\n pos_label : int or str, default=None\n The label of the positive class.\n When ``pos_label=None``, if `y_true` is in {-1, 1} or {0, 1},\n ``pos_label`` is set to 1, otherwise an error will be raised.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n Returns\n -------\n fpr : ndarray of shape (n_thresholds,)\n False positive rate (FPR) such that element i is the false positive\n rate of predictions with score >= thresholds[i]. This is occasionally\n referred to as false acceptance propability or fall-out.\n\n fnr : ndarray of shape (n_thresholds,)\n False negative rate (FNR) such that element i is the false negative\n rate of predictions with score >= thresholds[i]. This is occasionally\n referred to as false rejection or miss rate.\n\n thresholds : ndarray of shape (n_thresholds,)\n Decreasing score values.\n\n See Also\n --------\n plot_det_curve : Plot detection error tradeoff (DET) curve.\n DetCurveDisplay : DET curve visualization.\n roc_curve : Compute Receiver operating characteristic (ROC) curve.\n precision_recall_curve : Compute precision-recall curve.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.metrics import det_curve\n >>> y_true = np.array([0, 0, 1, 1])\n >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])\n >>> fpr, fnr, thresholds = det_curve(y_true, y_scores)\n >>> fpr\n array([0.5, 0.5, 0. ])\n >>> fnr\n array([0. , 0.5, 0.5])\n >>> thresholds\n array([0.35, 0.4 , 0.8 ])\n \"\"\"\n if len(np.unique(y_true)) != 2:\n raise ValueError(\"Only one class present in y_true. 
Detection error \"\n \"tradeoff curve is not defined in that case.\")\n\n fps, tps, thresholds = _binary_clf_curve(\n y_true, y_score, pos_label=pos_label, sample_weight=sample_weight\n )\n\n fns = tps[-1] - tps\n p_count = tps[-1]\n n_count = fps[-1]\n\n # start with false positives zero\n first_ind = (\n fps.searchsorted(fps[0], side='right') - 1\n if fps.searchsorted(fps[0], side='right') > 0\n else None\n )\n # stop with false negatives zero\n last_ind = tps.searchsorted(tps[-1]) + 1\n sl = slice(first_ind, last_ind)\n\n # reverse the output such that list of false positives is decreasing\n return (\n fps[sl][::-1] / n_count,\n fns[sl][::-1] / p_count,\n thresholds[sl][::-1]\n )\n\n\ndef _binary_roc_auc_score(y_true, y_score, sample_weight=None, max_fpr=None):\n \"\"\"Binary roc auc score.\"\"\"\n if len(np.unique(y_true)) != 2:\n raise ValueError(\"Only one class present in y_true. ROC AUC score \"\n \"is not defined in that case.\")\n\n fpr, tpr, _ = roc_curve(y_true, y_score,\n sample_weight=sample_weight)\n if max_fpr is None or max_fpr == 1:\n return auc(fpr, tpr)\n if max_fpr <= 0 or max_fpr > 1:\n raise ValueError(\"Expected max_fpr in range (0, 1], got: %r\" % max_fpr)\n\n # Add a single point at max_fpr by linear interpolation\n stop = np.searchsorted(fpr, max_fpr, 'right')\n x_interp = [fpr[stop - 1], fpr[stop]]\n y_interp = [tpr[stop - 1], tpr[stop]]\n tpr = np.append(tpr[:stop], np.interp(max_fpr, x_interp, y_interp))\n fpr = np.append(fpr[:stop], max_fpr)\n partial_auc = auc(fpr, tpr)\n\n # McClish correction: standardize result to be 0.5 if non-discriminant\n # and 1 if maximal\n min_area = 0.5 * max_fpr**2\n max_area = max_fpr\n return 0.5 * (1 + (partial_auc - min_area) / (max_area - min_area))\n\n\n@_deprecate_positional_args\ndef roc_auc_score(y_true, y_score, *, average=\"macro\", sample_weight=None,\n max_fpr=None, multi_class=\"raise\", labels=None):\n \"\"\"Compute Area Under the Receiver Operating Characteristic Curve (ROC AUC)\n from prediction scores.\n\n Note: this implementation can be used with binary, multiclass and\n multilabel classification, but some restrictions apply (see Parameters).\n\n Read more in the :ref:`User Guide <roc_metrics>`.\n\n Parameters\n ----------\n y_true : array-like of shape (n_samples,) or (n_samples, n_classes)\n True labels or binary label indicators. The binary and multiclass cases\n expect labels with shape (n_samples,) while the multilabel case expects\n binary label indicators with shape (n_samples, n_classes).\n\n y_score : array-like of shape (n_samples,) or (n_samples, n_classes)\n Target scores.\n\n * In the binary case, it corresponds to an array of shape\n `(n_samples,)`. Both probability estimates and non-thresholded\n decision values can be provided. The probability estimates correspond\n to the **probability of the class with the greater label**,\n i.e. `estimator.classes_[1]` and thus\n `estimator.predict_proba(X, y)[:, 1]`. The decision values\n corresponds to the output of `estimator.decision_function(X, y)`.\n See more information in the :ref:`User guide <roc_auc_binary>`;\n * In the multiclass case, it corresponds to an array of shape\n `(n_samples, n_classes)` of probability estimates provided by the\n `predict_proba` method. The probability estimates **must**\n sum to 1 across the possible classes. In addition, the order of the\n class scores must correspond to the order of ``labels``,\n if provided, or else to the numerical or lexicographical order of\n the labels in ``y_true``. 
See more information in the\n :ref:`User guide <roc_auc_multiclass>`;\n * In the multilabel case, it corresponds to an array of shape\n `(n_samples, n_classes)`. Probability estimates are provided by the\n `predict_proba` method and the non-thresholded decision values by\n the `decision_function` method. The probability estimates correspond\n to the **probability of the class with the greater label for each\n output** of the classifier. See more information in the\n :ref:`User guide <roc_auc_multilabel>`.\n\n average : {'micro', 'macro', 'samples', 'weighted'} or None, \\\n default='macro'\n If ``None``, the scores for each class are returned. Otherwise,\n this determines the type of averaging performed on the data:\n Note: multiclass ROC AUC currently only handles the 'macro' and\n 'weighted' averages.\n\n ``'micro'``:\n Calculate metrics globally by considering each element of the label\n indicator matrix as a label.\n ``'macro'``:\n Calculate metrics for each label, and find their unweighted\n mean. This does not take label imbalance into account.\n ``'weighted'``:\n Calculate metrics for each label, and find their average, weighted\n by support (the number of true instances for each label).\n ``'samples'``:\n Calculate metrics for each instance, and find their average.\n\n Will be ignored when ``y_true`` is binary.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n max_fpr : float > 0 and <= 1, default=None\n If not ``None``, the standardized partial AUC [2]_ over the range\n [0, max_fpr] is returned. For the multiclass case, ``max_fpr``,\n should be either equal to ``None`` or ``1.0`` as AUC ROC partial\n computation currently is not supported for multiclass.\n\n multi_class : {'raise', 'ovr', 'ovo'}, default='raise'\n Only used for multiclass targets. Determines the type of configuration\n to use. The default value raises an error, so either\n ``'ovr'`` or ``'ovo'`` must be passed explicitly.\n\n ``'ovr'``:\n Stands for One-vs-rest. Computes the AUC of each class\n against the rest [3]_ [4]_. This\n treats the multiclass case in the same way as the multilabel case.\n Sensitive to class imbalance even when ``average == 'macro'``,\n because class imbalance affects the composition of each of the\n 'rest' groupings.\n ``'ovo'``:\n Stands for One-vs-one. Computes the average AUC of all\n possible pairwise combinations of classes [5]_.\n Insensitive to class imbalance when\n ``average == 'macro'``.\n\n labels : array-like of shape (n_classes,), default=None\n Only used for multiclass targets. List of labels that index the\n classes in ``y_score``. If ``None``, the numerical or lexicographical\n order of the labels in ``y_true`` is used.\n\n Returns\n -------\n auc : float\n\n References\n ----------\n .. [1] `Wikipedia entry for the Receiver operating characteristic\n <https://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_\n\n .. [2] `Analyzing a portion of the ROC curve. McClish, 1989\n <https://www.ncbi.nlm.nih.gov/pubmed/2668680>`_\n\n .. [3] Provost, F., Domingos, P. (2000). Well-trained PETs: Improving\n probability estimation trees (Section 6.2), CeDER Working Paper\n #IS-00-04, Stern School of Business, New York University.\n\n .. [4] `Fawcett, T. (2006). An introduction to ROC analysis. Pattern\n Recognition Letters, 27(8), 861-874.\n <https://www.sciencedirect.com/science/article/pii/S016786550500303X>`_\n\n .. [5] `Hand, D.J., Till, R.J. (2001). 
A Simple Generalisation of the Area\n Under the ROC Curve for Multiple Class Classification Problems.\n Machine Learning, 45(2), 171-186.\n <http://link.springer.com/article/10.1023/A:1010920819831>`_\n\n See Also\n --------\n average_precision_score : Area under the precision-recall curve.\n roc_curve : Compute Receiver operating characteristic (ROC) curve.\n plot_roc_curve : Plot Receiver operating characteristic (ROC) curve.\n\n Examples\n --------\n Binary case:\n\n >>> from sklearn.datasets import load_breast_cancer\n >>> from sklearn.linear_model import LogisticRegression\n >>> from sklearn.metrics import roc_auc_score\n >>> X, y = load_breast_cancer(return_X_y=True)\n >>> clf = LogisticRegression(solver=\"liblinear\", random_state=0).fit(X, y)\n >>> roc_auc_score(y, clf.predict_proba(X)[:, 1])\n 0.99...\n >>> roc_auc_score(y, clf.decision_function(X))\n 0.99...\n\n Multiclass case:\n\n >>> from sklearn.datasets import load_iris\n >>> X, y = load_iris(return_X_y=True)\n >>> clf = LogisticRegression(solver=\"liblinear\").fit(X, y)\n >>> roc_auc_score(y, clf.predict_proba(X), multi_class='ovr')\n 0.99...\n\n Multilabel case:\n\n >>> from sklearn.datasets import make_multilabel_classification\n >>> from sklearn.multioutput import MultiOutputClassifier\n >>> X, y = make_multilabel_classification(random_state=0)\n >>> clf = MultiOutputClassifier(clf).fit(X, y)\n >>> # get a list of n_output containing probability arrays of shape\n >>> # (n_samples, n_classes)\n >>> y_pred = clf.predict_proba(X)\n >>> # extract the positive columns for each output\n >>> y_pred = np.transpose([pred[:, 1] for pred in y_pred])\n >>> roc_auc_score(y, y_pred, average=None)\n array([0.82..., 0.86..., 0.94..., 0.85... , 0.94...])\n >>> from sklearn.linear_model import RidgeClassifierCV\n >>> clf = RidgeClassifierCV().fit(X, y)\n >>> roc_auc_score(y, clf.decision_function(X), average=None)\n array([0.81..., 0.84... 
, 0.93..., 0.87..., 0.94...])\n \"\"\"\n\n y_type = type_of_target(y_true)\n y_true = check_array(y_true, ensure_2d=False, dtype=None)\n y_score = check_array(y_score, ensure_2d=False)\n\n if y_type == \"multiclass\" or (y_type == \"binary\" and\n y_score.ndim == 2 and\n y_score.shape[1] > 2):\n # do not support partial ROC computation for multiclass\n if max_fpr is not None and max_fpr != 1.:\n raise ValueError(\"Partial AUC computation not available in \"\n \"multiclass setting, 'max_fpr' must be\"\n \" set to `None`, received `max_fpr={0}` \"\n \"instead\".format(max_fpr))\n if multi_class == 'raise':\n raise ValueError(\"multi_class must be in ('ovo', 'ovr')\")\n return _multiclass_roc_auc_score(y_true, y_score, labels,\n multi_class, average, sample_weight)\n elif y_type == \"binary\":\n labels = np.unique(y_true)\n y_true = label_binarize(y_true, classes=labels)[:, 0]\n return _average_binary_score(partial(_binary_roc_auc_score,\n max_fpr=max_fpr),\n y_true, y_score, average,\n sample_weight=sample_weight)\n else: # multilabel-indicator\n return _average_binary_score(partial(_binary_roc_auc_score,\n max_fpr=max_fpr),\n y_true, y_score, average,\n sample_weight=sample_weight)\n\n\ndef _multiclass_roc_auc_score(y_true, y_score, labels,\n multi_class, average, sample_weight):\n \"\"\"Multiclass roc auc score.\n\n Parameters\n ----------\n y_true : array-like of shape (n_samples,)\n True multiclass labels.\n\n y_score : array-like of shape (n_samples, n_classes)\n Target scores corresponding to probability estimates of a sample\n belonging to a particular class\n\n labels : array-like of shape (n_classes,) or None\n List of labels to index ``y_score`` used for multiclass. If ``None``,\n the lexical order of ``y_true`` is used to index ``y_score``.\n\n multi_class : {'ovr', 'ovo'}\n Determines the type of multiclass configuration to use.\n ``'ovr'``:\n Calculate metrics for the multiclass case using the one-vs-rest\n approach.\n ``'ovo'``:\n Calculate metrics for the multiclass case using the one-vs-one\n approach.\n\n average : {'macro', 'weighted'}\n Determines the type of averaging performed on the pairwise binary\n metric scores\n ``'macro'``:\n Calculate metrics for each label, and find their unweighted\n mean. This does not take label imbalance into account. Classes\n are assumed to be uniformly distributed.\n ``'weighted'``:\n Calculate metrics for each label, taking into account the\n prevalence of the classes.\n\n sample_weight : array-like of shape (n_samples,) or None\n Sample weights.\n\n \"\"\"\n # validation of the input y_score\n if not np.allclose(1, y_score.sum(axis=1)):\n raise ValueError(\n \"Target scores need to be probabilities for multiclass \"\n \"roc_auc, i.e. 
they should sum up to 1.0 over classes\")\n\n # validation for multiclass parameter specifications\n average_options = (\"macro\", \"weighted\")\n if average not in average_options:\n raise ValueError(\"average must be one of {0} for \"\n \"multiclass problems\".format(average_options))\n\n multiclass_options = (\"ovo\", \"ovr\")\n if multi_class not in multiclass_options:\n raise ValueError(\"multi_class='{0}' is not supported \"\n \"for multiclass ROC AUC, multi_class must be \"\n \"in {1}\".format(\n multi_class, multiclass_options))\n\n if labels is not None:\n labels = column_or_1d(labels)\n classes = _unique(labels)\n if len(classes) != len(labels):\n raise ValueError(\"Parameter 'labels' must be unique\")\n if not np.array_equal(classes, labels):\n raise ValueError(\"Parameter 'labels' must be ordered\")\n if len(classes) != y_score.shape[1]:\n raise ValueError(\n \"Number of given labels, {0}, not equal to the number \"\n \"of columns in 'y_score', {1}\".format(\n len(classes), y_score.shape[1]))\n if len(np.setdiff1d(y_true, classes)):\n raise ValueError(\n \"'y_true' contains labels not in parameter 'labels'\")\n else:\n classes = _unique(y_true)\n if len(classes) != y_score.shape[1]:\n raise ValueError(\n \"Number of classes in y_true not equal to the number of \"\n \"columns in 'y_score'\")\n\n if multi_class == \"ovo\":\n if sample_weight is not None:\n raise ValueError(\"sample_weight is not supported \"\n \"for multiclass one-vs-one ROC AUC, \"\n \"'sample_weight' must be None in this case.\")\n y_true_encoded = _encode(y_true, uniques=classes)\n # Hand & Till (2001) implementation (ovo)\n return _average_multiclass_ovo_score(_binary_roc_auc_score,\n y_true_encoded,\n y_score, average=average)\n else:\n # ovr is same as multi-label\n y_true_multilabel = label_binarize(y_true, classes=classes)\n return _average_binary_score(_binary_roc_auc_score, y_true_multilabel,\n y_score, average,\n sample_weight=sample_weight)\n\n\ndef _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):\n \"\"\"Calculate true and false positives per binary classification threshold.\n\n Parameters\n ----------\n y_true : ndarray of shape (n_samples,)\n True targets of binary classification.\n\n y_score : ndarray of shape (n_samples,)\n Estimated probabilities or output of a decision function.\n\n pos_label : int or str, default=None\n The label of the positive class.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n Returns\n -------\n fps : ndarray of shape (n_thresholds,)\n A count of false positives, at index i being the number of negative\n samples assigned a score >= thresholds[i]. The total number of\n negative samples is equal to fps[-1] (thus true negatives are given by\n fps[-1] - fps).\n\n tps : ndarray of shape (n_thresholds,)\n An increasing count of true positives, at index i being the number\n of positive samples assigned a score >= thresholds[i]. 
The total\n number of positive samples is equal to tps[-1] (thus false negatives\n are given by tps[-1] - tps).\n\n thresholds : ndarray of shape (n_thresholds,)\n Decreasing score values.\n \"\"\"\n # Check to make sure y_true is valid\n y_type = type_of_target(y_true)\n if not (y_type == \"binary\" or\n (y_type == \"multiclass\" and pos_label is not None)):\n raise ValueError(\"{0} format is not supported\".format(y_type))\n\n check_consistent_length(y_true, y_score, sample_weight)\n y_true = column_or_1d(y_true)\n y_score = column_or_1d(y_score)\n assert_all_finite(y_true)\n assert_all_finite(y_score)\n\n if sample_weight is not None:\n sample_weight = column_or_1d(sample_weight)\n\n pos_label = _check_pos_label_consistency(pos_label, y_true)\n\n # make y_true a boolean vector\n y_true = (y_true == pos_label)\n\n # sort scores and corresponding truth values\n desc_score_indices = np.argsort(y_score, kind=\"mergesort\")[::-1]\n y_score = y_score[desc_score_indices]\n y_true = y_true[desc_score_indices]\n if sample_weight is not None:\n weight = sample_weight[desc_score_indices]\n else:\n weight = 1.\n\n # y_score typically has many tied values. Here we extract\n # the indices associated with the distinct values. We also\n # concatenate a value for the end of the curve.\n distinct_value_indices = np.where(np.diff(y_score))[0]\n threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]\n\n # accumulate the true positives with decreasing threshold\n tps = stable_cumsum(y_true * weight)[threshold_idxs]\n if sample_weight is not None:\n # express fps as a cumsum to ensure fps is increasing even in\n # the presence of floating point errors\n fps = stable_cumsum((1 - y_true) * weight)[threshold_idxs]\n else:\n fps = 1 + threshold_idxs - tps\n return fps, tps, y_score[threshold_idxs]\n\n\n@_deprecate_positional_args\ndef precision_recall_curve(y_true, probas_pred, *, pos_label=None,\n sample_weight=None):\n \"\"\"Compute precision-recall pairs for different probability thresholds.\n\n Note: this implementation is restricted to the binary classification task.\n\n The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of\n true positives and ``fp`` the number of false positives. The precision is\n intuitively the ability of the classifier not to label as positive a sample\n that is negative.\n\n The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of\n true positives and ``fn`` the number of false negatives. The recall is\n intuitively the ability of the classifier to find all the positive samples.\n\n The last precision and recall values are 1. and 0. respectively and do not\n have a corresponding threshold. This ensures that the graph starts on the\n y axis.\n\n Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.\n\n Parameters\n ----------\n y_true : ndarray of shape (n_samples,)\n True binary labels. 
If labels are not either {-1, 1} or {0, 1}, then\n pos_label should be explicitly given.\n\n probas_pred : ndarray of shape (n_samples,)\n Estimated probabilities or output of a decision function.\n\n pos_label : int or str, default=None\n The label of the positive class.\n When ``pos_label=None``, if y_true is in {-1, 1} or {0, 1},\n ``pos_label`` is set to 1, otherwise an error will be raised.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n Returns\n -------\n precision : ndarray of shape (n_thresholds + 1,)\n Precision values such that element i is the precision of\n predictions with score >= thresholds[i] and the last element is 1.\n\n recall : ndarray of shape (n_thresholds + 1,)\n Decreasing recall values such that element i is the recall of\n predictions with score >= thresholds[i] and the last element is 0.\n\n thresholds : ndarray of shape (n_thresholds,)\n Increasing thresholds on the decision function used to compute\n precision and recall. n_thresholds <= len(np.unique(probas_pred)).\n\n See Also\n --------\n plot_precision_recall_curve : Plot Precision Recall Curve for binary\n classifiers.\n PrecisionRecallDisplay : Precision Recall visualization.\n average_precision_score : Compute average precision from prediction scores.\n det_curve: Compute error rates for different probability thresholds.\n roc_curve : Compute Receiver operating characteristic (ROC) curve.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.metrics import precision_recall_curve\n >>> y_true = np.array([0, 0, 1, 1])\n >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])\n >>> precision, recall, thresholds = precision_recall_curve(\n ... y_true, y_scores)\n >>> precision\n array([0.66666667, 0.5 , 1. , 1. ])\n >>> recall\n array([1. , 0.5, 0.5, 0. ])\n >>> thresholds\n array([0.35, 0.4 , 0.8 ])\n\n \"\"\"\n fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred,\n pos_label=pos_label,\n sample_weight=sample_weight)\n\n precision = tps / (tps + fps)\n precision[np.isnan(precision)] = 0\n recall = tps / tps[-1]\n\n # stop when full recall attained\n # and reverse the outputs so recall is decreasing\n last_ind = tps.searchsorted(tps[-1])\n sl = slice(last_ind, None, -1)\n return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]\n\n\n@_deprecate_positional_args\ndef roc_curve(y_true, y_score, *, pos_label=None, sample_weight=None,\n drop_intermediate=True):\n \"\"\"Compute Receiver operating characteristic (ROC).\n\n Note: this implementation is restricted to the binary classification task.\n\n Read more in the :ref:`User Guide <roc_metrics>`.\n\n Parameters\n ----------\n y_true : ndarray of shape (n_samples,)\n True binary labels. If labels are not either {-1, 1} or {0, 1}, then\n pos_label should be explicitly given.\n\n y_score : ndarray of shape (n_samples,)\n Target scores, can either be probability estimates of the positive\n class, confidence values, or non-thresholded measure of decisions\n (as returned by \"decision_function\" on some classifiers).\n\n pos_label : int or str, default=None\n The label of the positive class.\n When ``pos_label=None``, if `y_true` is in {-1, 1} or {0, 1},\n ``pos_label`` is set to 1, otherwise an error will be raised.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n drop_intermediate : bool, default=True\n Whether to drop some suboptimal thresholds which would not appear\n on a plotted ROC curve. This is useful in order to create lighter\n ROC curves.\n\n .. 
versionadded:: 0.17\n parameter *drop_intermediate*.\n\n Returns\n -------\n fpr : ndarray of shape (>2,)\n Increasing false positive rates such that element i is the false\n positive rate of predictions with score >= `thresholds[i]`.\n\n tpr : ndarray of shape (>2,)\n Increasing true positive rates such that element `i` is the true\n positive rate of predictions with score >= `thresholds[i]`.\n\n thresholds : ndarray of shape = (n_thresholds,)\n Decreasing thresholds on the decision function used to compute\n fpr and tpr. `thresholds[0]` represents no instances being predicted\n and is arbitrarily set to `max(y_score) + 1`.\n\n See Also\n --------\n plot_roc_curve : Plot Receiver operating characteristic (ROC) curve.\n RocCurveDisplay : ROC Curve visualization.\n det_curve: Compute error rates for different probability thresholds.\n roc_auc_score : Compute the area under the ROC curve.\n\n Notes\n -----\n Since the thresholds are sorted from low to high values, they\n are reversed upon returning them to ensure they correspond to both ``fpr``\n and ``tpr``, which are sorted in reversed order during their calculation.\n\n References\n ----------\n .. [1] `Wikipedia entry for the Receiver operating characteristic\n <https://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_\n\n .. [2] Fawcett T. An introduction to ROC analysis[J]. Pattern Recognition\n Letters, 2006, 27(8):861-874.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn import metrics\n >>> y = np.array([1, 1, 2, 2])\n >>> scores = np.array([0.1, 0.4, 0.35, 0.8])\n >>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)\n >>> fpr\n array([0. , 0. , 0.5, 0.5, 1. ])\n >>> tpr\n array([0. , 0.5, 0.5, 1. , 1. ])\n >>> thresholds\n array([1.8 , 0.8 , 0.4 , 0.35, 0.1 ])\n\n \"\"\"\n fps, tps, thresholds = _binary_clf_curve(\n y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)\n\n # Attempt to drop thresholds corresponding to points in between and\n # collinear with other points. These are always suboptimal and do not\n # appear on a plotted ROC curve (and thus do not affect the AUC).\n # Here np.diff(_, 2) is used as a \"second derivative\" to tell if there\n # is a corner at the point. Both fps and tps must be tested to handle\n # thresholds with multiple data points (which are combined in\n # _binary_clf_curve). 
This keeps all cases where the point should be kept,\n # but does not drop more complicated cases like fps = [1, 3, 7],\n # tps = [1, 2, 4]; there is no harm in keeping too many thresholds.\n if drop_intermediate and len(fps) > 2:\n optimal_idxs = np.where(np.r_[True,\n np.logical_or(np.diff(fps, 2),\n np.diff(tps, 2)),\n True])[0]\n fps = fps[optimal_idxs]\n tps = tps[optimal_idxs]\n thresholds = thresholds[optimal_idxs]\n\n # Add an extra threshold position\n # to make sure that the curve starts at (0, 0)\n tps = np.r_[0, tps]\n fps = np.r_[0, fps]\n thresholds = np.r_[thresholds[0] + 1, thresholds]\n\n if fps[-1] <= 0:\n warnings.warn(\"No negative samples in y_true, \"\n \"false positive value should be meaningless\",\n UndefinedMetricWarning)\n fpr = np.repeat(np.nan, fps.shape)\n else:\n fpr = fps / fps[-1]\n\n if tps[-1] <= 0:\n warnings.warn(\"No positive samples in y_true, \"\n \"true positive value should be meaningless\",\n UndefinedMetricWarning)\n tpr = np.repeat(np.nan, tps.shape)\n else:\n tpr = tps / tps[-1]\n\n return fpr, tpr, thresholds\n\n\n@_deprecate_positional_args\ndef label_ranking_average_precision_score(y_true, y_score, *,\n sample_weight=None):\n \"\"\"Compute ranking-based average precision.\n\n Label ranking average precision (LRAP) is the average over each ground\n truth label assigned to each sample, of the ratio of true vs. total\n labels with lower score.\n\n This metric is used in multilabel ranking problem, where the goal\n is to give better rank to the labels associated to each sample.\n\n The obtained score is always strictly greater than 0 and\n the best value is 1.\n\n Read more in the :ref:`User Guide <label_ranking_average_precision>`.\n\n Parameters\n ----------\n y_true : {ndarray, sparse matrix} of shape (n_samples, n_labels)\n True binary labels in binary indicator format.\n\n y_score : ndarray of shape (n_samples, n_labels)\n Target scores, can either be probability estimates of the positive\n class, confidence values, or non-thresholded measure of decisions\n (as returned by \"decision_function\" on some classifiers).\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n .. versionadded:: 0.20\n\n Returns\n -------\n score : float\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.metrics import label_ranking_average_precision_score\n >>> y_true = np.array([[1, 0, 0], [0, 0, 1]])\n >>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])\n >>> label_ranking_average_precision_score(y_true, y_score)\n 0.416...\n\n \"\"\"\n check_consistent_length(y_true, y_score, sample_weight)\n y_true = check_array(y_true, ensure_2d=False)\n y_score = check_array(y_score, ensure_2d=False)\n\n if y_true.shape != y_score.shape:\n raise ValueError(\"y_true and y_score have different shape\")\n\n # Handle badly formatted array and the degenerate case with one label\n y_type = type_of_target(y_true)\n if (y_type != \"multilabel-indicator\" and\n not (y_type == \"binary\" and y_true.ndim == 2)):\n raise ValueError(\"{0} format is not supported\".format(y_type))\n\n y_true = csr_matrix(y_true)\n y_score = -y_score\n\n n_samples, n_labels = y_true.shape\n\n out = 0.\n for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):\n relevant = y_true.indices[start:stop]\n\n if (relevant.size == 0 or relevant.size == n_labels):\n # If all labels are relevant or unrelevant, the score is also\n # equal to 1. 
The label ranking has no meaning.\n aux = 1.\n else:\n scores_i = y_score[i]\n rank = rankdata(scores_i, 'max')[relevant]\n L = rankdata(scores_i[relevant], 'max')\n aux = (L / rank).mean()\n\n if sample_weight is not None:\n aux = aux * sample_weight[i]\n out += aux\n\n if sample_weight is None:\n out /= n_samples\n else:\n out /= np.sum(sample_weight)\n\n return out\n\n\n@_deprecate_positional_args\ndef coverage_error(y_true, y_score, *, sample_weight=None):\n \"\"\"Coverage error measure.\n\n Compute how far we need to go through the ranked scores to cover all\n true labels. The best value is equal to the average number\n of labels in ``y_true`` per sample.\n\n Ties in ``y_scores`` are broken by giving maximal rank that would have\n been assigned to all tied values.\n\n Note: Our implementation's score is 1 greater than the one given in\n Tsoumakas et al., 2010. This extends it to handle the degenerate case\n in which an instance has 0 true labels.\n\n Read more in the :ref:`User Guide <coverage_error>`.\n\n Parameters\n ----------\n y_true : ndarray of shape (n_samples, n_labels)\n True binary labels in binary indicator format.\n\n y_score : ndarray of shape (n_samples, n_labels)\n Target scores, can either be probability estimates of the positive\n class, confidence values, or non-thresholded measure of decisions\n (as returned by \"decision_function\" on some classifiers).\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n Returns\n -------\n coverage_error : float\n\n References\n ----------\n .. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).\n Mining multi-label data. In Data mining and knowledge discovery\n handbook (pp. 667-685). Springer US.\n\n \"\"\"\n y_true = check_array(y_true, ensure_2d=False)\n y_score = check_array(y_score, ensure_2d=False)\n check_consistent_length(y_true, y_score, sample_weight)\n\n y_type = type_of_target(y_true)\n if y_type != \"multilabel-indicator\":\n raise ValueError(\"{0} format is not supported\".format(y_type))\n\n if y_true.shape != y_score.shape:\n raise ValueError(\"y_true and y_score have different shape\")\n\n y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true))\n y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1))\n coverage = (y_score >= y_min_relevant).sum(axis=1)\n coverage = coverage.filled(0)\n\n return np.average(coverage, weights=sample_weight)\n\n\n@_deprecate_positional_args\ndef label_ranking_loss(y_true, y_score, *, sample_weight=None):\n \"\"\"Compute Ranking loss measure.\n\n Compute the average number of label pairs that are incorrectly ordered\n given y_score weighted by the size of the label set and the number of\n labels not in the label set.\n\n This is similar to the error set size, but weighted by the number of\n relevant and irrelevant labels. The best performance is achieved with\n a ranking loss of zero.\n\n Read more in the :ref:`User Guide <label_ranking_loss>`.\n\n .. 
versionadded:: 0.17\n A function *label_ranking_loss*\n\n Parameters\n ----------\n y_true : {ndarray, sparse matrix} of shape (n_samples, n_labels)\n True binary labels in binary indicator format.\n\n y_score : ndarray of shape (n_samples, n_labels)\n Target scores, can either be probability estimates of the positive\n class, confidence values, or non-thresholded measure of decisions\n (as returned by \"decision_function\" on some classifiers).\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n Returns\n -------\n loss : float\n\n References\n ----------\n .. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).\n Mining multi-label data. In Data mining and knowledge discovery\n handbook (pp. 667-685). Springer US.\n \"\"\"\n y_true = check_array(y_true, ensure_2d=False, accept_sparse='csr')\n y_score = check_array(y_score, ensure_2d=False)\n check_consistent_length(y_true, y_score, sample_weight)\n\n y_type = type_of_target(y_true)\n if y_type not in (\"multilabel-indicator\",):\n raise ValueError(\"{0} format is not supported\".format(y_type))\n\n if y_true.shape != y_score.shape:\n raise ValueError(\"y_true and y_score have different shape\")\n\n n_samples, n_labels = y_true.shape\n\n y_true = csr_matrix(y_true)\n\n loss = np.zeros(n_samples)\n for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):\n # Sort and bin the label scores\n unique_scores, unique_inverse = np.unique(y_score[i],\n return_inverse=True)\n true_at_reversed_rank = np.bincount(\n unique_inverse[y_true.indices[start:stop]],\n minlength=len(unique_scores))\n all_at_reversed_rank = np.bincount(unique_inverse,\n minlength=len(unique_scores))\n false_at_reversed_rank = all_at_reversed_rank - true_at_reversed_rank\n\n # if the scores are ordered, it's possible to count the number of\n # incorrectly ordered paires in linear time by cumulatively counting\n # how many false labels of a given score have a score higher than the\n # accumulated true labels with lower score.\n loss[i] = np.dot(true_at_reversed_rank.cumsum(),\n false_at_reversed_rank)\n\n n_positives = count_nonzero(y_true, axis=1)\n with np.errstate(divide=\"ignore\", invalid=\"ignore\"):\n loss /= ((n_labels - n_positives) * n_positives)\n\n # When there is no positive or no negative labels, those values should\n # be consider as correct, i.e. the ranking doesn't matter.\n loss[np.logical_or(n_positives == 0, n_positives == n_labels)] = 0.\n\n return np.average(loss, weights=sample_weight)\n\n\ndef _dcg_sample_scores(y_true, y_score, k=None,\n log_base=2, ignore_ties=False):\n \"\"\"Compute Discounted Cumulative Gain.\n\n Sum the true scores ranked in the order induced by the predicted scores,\n after applying a logarithmic discount.\n\n This ranking metric yields a high value if true labels are ranked high by\n ``y_score``.\n\n Parameters\n ----------\n y_true : ndarray of shape (n_samples, n_labels)\n True targets of multilabel classification, or true scores of entities\n to be ranked.\n\n y_score : ndarray of shape (n_samples, n_labels)\n Target scores, can either be probability estimates, confidence values,\n or non-thresholded measure of decisions (as returned by\n \"decision_function\" on some classifiers).\n\n k : int, default=None\n Only consider the highest k scores in the ranking. If None, use all\n outputs.\n\n log_base : float, default=2\n Base of the logarithm used for the discount. 
A low value means a\n sharper discount (top results are more important).\n\n ignore_ties : bool, default=False\n Assume that there are no ties in y_score (which is likely to be the\n case if y_score is continuous) for efficiency gains.\n\n Returns\n -------\n discounted_cumulative_gain : ndarray of shape (n_samples,)\n The DCG score for each sample.\n\n See Also\n --------\n ndcg_score : The Discounted Cumulative Gain divided by the Ideal Discounted\n Cumulative Gain (the DCG obtained for a perfect ranking), in order to\n have a score between 0 and 1.\n \"\"\"\n discount = 1 / (np.log(np.arange(y_true.shape[1]) + 2) / np.log(log_base))\n if k is not None:\n discount[k:] = 0\n if ignore_ties:\n ranking = np.argsort(y_score)[:, ::-1]\n ranked = y_true[np.arange(ranking.shape[0])[:, np.newaxis], ranking]\n cumulative_gains = discount.dot(ranked.T)\n else:\n discount_cumsum = np.cumsum(discount)\n cumulative_gains = [_tie_averaged_dcg(y_t, y_s, discount_cumsum)\n for y_t, y_s in zip(y_true, y_score)]\n cumulative_gains = np.asarray(cumulative_gains)\n return cumulative_gains\n\n\ndef _tie_averaged_dcg(y_true, y_score, discount_cumsum):\n \"\"\"\n Compute DCG by averaging over possible permutations of ties.\n\n The gain (`y_true`) of an index falling inside a tied group (in the order\n induced by `y_score`) is replaced by the average gain within this group.\n The discounted gain for a tied group is then the average `y_true` within\n this group times the sum of discounts of the corresponding ranks.\n\n This amounts to averaging scores for all possible orderings of the tied\n groups.\n\n (note in the case of dcg@k the discount is 0 after index k)\n\n Parameters\n ----------\n y_true : ndarray\n The true relevance scores.\n\n y_score : ndarray\n Predicted scores.\n\n discount_cumsum : ndarray\n Precomputed cumulative sum of the discounts.\n\n Returns\n -------\n discounted_cumulative_gain : float\n The discounted cumulative gain.\n\n References\n ----------\n McSherry, F., & Najork, M. (2008, March). Computing information retrieval\n performance measures efficiently in the presence of tied scores. In\n European conference on information retrieval (pp. 414-421). Springer,\n Berlin, Heidelberg.\n \"\"\"\n _, inv, counts = np.unique(\n - y_score, return_inverse=True, return_counts=True)\n ranked = np.zeros(len(counts))\n np.add.at(ranked, inv, y_true)\n ranked /= counts\n groups = np.cumsum(counts) - 1\n discount_sums = np.empty(len(counts))\n discount_sums[0] = discount_cumsum[groups[0]]\n discount_sums[1:] = np.diff(discount_cumsum[groups])\n return (ranked * discount_sums).sum()\n\n\ndef _check_dcg_target_type(y_true):\n y_type = type_of_target(y_true)\n supported_fmt = (\"multilabel-indicator\", \"continuous-multioutput\",\n \"multiclass-multioutput\")\n if y_type not in supported_fmt:\n raise ValueError(\n \"Only {} formats are supported. 
Got {} instead\".format(\n supported_fmt, y_type))\n\n\n@_deprecate_positional_args\ndef dcg_score(y_true, y_score, *, k=None,\n log_base=2, sample_weight=None, ignore_ties=False):\n \"\"\"Compute Discounted Cumulative Gain.\n\n Sum the true scores ranked in the order induced by the predicted scores,\n after applying a logarithmic discount.\n\n This ranking metric yields a high value if true labels are ranked high by\n ``y_score``.\n\n Usually the Normalized Discounted Cumulative Gain (NDCG, computed by\n ndcg_score) is preferred.\n\n Parameters\n ----------\n y_true : ndarray of shape (n_samples, n_labels)\n True targets of multilabel classification, or true scores of entities\n to be ranked.\n\n y_score : ndarray of shape (n_samples, n_labels)\n Target scores, can either be probability estimates, confidence values,\n or non-thresholded measure of decisions (as returned by\n \"decision_function\" on some classifiers).\n\n k : int, default=None\n Only consider the highest k scores in the ranking. If None, use all\n outputs.\n\n log_base : float, default=2\n Base of the logarithm used for the discount. A low value means a\n sharper discount (top results are more important).\n\n sample_weight : ndarray of shape (n_samples,), default=None\n Sample weights. If None, all samples are given the same weight.\n\n ignore_ties : bool, default=False\n Assume that there are no ties in y_score (which is likely to be the\n case if y_score is continuous) for efficiency gains.\n\n Returns\n -------\n discounted_cumulative_gain : float\n The averaged sample DCG scores.\n\n See Also\n --------\n ndcg_score : The Discounted Cumulative Gain divided by the Ideal Discounted\n Cumulative Gain (the DCG obtained for a perfect ranking), in order to\n have a score between 0 and 1.\n\n References\n ----------\n `Wikipedia entry for Discounted Cumulative Gain\n <https://en.wikipedia.org/wiki/Discounted_cumulative_gain>`_.\n\n Jarvelin, K., & Kekalainen, J. (2002).\n Cumulated gain-based evaluation of IR techniques. ACM Transactions on\n Information Systems (TOIS), 20(4), 422-446.\n\n Wang, Y., Wang, L., Li, Y., He, D., Chen, W., & Liu, T. Y. (2013, May).\n A theoretical analysis of NDCG ranking measures. In Proceedings of the 26th\n Annual Conference on Learning Theory (COLT 2013).\n\n McSherry, F., & Najork, M. (2008, March). Computing information retrieval\n performance measures efficiently in the presence of tied scores. In\n European conference on information retrieval (pp. 414-421). Springer,\n Berlin, Heidelberg.\n\n Examples\n --------\n >>> from sklearn.metrics import dcg_score\n >>> # we have groud-truth relevance of some answers to a query:\n >>> true_relevance = np.asarray([[10, 0, 0, 1, 5]])\n >>> # we predict scores for the answers\n >>> scores = np.asarray([[.1, .2, .3, 4, 70]])\n >>> dcg_score(true_relevance, scores)\n 9.49...\n >>> # we can set k to truncate the sum; only top k answers contribute\n >>> dcg_score(true_relevance, scores, k=2)\n 5.63...\n >>> # now we have some ties in our prediction\n >>> scores = np.asarray([[1, 0, 0, 0, 1]])\n >>> # by default ties are averaged, so here we get the average true\n >>> # relevance of our top predictions: (10 + 5) / 2 = 7.5\n >>> dcg_score(true_relevance, scores, k=1)\n 7.5\n >>> # we can choose to ignore ties for faster results, but only\n >>> # if we know there aren't ties in our scores, otherwise we get\n >>> # wrong results:\n >>> dcg_score(true_relevance,\n ... 
scores, k=1, ignore_ties=True)\n 5.0\n\n \"\"\"\n y_true = check_array(y_true, ensure_2d=False)\n y_score = check_array(y_score, ensure_2d=False)\n check_consistent_length(y_true, y_score, sample_weight)\n _check_dcg_target_type(y_true)\n return np.average(\n _dcg_sample_scores(\n y_true, y_score, k=k, log_base=log_base,\n ignore_ties=ignore_ties),\n weights=sample_weight)\n\n\ndef _ndcg_sample_scores(y_true, y_score, k=None, ignore_ties=False):\n \"\"\"Compute Normalized Discounted Cumulative Gain.\n\n Sum the true scores ranked in the order induced by the predicted scores,\n after applying a logarithmic discount. Then divide by the best possible\n score (Ideal DCG, obtained for a perfect ranking) to obtain a score between\n 0 and 1.\n\n This ranking metric yields a high value if true labels are ranked high by\n ``y_score``.\n\n Parameters\n ----------\n y_true : ndarray of shape (n_samples, n_labels)\n True targets of multilabel classification, or true scores of entities\n to be ranked.\n\n y_score : ndarray of shape (n_samples, n_labels)\n Target scores, can either be probability estimates, confidence values,\n or non-thresholded measure of decisions (as returned by\n \"decision_function\" on some classifiers).\n\n k : int, default=None\n Only consider the highest k scores in the ranking. If None, use all\n outputs.\n\n ignore_ties : bool, default=False\n Assume that there are no ties in y_score (which is likely to be the\n case if y_score is continuous) for efficiency gains.\n\n Returns\n -------\n normalized_discounted_cumulative_gain : ndarray of shape (n_samples,)\n The NDCG score for each sample (float in [0., 1.]).\n\n See Also\n --------\n dcg_score : Discounted Cumulative Gain (not normalized).\n\n \"\"\"\n gain = _dcg_sample_scores(y_true, y_score, k, ignore_ties=ignore_ties)\n # Here we use the order induced by y_true so we can ignore ties since\n # the gain associated to tied indices is the same (permuting ties doesn't\n # change the value of the re-ordered y_true)\n normalizing_gain = _dcg_sample_scores(y_true, y_true, k, ignore_ties=True)\n all_irrelevant = normalizing_gain == 0\n gain[all_irrelevant] = 0\n gain[~all_irrelevant] /= normalizing_gain[~all_irrelevant]\n return gain\n\n\n@_deprecate_positional_args\ndef ndcg_score(y_true, y_score, *, k=None, sample_weight=None,\n ignore_ties=False):\n \"\"\"Compute Normalized Discounted Cumulative Gain.\n\n Sum the true scores ranked in the order induced by the predicted scores,\n after applying a logarithmic discount. Then divide by the best possible\n score (Ideal DCG, obtained for a perfect ranking) to obtain a score between\n 0 and 1.\n\n This ranking metric yields a high value if true labels are ranked high by\n ``y_score``.\n\n Parameters\n ----------\n y_true : ndarray of shape (n_samples, n_labels)\n True targets of multilabel classification, or true scores of entities\n to be ranked.\n\n y_score : ndarray of shape (n_samples, n_labels)\n Target scores, can either be probability estimates, confidence values,\n or non-thresholded measure of decisions (as returned by\n \"decision_function\" on some classifiers).\n\n k : int, default=None\n Only consider the highest k scores in the ranking. If None, use all\n outputs.\n\n sample_weight : ndarray of shape (n_samples,), default=None\n Sample weights. 
If None, all samples are given the same weight.\n\n ignore_ties : bool, default=False\n Assume that there are no ties in y_score (which is likely to be the\n case if y_score is continuous) for efficiency gains.\n\n Returns\n -------\n normalized_discounted_cumulative_gain : float in [0., 1.]\n The averaged NDCG scores for all samples.\n\n See Also\n --------\n dcg_score : Discounted Cumulative Gain (not normalized).\n\n References\n ----------\n `Wikipedia entry for Discounted Cumulative Gain\n <https://en.wikipedia.org/wiki/Discounted_cumulative_gain>`_\n\n Jarvelin, K., & Kekalainen, J. (2002).\n Cumulated gain-based evaluation of IR techniques. ACM Transactions on\n Information Systems (TOIS), 20(4), 422-446.\n\n Wang, Y., Wang, L., Li, Y., He, D., Chen, W., & Liu, T. Y. (2013, May).\n A theoretical analysis of NDCG ranking measures. In Proceedings of the 26th\n Annual Conference on Learning Theory (COLT 2013)\n\n McSherry, F., & Najork, M. (2008, March). Computing information retrieval\n performance measures efficiently in the presence of tied scores. In\n European conference on information retrieval (pp. 414-421). Springer,\n Berlin, Heidelberg.\n\n Examples\n --------\n >>> from sklearn.metrics import ndcg_score\n >>> # we have groud-truth relevance of some answers to a query:\n >>> true_relevance = np.asarray([[10, 0, 0, 1, 5]])\n >>> # we predict some scores (relevance) for the answers\n >>> scores = np.asarray([[.1, .2, .3, 4, 70]])\n >>> ndcg_score(true_relevance, scores)\n 0.69...\n >>> scores = np.asarray([[.05, 1.1, 1., .5, .0]])\n >>> ndcg_score(true_relevance, scores)\n 0.49...\n >>> # we can set k to truncate the sum; only top k answers contribute.\n >>> ndcg_score(true_relevance, scores, k=4)\n 0.35...\n >>> # the normalization takes k into account so a perfect answer\n >>> # would still get 1.0\n >>> ndcg_score(true_relevance, true_relevance, k=4)\n 1.0\n >>> # now we have some ties in our prediction\n >>> scores = np.asarray([[1, 0, 0, 0, 1]])\n >>> # by default ties are averaged, so here we get the average (normalized)\n >>> # true relevance of our top predictions: (10 / 10 + 5 / 10) / 2 = .75\n >>> ndcg_score(true_relevance, scores, k=1)\n 0.75\n >>> # we can choose to ignore ties for faster results, but only\n >>> # if we know there aren't ties in our scores, otherwise we get\n >>> # wrong results:\n >>> ndcg_score(true_relevance,\n ... scores, k=1, ignore_ties=True)\n 0.5\n\n \"\"\"\n y_true = check_array(y_true, ensure_2d=False)\n y_score = check_array(y_score, ensure_2d=False)\n check_consistent_length(y_true, y_score, sample_weight)\n _check_dcg_target_type(y_true)\n gain = _ndcg_sample_scores(y_true, y_score, k=k, ignore_ties=ignore_ties)\n return np.average(gain, weights=sample_weight)\n\n\ndef top_k_accuracy_score(y_true, y_score, *, k=2, normalize=True,\n sample_weight=None, labels=None):\n \"\"\"Top-k Accuracy classification score.\n\n This metric computes the number of times where the correct label is among\n the top `k` labels predicted (ranked by predicted scores). Note that the\n multilabel case isn't covered here.\n\n Read more in the :ref:`User Guide <top_k_accuracy_score>`\n\n Parameters\n ----------\n y_true : array-like of shape (n_samples,)\n True labels.\n\n y_score : array-like of shape (n_samples,) or (n_samples, n_classes)\n Target scores. These can be either probability estimates or\n non-thresholded decision values (as returned by\n :term:`decision_function` on some classifiers). 
The binary case expects\n scores with shape (n_samples,) while the multiclass case expects scores\n with shape (n_samples, n_classes). In the nulticlass case, the order of\n the class scores must correspond to the order of ``labels``, if\n provided, or else to the numerical or lexicographical order of the\n labels in ``y_true``.\n\n k : int, default=2\n Number of most likely outcomes considered to find the correct label.\n\n normalize : bool, default=True\n If `True`, return the fraction of correctly classified samples.\n Otherwise, return the number of correctly classified samples.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights. If `None`, all samples are given the same weight.\n\n labels : array-like of shape (n_classes,), default=None\n Multiclass only. List of labels that index the classes in ``y_score``.\n If ``None``, the numerical or lexicographical order of the labels in\n ``y_true`` is used.\n\n Returns\n -------\n score : float\n The top-k accuracy score. The best performance is 1 with\n `normalize == True` and the number of samples with\n `normalize == False`.\n\n See also\n --------\n accuracy_score\n\n Notes\n -----\n In cases where two or more labels are assigned equal predicted scores,\n the labels with the highest indices will be chosen first. This might\n impact the result if the correct label falls after the threshold because\n of that.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.metrics import top_k_accuracy_score\n >>> y_true = np.array([0, 1, 2, 2])\n >>> y_score = np.array([[0.5, 0.2, 0.2], # 0 is in top 2\n ... [0.3, 0.4, 0.2], # 1 is in top 2\n ... [0.2, 0.4, 0.3], # 2 is in top 2\n ... [0.7, 0.2, 0.1]]) # 2 isn't in top 2\n >>> top_k_accuracy_score(y_true, y_score, k=2)\n 0.75\n >>> # Not normalizing gives the number of \"correctly\" classified samples\n >>> top_k_accuracy_score(y_true, y_score, k=2, normalize=False)\n 3\n\n \"\"\"\n y_true = check_array(y_true, ensure_2d=False, dtype=None)\n y_true = column_or_1d(y_true)\n y_type = type_of_target(y_true)\n y_score = check_array(y_score, ensure_2d=False)\n y_score = column_or_1d(y_score) if y_type == 'binary' else y_score\n check_consistent_length(y_true, y_score, sample_weight)\n\n if y_type not in {'binary', 'multiclass'}:\n raise ValueError(\n f\"y type must be 'binary' or 'multiclass', got '{y_type}' instead.\"\n )\n\n y_score_n_classes = y_score.shape[1] if y_score.ndim == 2 else 2\n\n if labels is None:\n classes = _unique(y_true)\n n_classes = len(classes)\n\n if n_classes != y_score_n_classes:\n raise ValueError(\n f\"Number of classes in 'y_true' ({n_classes}) not equal \"\n f\"to the number of classes in 'y_score' ({y_score_n_classes}).\"\n )\n else:\n labels = column_or_1d(labels)\n classes = _unique(labels)\n n_labels = len(labels)\n n_classes = len(classes)\n\n if n_classes != n_labels:\n raise ValueError(\"Parameter 'labels' must be unique.\")\n\n if not np.array_equal(classes, labels):\n raise ValueError(\"Parameter 'labels' must be ordered.\")\n\n if n_classes != y_score_n_classes:\n raise ValueError(\n f\"Number of given labels ({n_classes}) not equal to the \"\n f\"number of classes in 'y_score' ({y_score_n_classes}).\"\n )\n\n if len(np.setdiff1d(y_true, classes)):\n raise ValueError(\n \"'y_true' contains labels not in parameter 'labels'.\"\n )\n\n if k >= n_classes:\n warnings.warn(\n f\"'k' ({k}) greater than or equal to 'n_classes' ({n_classes}) \"\n \"will result in a perfect score and is therefore meaningless.\",\n 
UndefinedMetricWarning\n )\n\n y_true_encoded = _encode(y_true, uniques=classes)\n\n if y_type == 'binary':\n if k == 1:\n threshold = .5 if y_score.min() >= 0 and y_score.max() <= 1 else 0\n y_pred = (y_score > threshold).astype(np.int64)\n hits = y_pred == y_true_encoded\n else:\n hits = np.ones_like(y_score, dtype=np.bool_)\n elif y_type == 'multiclass':\n sorted_pred = np.argsort(y_score, axis=1, kind='mergesort')[:, ::-1]\n hits = (y_true_encoded == sorted_pred[:, :k].T).any(axis=0)\n\n if normalize:\n return np.average(hits, weights=sample_weight)\n elif sample_weight is None:\n return np.sum(hits)\n else:\n return np.dot(hits, sample_weight)\n",
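The code string above is scikit-learn's ranking-metrics module (auc, average_precision_score, roc_auc_score, roc_curve, precision_recall_curve, DCG/NDCG, top_k_accuracy_score). As a quick orientation only, here is a minimal standalone usage sketch that reuses the inputs from that file's own doctests; the printed values (0.75 and roughly 0.83) are the ones quoted in those docstrings, and the snippet assumes a working scikit-learn installation that exposes this module as sklearn.metrics.

import numpy as np
from sklearn import metrics

# Inputs taken from the doctests embedded in the module above.
y_true = np.array([1, 1, 2, 2])
y_scores = np.array([0.1, 0.4, 0.35, 0.8])

# ROC curve plus trapezoidal AUC, treating class 2 as the positive label.
fpr, tpr, thresholds = metrics.roc_curve(y_true, y_scores, pos_label=2)
print(metrics.auc(fpr, tpr))  # 0.75, per the auc() doctest

# Uninterpolated average precision on the 0/1 relabelling used in its doctest.
y_bin = np.array([0, 0, 1, 1])
print(metrics.average_precision_score(y_bin, y_scores))  # ~0.83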
"# Authors: Shane Grigsby <[email protected]>\n# Adrin Jalali <[email protected]>\n# License: BSD 3 clause\n\nimport numpy as np\nimport pytest\n\nfrom sklearn.datasets import make_blobs\nfrom sklearn.cluster import OPTICS\nfrom sklearn.cluster._optics import _extend_region, _extract_xi_labels\nfrom sklearn.metrics.cluster import contingency_matrix\nfrom sklearn.metrics.pairwise import pairwise_distances\nfrom sklearn.cluster import DBSCAN\nfrom sklearn.utils import shuffle\nfrom sklearn.utils._testing import assert_array_equal\nfrom sklearn.utils._testing import assert_raise_message\nfrom sklearn.utils._testing import assert_allclose\n\nfrom sklearn.cluster.tests.common import generate_clustered_data\n\n\nrng = np.random.RandomState(0)\nn_points_per_cluster = 10\nC1 = [-5, -2] + .8 * rng.randn(n_points_per_cluster, 2)\nC2 = [4, -1] + .1 * rng.randn(n_points_per_cluster, 2)\nC3 = [1, -2] + .2 * rng.randn(n_points_per_cluster, 2)\nC4 = [-2, 3] + .3 * rng.randn(n_points_per_cluster, 2)\nC5 = [3, -2] + 1.6 * rng.randn(n_points_per_cluster, 2)\nC6 = [5, 6] + 2 * rng.randn(n_points_per_cluster, 2)\nX = np.vstack((C1, C2, C3, C4, C5, C6))\n\n\[email protected](\n ('r_plot', 'end'),\n [[[10, 8.9, 8.8, 8.7, 7, 10], 3],\n [[10, 8.9, 8.8, 8.7, 8.6, 7, 10], 0],\n [[10, 8.9, 8.8, 8.7, 7, 6, np.inf], 4],\n [[10, 8.9, 8.8, 8.7, 7, 6, np.inf], 4],\n ])\ndef test_extend_downward(r_plot, end):\n r_plot = np.array(r_plot)\n ratio = r_plot[:-1] / r_plot[1:]\n steep_downward = ratio >= 1 / .9\n upward = ratio < 1\n\n e = _extend_region(steep_downward, upward, 0, 2)\n assert e == end\n\n\[email protected](\n ('r_plot', 'end'),\n [[[1, 2, 2.1, 2.2, 4, 8, 8, np.inf], 6],\n [[1, 2, 2.1, 2.2, 2.3, 4, 8, 8, np.inf], 0],\n [[1, 2, 2.1, 2, np.inf], 0],\n [[1, 2, 2.1, np.inf], 2],\n ])\ndef test_extend_upward(r_plot, end):\n r_plot = np.array(r_plot)\n ratio = r_plot[:-1] / r_plot[1:]\n steep_upward = ratio <= .9\n downward = ratio > 1\n\n e = _extend_region(steep_upward, downward, 0, 2)\n assert e == end\n\n\[email protected](\n ('ordering', 'clusters', 'expected'),\n [[[0, 1, 2, 3], [[0, 1], [2, 3]], [0, 0, 1, 1]],\n [[0, 1, 2, 3], [[0, 1], [3, 3]], [0, 0, -1, 1]],\n [[0, 1, 2, 3], [[0, 1], [3, 3], [0, 3]], [0, 0, -1, 1]],\n [[3, 1, 2, 0], [[0, 1], [3, 3], [0, 3]], [1, 0, -1, 0]],\n ])\ndef test_the_extract_xi_labels(ordering, clusters, expected):\n labels = _extract_xi_labels(ordering, clusters)\n\n assert_array_equal(labels, expected)\n\n\ndef test_extract_xi():\n # small and easy test (no clusters around other clusters)\n # but with a clear noise data.\n rng = np.random.RandomState(0)\n n_points_per_cluster = 5\n\n C1 = [-5, -2] + .8 * rng.randn(n_points_per_cluster, 2)\n C2 = [4, -1] + .1 * rng.randn(n_points_per_cluster, 2)\n C3 = [1, -2] + .2 * rng.randn(n_points_per_cluster, 2)\n C4 = [-2, 3] + .3 * rng.randn(n_points_per_cluster, 2)\n C5 = [3, -2] + .6 * rng.randn(n_points_per_cluster, 2)\n C6 = [5, 6] + .2 * rng.randn(n_points_per_cluster, 2)\n\n X = np.vstack((C1, C2, C3, C4, C5, np.array([[100, 100]]), C6))\n expected_labels = np.r_[[2] * 5, [0] * 5, [1] * 5, [3] * 5, [1] * 5,\n -1, [4] * 5]\n X, expected_labels = shuffle(X, expected_labels, random_state=rng)\n\n clust = OPTICS(min_samples=3, min_cluster_size=2,\n max_eps=20, cluster_method='xi',\n xi=0.4).fit(X)\n assert_array_equal(clust.labels_, expected_labels)\n\n # check float min_samples and min_cluster_size\n clust = OPTICS(min_samples=0.1, min_cluster_size=0.08,\n max_eps=20, cluster_method='xi',\n xi=0.4).fit(X)\n 
assert_array_equal(clust.labels_, expected_labels)\n\n X = np.vstack((C1, C2, C3, C4, C5, np.array([[100, 100]] * 2), C6))\n expected_labels = np.r_[[1] * 5, [3] * 5, [2] * 5, [0] * 5, [2] * 5,\n -1, -1, [4] * 5]\n X, expected_labels = shuffle(X, expected_labels, random_state=rng)\n\n clust = OPTICS(min_samples=3, min_cluster_size=3,\n max_eps=20, cluster_method='xi',\n xi=0.3).fit(X)\n # this may fail if the predecessor correction is not at work!\n assert_array_equal(clust.labels_, expected_labels)\n\n C1 = [[0, 0], [0, 0.1], [0, -.1], [0.1, 0]]\n C2 = [[10, 10], [10, 9], [10, 11], [9, 10]]\n C3 = [[100, 100], [100, 90], [100, 110], [90, 100]]\n X = np.vstack((C1, C2, C3))\n expected_labels = np.r_[[0] * 4, [1] * 4, [2] * 4]\n X, expected_labels = shuffle(X, expected_labels, random_state=rng)\n\n clust = OPTICS(min_samples=2, min_cluster_size=2,\n max_eps=np.inf, cluster_method='xi',\n xi=0.04).fit(X)\n assert_array_equal(clust.labels_, expected_labels)\n\n\ndef test_cluster_hierarchy_():\n rng = np.random.RandomState(0)\n n_points_per_cluster = 100\n C1 = [0, 0] + 2 * rng.randn(n_points_per_cluster, 2)\n C2 = [0, 0] + 50 * rng.randn(n_points_per_cluster, 2)\n X = np.vstack((C1, C2))\n X = shuffle(X, random_state=0)\n\n clusters = OPTICS(min_samples=20, xi=.1).fit(X).cluster_hierarchy_\n assert clusters.shape == (2, 2)\n diff = np.sum(clusters - np.array([[0, 99], [0, 199]]))\n assert diff / len(X) < 0.05\n\n\ndef test_correct_number_of_clusters():\n # in 'auto' mode\n\n n_clusters = 3\n X = generate_clustered_data(n_clusters=n_clusters)\n # Parameters chosen specifically for this task.\n # Compute OPTICS\n clust = OPTICS(max_eps=5.0 * 6.0, min_samples=4, xi=.1)\n clust.fit(X)\n # number of clusters, ignoring noise if present\n n_clusters_1 = len(set(clust.labels_)) - int(-1 in clust.labels_)\n assert n_clusters_1 == n_clusters\n\n # check attribute types and sizes\n assert clust.labels_.shape == (len(X),)\n assert clust.labels_.dtype.kind == 'i'\n\n assert clust.reachability_.shape == (len(X),)\n assert clust.reachability_.dtype.kind == 'f'\n\n assert clust.core_distances_.shape == (len(X),)\n assert clust.core_distances_.dtype.kind == 'f'\n\n assert clust.ordering_.shape == (len(X),)\n assert clust.ordering_.dtype.kind == 'i'\n assert set(clust.ordering_) == set(range(len(X)))\n\n\ndef test_minimum_number_of_sample_check():\n # test that we check a minimum number of samples\n msg = \"min_samples must be no greater than\"\n\n # Compute OPTICS\n X = [[1, 1]]\n clust = OPTICS(max_eps=5.0 * 0.3, min_samples=10, min_cluster_size=1)\n\n # Run the fit\n assert_raise_message(ValueError, msg, clust.fit, X)\n\n\ndef test_bad_extract():\n # Test an extraction of eps too close to original eps\n msg = \"Specify an epsilon smaller than 0.15. Got 0.3.\"\n centers = [[1, 1], [-1, -1], [1, -1]]\n X, labels_true = make_blobs(n_samples=750, centers=centers,\n cluster_std=0.4, random_state=0)\n\n # Compute OPTICS\n clust = OPTICS(max_eps=5.0 * 0.03,\n cluster_method='dbscan',\n eps=0.3, min_samples=10)\n assert_raise_message(ValueError, msg, clust.fit, X)\n\n\ndef test_bad_reachability():\n msg = \"All reachability values are inf. 
Set a larger max_eps.\"\n centers = [[1, 1], [-1, -1], [1, -1]]\n X, labels_true = make_blobs(n_samples=750, centers=centers,\n cluster_std=0.4, random_state=0)\n\n with pytest.warns(UserWarning, match=msg):\n clust = OPTICS(max_eps=5.0 * 0.003, min_samples=10, eps=0.015)\n clust.fit(X)\n\n\ndef test_close_extract():\n # Test extract where extraction eps is close to scaled max_eps\n\n centers = [[1, 1], [-1, -1], [1, -1]]\n X, labels_true = make_blobs(n_samples=750, centers=centers,\n cluster_std=0.4, random_state=0)\n\n # Compute OPTICS\n clust = OPTICS(max_eps=1.0, cluster_method='dbscan',\n eps=0.3, min_samples=10).fit(X)\n # Cluster ordering starts at 0; max cluster label = 2 is 3 clusters\n assert max(clust.labels_) == 2\n\n\[email protected]('eps', [0.1, .3, .5])\[email protected]('min_samples', [3, 10, 20])\ndef test_dbscan_optics_parity(eps, min_samples):\n # Test that OPTICS clustering labels are <= 5% difference of DBSCAN\n\n centers = [[1, 1], [-1, -1], [1, -1]]\n X, labels_true = make_blobs(n_samples=750, centers=centers,\n cluster_std=0.4, random_state=0)\n\n # calculate optics with dbscan extract at 0.3 epsilon\n op = OPTICS(min_samples=min_samples, cluster_method='dbscan',\n eps=eps).fit(X)\n\n # calculate dbscan labels\n db = DBSCAN(eps=eps, min_samples=min_samples).fit(X)\n\n contingency = contingency_matrix(db.labels_, op.labels_)\n agree = min(np.sum(np.max(contingency, axis=0)),\n np.sum(np.max(contingency, axis=1)))\n disagree = X.shape[0] - agree\n\n percent_mismatch = np.round((disagree - 1) / X.shape[0], 2)\n\n # verify label mismatch is <= 5% labels\n assert percent_mismatch <= 0.05\n\n\ndef test_min_samples_edge_case():\n C1 = [[0, 0], [0, 0.1], [0, -.1]]\n C2 = [[10, 10], [10, 9], [10, 11]]\n C3 = [[100, 100], [100, 96], [100, 106]]\n X = np.vstack((C1, C2, C3))\n\n expected_labels = np.r_[[0] * 3, [1] * 3, [2] * 3]\n clust = OPTICS(min_samples=3,\n max_eps=7, cluster_method='xi',\n xi=0.04).fit(X)\n assert_array_equal(clust.labels_, expected_labels)\n\n expected_labels = np.r_[[0] * 3, [1] * 3, [-1] * 3]\n clust = OPTICS(min_samples=3,\n max_eps=3, cluster_method='xi',\n xi=0.04).fit(X)\n assert_array_equal(clust.labels_, expected_labels)\n\n expected_labels = np.r_[[-1] * 9]\n with pytest.warns(UserWarning, match=\"All reachability values\"):\n clust = OPTICS(min_samples=4,\n max_eps=3, cluster_method='xi',\n xi=0.04).fit(X)\n assert_array_equal(clust.labels_, expected_labels)\n\n\n# try arbitrary minimum sizes\[email protected]('min_cluster_size', range(2, X.shape[0] // 10, 23))\ndef test_min_cluster_size(min_cluster_size):\n redX = X[::2] # reduce for speed\n clust = OPTICS(min_samples=9, min_cluster_size=min_cluster_size).fit(redX)\n cluster_sizes = np.bincount(clust.labels_[clust.labels_ != -1])\n if cluster_sizes.size:\n assert min(cluster_sizes) >= min_cluster_size\n # check behaviour is the same when min_cluster_size is a fraction\n clust_frac = OPTICS(min_samples=9,\n min_cluster_size=min_cluster_size / redX.shape[0])\n clust_frac.fit(redX)\n assert_array_equal(clust.labels_, clust_frac.labels_)\n\n\[email protected]('min_cluster_size', [0, -1, 1.1, 2.2])\ndef test_min_cluster_size_invalid(min_cluster_size):\n clust = OPTICS(min_cluster_size=min_cluster_size)\n with pytest.raises(ValueError, match=\"must be a positive integer or a \"):\n clust.fit(X)\n\n\ndef test_min_cluster_size_invalid2():\n clust = OPTICS(min_cluster_size=len(X) + 1)\n with pytest.raises(ValueError, match=\"must be no greater than the \"):\n clust.fit(X)\n\n\ndef 
test_processing_order():\n # Ensure that we consider all unprocessed points,\n # not only direct neighbors. when picking the next point.\n Y = [[0], [10], [-10], [25]]\n clust = OPTICS(min_samples=3, max_eps=15).fit(Y)\n assert_array_equal(clust.reachability_, [np.inf, 10, 10, 15])\n assert_array_equal(clust.core_distances_, [10, 15, np.inf, np.inf])\n assert_array_equal(clust.ordering_, [0, 1, 2, 3])\n\n\ndef test_compare_to_ELKI():\n # Expected values, computed with (future) ELKI 0.7.5 using:\n # java -jar elki.jar cli -dbc.in csv -dbc.filter FixedDBIDsFilter\n # -algorithm clustering.optics.OPTICSHeap -optics.minpts 5\n # where the FixedDBIDsFilter gives 0-indexed ids.\n r1 = [np.inf, 1.0574896366427478, 0.7587934993548423, 0.7290174038973836,\n 0.7290174038973836, 0.7290174038973836, 0.6861627576116127,\n 0.7587934993548423, 0.9280118450166668, 1.1748022534146194,\n 3.3355455741292257, 0.49618389254482587, 0.2552805046961355,\n 0.2552805046961355, 0.24944622248445714, 0.24944622248445714,\n 0.24944622248445714, 0.2552805046961355, 0.2552805046961355,\n 0.3086779122185853, 4.163024452756142, 1.623152630340929,\n 0.45315840475822655, 0.25468325192031926, 0.2254004358159971,\n 0.18765711877083036, 0.1821471333893275, 0.1821471333893275,\n 0.18765711877083036, 0.18765711877083036, 0.2240202988740153,\n 1.154337614548715, 1.342604473837069, 1.323308536402633,\n 0.8607514948648837, 0.27219111215810565, 0.13260875220533205,\n 0.13260875220533205, 0.09890587675958984, 0.09890587675958984,\n 0.13548790801634494, 0.1575483940837384, 0.17515137170530226,\n 0.17575920159442388, 0.27219111215810565, 0.6101447895405373,\n 1.3189208094864302, 1.323308536402633, 2.2509184159764577,\n 2.4517810628594527, 3.675977064404973, 3.8264795626020365,\n 2.9130735341510614, 2.9130735341510614, 2.9130735341510614,\n 2.9130735341510614, 2.8459300127258036, 2.8459300127258036,\n 2.8459300127258036, 3.0321982337972537]\n o1 = [0, 3, 6, 4, 7, 8, 2, 9, 5, 1, 31, 30, 32, 34, 33, 38, 39, 35, 37, 36,\n 44, 21, 23, 24, 22, 25, 27, 29, 26, 28, 20, 40, 45, 46, 10, 15, 11,\n 13, 17, 19, 18, 12, 16, 14, 47, 49, 43, 48, 42, 41, 53, 57, 51, 52,\n 56, 59, 54, 55, 58, 50]\n p1 = [-1, 0, 3, 6, 6, 6, 8, 3, 7, 5, 1, 31, 30, 30, 34, 34, 34, 32, 32, 37,\n 36, 44, 21, 23, 24, 22, 25, 25, 22, 22, 22, 21, 40, 45, 46, 10, 15,\n 15, 13, 13, 15, 11, 19, 15, 10, 47, 12, 45, 14, 43, 42, 53, 57, 57,\n 57, 57, 59, 59, 59, 58]\n\n # Tests against known extraction array\n # Does NOT work with metric='euclidean', because sklearn euclidean has\n # worse numeric precision. 
'minkowski' is slower but more accurate.\n clust1 = OPTICS(min_samples=5).fit(X)\n\n assert_array_equal(clust1.ordering_, np.array(o1))\n assert_array_equal(clust1.predecessor_[clust1.ordering_], np.array(p1))\n assert_allclose(clust1.reachability_[clust1.ordering_], np.array(r1))\n # ELKI currently does not print the core distances (which are not used much\n # in literature, but we can at least ensure to have this consistency:\n for i in clust1.ordering_[1:]:\n assert (clust1.reachability_[i] >=\n clust1.core_distances_[clust1.predecessor_[i]])\n\n # Expected values, computed with (future) ELKI 0.7.5 using\n r2 = [np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf,\n np.inf, np.inf, np.inf, 0.27219111215810565, 0.13260875220533205,\n 0.13260875220533205, 0.09890587675958984, 0.09890587675958984,\n 0.13548790801634494, 0.1575483940837384, 0.17515137170530226,\n 0.17575920159442388, 0.27219111215810565, 0.4928068613197889,\n np.inf, 0.2666183922512113, 0.18765711877083036, 0.1821471333893275,\n 0.1821471333893275, 0.1821471333893275, 0.18715928772277457,\n 0.18765711877083036, 0.18765711877083036, 0.25468325192031926,\n np.inf, 0.2552805046961355, 0.2552805046961355, 0.24944622248445714,\n 0.24944622248445714, 0.24944622248445714, 0.2552805046961355,\n 0.2552805046961355, 0.3086779122185853, 0.34466409325984865,\n np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf,\n np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf,\n np.inf, np.inf]\n o2 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 11, 13, 17, 19, 18, 12, 16, 14,\n 47, 46, 20, 22, 25, 23, 27, 29, 24, 26, 28, 21, 30, 32, 34, 33, 38,\n 39, 35, 37, 36, 31, 40, 41, 42, 43, 44, 45, 48, 49, 50, 51, 52, 53,\n 54, 55, 56, 57, 58, 59]\n p2 = [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 10, 15, 15, 13, 13, 15,\n 11, 19, 15, 10, 47, -1, 20, 22, 25, 25, 25, 25, 22, 22, 23, -1, 30,\n 30, 34, 34, 34, 32, 32, 37, 38, -1, -1, -1, -1, -1, -1, -1, -1, -1,\n -1, -1, -1, -1, -1, -1, -1, -1, -1]\n clust2 = OPTICS(min_samples=5, max_eps=0.5).fit(X)\n\n assert_array_equal(clust2.ordering_, np.array(o2))\n assert_array_equal(clust2.predecessor_[clust2.ordering_], np.array(p2))\n assert_allclose(clust2.reachability_[clust2.ordering_], np.array(r2))\n\n index = np.where(clust1.core_distances_ <= 0.5)[0]\n assert_allclose(clust1.core_distances_[index],\n clust2.core_distances_[index])\n\n\ndef test_wrong_cluster_method():\n clust = OPTICS(cluster_method='superfancy')\n with pytest.raises(ValueError, match=\"cluster_method should be one of \"):\n clust.fit(X)\n\n\ndef test_extract_dbscan():\n # testing an easy dbscan case. Not including clusters with different\n # densities.\n rng = np.random.RandomState(0)\n n_points_per_cluster = 20\n C1 = [-5, -2] + .2 * rng.randn(n_points_per_cluster, 2)\n C2 = [4, -1] + .2 * rng.randn(n_points_per_cluster, 2)\n C3 = [1, 2] + .2 * rng.randn(n_points_per_cluster, 2)\n C4 = [-2, 3] + .2 * rng.randn(n_points_per_cluster, 2)\n X = np.vstack((C1, C2, C3, C4))\n\n clust = OPTICS(cluster_method='dbscan', eps=.5).fit(X)\n assert_array_equal(np.sort(np.unique(clust.labels_)), [0, 1, 2, 3])\n\n\ndef test_precomputed_dists():\n redX = X[::2]\n dists = pairwise_distances(redX, metric='euclidean')\n clust1 = OPTICS(min_samples=10, algorithm='brute',\n metric='precomputed').fit(dists)\n clust2 = OPTICS(min_samples=10, algorithm='brute',\n metric='euclidean').fit(redX)\n\n assert_allclose(clust1.reachability_, clust2.reachability_)\n assert_array_equal(clust1.labels_, clust2.labels_)\n",
"\"\"\"\n=================================\nMap data to a normal distribution\n=================================\n\n.. currentmodule:: sklearn.preprocessing\n\nThis example demonstrates the use of the Box-Cox and Yeo-Johnson transforms\nthrough :class:`~PowerTransformer` to map data from various\ndistributions to a normal distribution.\n\nThe power transform is useful as a transformation in modeling problems where\nhomoscedasticity and normality are desired. Below are examples of Box-Cox and\nYeo-Johnwon applied to six different probability distributions: Lognormal,\nChi-squared, Weibull, Gaussian, Uniform, and Bimodal.\n\nNote that the transformations successfully map the data to a normal\ndistribution when applied to certain datasets, but are ineffective with others.\nThis highlights the importance of visualizing the data before and after\ntransformation.\n\nAlso note that even though Box-Cox seems to perform better than Yeo-Johnson for\nlognormal and chi-squared distributions, keep in mind that Box-Cox does not\nsupport inputs with negative values.\n\nFor comparison, we also add the output from\n:class:`~QuantileTransformer`. It can force any arbitrary\ndistribution into a gaussian, provided that there are enough training samples\n(thousands). Because it is a non-parametric method, it is harder to interpret\nthan the parametric ones (Box-Cox and Yeo-Johnson).\n\nOn \"small\" datasets (less than a few hundred points), the quantile transformer\nis prone to overfitting. The use of the power transform is then recommended.\n\"\"\"\n\n# Author: Eric Chang <[email protected]>\n# Nicolas Hug <[email protected]>\n# License: BSD 3 clause\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.preprocessing import PowerTransformer\nfrom sklearn.preprocessing import QuantileTransformer\nfrom sklearn.model_selection import train_test_split\n\nprint(__doc__)\n\n\nN_SAMPLES = 1000\nFONT_SIZE = 6\nBINS = 30\n\n\nrng = np.random.RandomState(304)\nbc = PowerTransformer(method='box-cox')\nyj = PowerTransformer(method='yeo-johnson')\n# n_quantiles is set to the training set size rather than the default value\n# to avoid a warning being raised by this example\nqt = QuantileTransformer(n_quantiles=500, output_distribution='normal',\n random_state=rng)\nsize = (N_SAMPLES, 1)\n\n\n# lognormal distribution\nX_lognormal = rng.lognormal(size=size)\n\n# chi-squared distribution\ndf = 3\nX_chisq = rng.chisquare(df=df, size=size)\n\n# weibull distribution\na = 50\nX_weibull = rng.weibull(a=a, size=size)\n\n# gaussian distribution\nloc = 100\nX_gaussian = rng.normal(loc=loc, size=size)\n\n# uniform distribution\nX_uniform = rng.uniform(low=0, high=1, size=size)\n\n# bimodal distribution\nloc_a, loc_b = 100, 105\nX_a, X_b = rng.normal(loc=loc_a, size=size), rng.normal(loc=loc_b, size=size)\nX_bimodal = np.concatenate([X_a, X_b], axis=0)\n\n\n# create plots\ndistributions = [\n ('Lognormal', X_lognormal),\n ('Chi-squared', X_chisq),\n ('Weibull', X_weibull),\n ('Gaussian', X_gaussian),\n ('Uniform', X_uniform),\n ('Bimodal', X_bimodal)\n]\n\ncolors = ['#D81B60', '#0188FF', '#FFC107',\n '#B7A2FF', '#000000', '#2EC5AC']\n\nfig, axes = plt.subplots(nrows=8, ncols=3, figsize=plt.figaspect(2))\naxes = axes.flatten()\naxes_idxs = [(0, 3, 6, 9), (1, 4, 7, 10), (2, 5, 8, 11), (12, 15, 18, 21),\n (13, 16, 19, 22), (14, 17, 20, 23)]\naxes_list = [(axes[i], axes[j], axes[k], axes[l])\n for (i, j, k, l) in axes_idxs]\n\n\nfor distribution, color, axes in zip(distributions, colors, axes_list):\n name, X = 
distribution\n X_train, X_test = train_test_split(X, test_size=.5)\n\n # perform power transforms and quantile transform\n X_trans_bc = bc.fit(X_train).transform(X_test)\n lmbda_bc = round(bc.lambdas_[0], 2)\n X_trans_yj = yj.fit(X_train).transform(X_test)\n lmbda_yj = round(yj.lambdas_[0], 2)\n X_trans_qt = qt.fit(X_train).transform(X_test)\n\n ax_original, ax_bc, ax_yj, ax_qt = axes\n\n ax_original.hist(X_train, color=color, bins=BINS)\n ax_original.set_title(name, fontsize=FONT_SIZE)\n ax_original.tick_params(axis='both', which='major', labelsize=FONT_SIZE)\n\n for ax, X_trans, meth_name, lmbda in zip(\n (ax_bc, ax_yj, ax_qt),\n (X_trans_bc, X_trans_yj, X_trans_qt),\n ('Box-Cox', 'Yeo-Johnson', 'Quantile transform'),\n (lmbda_bc, lmbda_yj, None)):\n ax.hist(X_trans, color=color, bins=BINS)\n title = 'After {}'.format(meth_name)\n if lmbda is not None:\n title += r'\\n$\\lambda$ = {}'.format(lmbda)\n ax.set_title(title, fontsize=FONT_SIZE)\n ax.tick_params(axis='both', which='major', labelsize=FONT_SIZE)\n ax.set_xlim([-3.5, 3.5])\n\n\nplt.tight_layout()\nplt.show()\n",
"import numpy as np\nimport scipy.sparse as sp\n\nfrom scipy import linalg\nfrom sklearn.decomposition import NMF, non_negative_factorization\nfrom sklearn.decomposition import _nmf as nmf # For testing internals\nfrom scipy.sparse import csc_matrix\n\nimport pytest\n\nfrom sklearn.utils._testing import assert_raise_message\nfrom sklearn.utils._testing import assert_array_equal\nfrom sklearn.utils._testing import assert_array_almost_equal\nfrom sklearn.utils._testing import assert_almost_equal\nfrom sklearn.utils._testing import assert_allclose\nfrom sklearn.utils._testing import ignore_warnings\nfrom sklearn.utils.extmath import squared_norm\nfrom sklearn.base import clone\nfrom sklearn.exceptions import ConvergenceWarning\n\n\[email protected]('solver', ['cd', 'mu'])\[email protected]('regularization',\n [None, 'both', 'components', 'transformation'])\ndef test_convergence_warning(solver, regularization):\n convergence_warning = (\"Maximum number of iterations 1 reached. \"\n \"Increase it to improve convergence.\")\n A = np.ones((2, 2))\n with pytest.warns(ConvergenceWarning, match=convergence_warning):\n NMF(solver=solver, regularization=regularization, max_iter=1).fit(A)\n\n\ndef test_initialize_nn_output():\n # Test that initialization does not return negative values\n rng = np.random.mtrand.RandomState(42)\n data = np.abs(rng.randn(10, 10))\n for init in ('random', 'nndsvd', 'nndsvda', 'nndsvdar'):\n W, H = nmf._initialize_nmf(data, 10, init=init, random_state=0)\n assert not ((W < 0).any() or (H < 0).any())\n\n\ndef test_parameter_checking():\n A = np.ones((2, 2))\n name = 'spam'\n # FIXME : should be removed in 0.26\n init = 'nndsvda'\n msg = \"Invalid solver parameter: got 'spam' instead of one of\"\n assert_raise_message(ValueError, msg, NMF(solver=name, init=init).fit, A)\n msg = \"Invalid init parameter: got 'spam' instead of one of\"\n assert_raise_message(ValueError, msg, NMF(init=name).fit, A)\n msg = \"Invalid regularization parameter: got 'spam' instead of one of\"\n assert_raise_message(ValueError, msg, NMF(regularization=name,\n init=init).fit, A)\n msg = \"Invalid beta_loss parameter: got 'spam' instead of one\"\n assert_raise_message(ValueError, msg, NMF(solver='mu', init=init,\n beta_loss=name).fit, A)\n msg = \"Invalid beta_loss parameter: solver 'cd' does not handle \"\n msg += \"beta_loss = 1.0\"\n assert_raise_message(ValueError, msg, NMF(solver='cd', init=init,\n beta_loss=1.0).fit, A)\n\n msg = \"Negative values in data passed to\"\n assert_raise_message(ValueError, msg, NMF(init=init).fit, -A)\n assert_raise_message(ValueError, msg, nmf._initialize_nmf, -A,\n 2, 'nndsvd')\n clf = NMF(2, tol=0.1, init=init).fit(A)\n assert_raise_message(ValueError, msg, clf.transform, -A)\n\n for init in ['nndsvd', 'nndsvda', 'nndsvdar']:\n msg = (\"init = '{}' can only be used when \"\n \"n_components <= min(n_samples, n_features)\"\n .format(init))\n assert_raise_message(ValueError, msg, NMF(3, init=init).fit, A)\n assert_raise_message(ValueError, msg, nmf._initialize_nmf, A,\n 3, init)\n\n\ndef test_initialize_close():\n # Test NNDSVD error\n # Test that _initialize_nmf error is less than the standard deviation of\n # the entries in the matrix.\n rng = np.random.mtrand.RandomState(42)\n A = np.abs(rng.randn(10, 10))\n W, H = nmf._initialize_nmf(A, 10, init='nndsvd')\n error = linalg.norm(np.dot(W, H) - A)\n sdev = linalg.norm(A - A.mean())\n assert error <= sdev\n\n\ndef test_initialize_variants():\n # Test NNDSVD variants correctness\n # Test that the variants 'nndsvda' and 
'nndsvdar' differ from basic\n # 'nndsvd' only where the basic version has zeros.\n rng = np.random.mtrand.RandomState(42)\n data = np.abs(rng.randn(10, 10))\n W0, H0 = nmf._initialize_nmf(data, 10, init='nndsvd')\n Wa, Ha = nmf._initialize_nmf(data, 10, init='nndsvda')\n War, Har = nmf._initialize_nmf(data, 10, init='nndsvdar',\n random_state=0)\n\n for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):\n assert_almost_equal(evl[ref != 0], ref[ref != 0])\n\n\n# ignore UserWarning raised when both solver='mu' and init='nndsvd'\n@ignore_warnings(category=UserWarning)\[email protected]('solver', ('cd', 'mu'))\[email protected]('init',\n (None, 'nndsvd', 'nndsvda', 'nndsvdar', 'random'))\[email protected]('regularization',\n (None, 'both', 'components', 'transformation'))\ndef test_nmf_fit_nn_output(solver, init, regularization):\n # Test that the decomposition does not contain negative values\n A = np.c_[5. - np.arange(1, 6),\n 5. + np.arange(1, 6)]\n model = NMF(n_components=2, solver=solver, init=init,\n regularization=regularization, random_state=0)\n transf = model.fit_transform(A)\n assert not((model.components_ < 0).any() or\n (transf < 0).any())\n\n\[email protected]('solver', ('cd', 'mu'))\[email protected]('regularization',\n (None, 'both', 'components', 'transformation'))\ndef test_nmf_fit_close(solver, regularization):\n rng = np.random.mtrand.RandomState(42)\n # Test that the fit is not too far away\n pnmf = NMF(5, solver=solver, init='nndsvdar', random_state=0,\n regularization=regularization, max_iter=600)\n X = np.abs(rng.randn(6, 5))\n assert pnmf.fit(X).reconstruction_err_ < 0.1\n\n\[email protected]('solver', ('cd', 'mu'))\[email protected]('regularization',\n (None, 'both', 'components', 'transformation'))\ndef test_nmf_transform(solver, regularization):\n # Test that NMF.transform returns close values\n rng = np.random.mtrand.RandomState(42)\n A = np.abs(rng.randn(6, 5))\n m = NMF(solver=solver, n_components=3, init='random',\n regularization=regularization, random_state=0, tol=1e-5)\n ft = m.fit_transform(A)\n t = m.transform(A)\n assert_array_almost_equal(ft, t, decimal=2)\n\n\ndef test_nmf_transform_custom_init():\n # Smoke test that checks if NMF.transform works with custom initialization\n random_state = np.random.RandomState(0)\n A = np.abs(random_state.randn(6, 5))\n n_components = 4\n avg = np.sqrt(A.mean() / n_components)\n H_init = np.abs(avg * random_state.randn(n_components, 5))\n W_init = np.abs(avg * random_state.randn(6, n_components))\n\n m = NMF(solver='cd', n_components=n_components, init='custom',\n random_state=0)\n m.fit_transform(A, W=W_init, H=H_init)\n m.transform(A)\n\n\[email protected]('solver', ('cd', 'mu'))\[email protected]('regularization',\n (None, 'both', 'components', 'transformation'))\ndef test_nmf_inverse_transform(solver, regularization):\n # Test that NMF.inverse_transform returns close values\n random_state = np.random.RandomState(0)\n A = np.abs(random_state.randn(6, 4))\n m = NMF(solver=solver, n_components=4, init='random', random_state=0,\n regularization=regularization, max_iter=1000)\n ft = m.fit_transform(A)\n A_new = m.inverse_transform(ft)\n assert_array_almost_equal(A, A_new, decimal=2)\n\n\ndef test_n_components_greater_n_features():\n # Smoke test for the case of more components than features.\n rng = np.random.mtrand.RandomState(42)\n A = np.abs(rng.randn(30, 10))\n # FIXME : should be removed in 0.26\n init = 'random'\n NMF(n_components=15, random_state=0, tol=1e-2, init=init).fit(A)\n\n\[email 
protected]('solver', ['cd', 'mu'])\[email protected]('regularization',\n [None, 'both', 'components', 'transformation'])\ndef test_nmf_sparse_input(solver, regularization):\n # Test that sparse matrices are accepted as input\n from scipy.sparse import csc_matrix\n\n rng = np.random.mtrand.RandomState(42)\n A = np.abs(rng.randn(10, 10))\n A[:, 2 * np.arange(5)] = 0\n A_sparse = csc_matrix(A)\n\n est1 = NMF(solver=solver, n_components=5, init='random',\n regularization=regularization, random_state=0,\n tol=1e-2)\n est2 = clone(est1)\n\n W1 = est1.fit_transform(A)\n W2 = est2.fit_transform(A_sparse)\n H1 = est1.components_\n H2 = est2.components_\n\n assert_array_almost_equal(W1, W2)\n assert_array_almost_equal(H1, H2)\n\n\ndef test_nmf_sparse_transform():\n # Test that transform works on sparse data. Issue #2124\n rng = np.random.mtrand.RandomState(42)\n A = np.abs(rng.randn(3, 2))\n A[1, 1] = 0\n A = csc_matrix(A)\n\n for solver in ('cd', 'mu'):\n model = NMF(solver=solver, random_state=0, n_components=2,\n max_iter=400, init='nndsvd')\n A_fit_tr = model.fit_transform(A)\n A_tr = model.transform(A)\n assert_array_almost_equal(A_fit_tr, A_tr, decimal=1)\n\n\[email protected]('init', ['random', 'nndsvd'])\[email protected]('solver', ('cd', 'mu'))\[email protected]('regularization',\n (None, 'both', 'components', 'transformation'))\ndef test_non_negative_factorization_consistency(init, solver, regularization):\n # Test that the function is called in the same way, either directly\n # or through the NMF class\n rng = np.random.mtrand.RandomState(42)\n A = np.abs(rng.randn(10, 10))\n A[:, 2 * np.arange(5)] = 0\n\n W_nmf, H, _ = non_negative_factorization(\n A, init=init, solver=solver,\n regularization=regularization, random_state=1, tol=1e-2)\n W_nmf_2, _, _ = non_negative_factorization(\n A, H=H, update_H=False, init=init, solver=solver,\n regularization=regularization, random_state=1, tol=1e-2)\n\n model_class = NMF(init=init, solver=solver,\n regularization=regularization,\n random_state=1, tol=1e-2)\n W_cls = model_class.fit_transform(A)\n W_cls_2 = model_class.transform(A)\n\n assert_array_almost_equal(W_nmf, W_cls, decimal=10)\n assert_array_almost_equal(W_nmf_2, W_cls_2, decimal=10)\n\n\ndef test_non_negative_factorization_checking():\n A = np.ones((2, 2))\n # Test parameters checking is public function\n nnmf = non_negative_factorization\n msg = (\"Number of components must be a positive integer; \"\n \"got (n_components=1.5)\")\n assert_raise_message(ValueError, msg, nnmf, A, A, A, 1.5, init='random')\n msg = (\"Number of components must be a positive integer; \"\n \"got (n_components='2')\")\n assert_raise_message(ValueError, msg, nnmf, A, A, A, '2', init='random')\n msg = \"Negative values in data passed to NMF (input H)\"\n assert_raise_message(ValueError, msg, nnmf, A, A, -A, 2, init='custom')\n msg = \"Negative values in data passed to NMF (input W)\"\n assert_raise_message(ValueError, msg, nnmf, A, -A, A, 2, init='custom')\n msg = \"Array passed to NMF (input H) is full of zeros\"\n assert_raise_message(ValueError, msg, nnmf, A, A, 0 * A, 2, init='custom')\n msg = \"Invalid regularization parameter: got 'spam' instead of one of\"\n assert_raise_message(ValueError, msg, nnmf, A, A, 0 * A, 2, init='custom',\n regularization='spam')\n\n\ndef _beta_divergence_dense(X, W, H, beta):\n \"\"\"Compute the beta-divergence of X and W.H for dense array only.\n\n Used as a reference for testing nmf._beta_divergence.\n \"\"\"\n WH = np.dot(W, H)\n\n if beta == 2:\n return squared_norm(X - WH) / 
2\n\n WH_Xnonzero = WH[X != 0]\n X_nonzero = X[X != 0]\n np.maximum(WH_Xnonzero, 1e-9, out=WH_Xnonzero)\n\n if beta == 1:\n res = np.sum(X_nonzero * np.log(X_nonzero / WH_Xnonzero))\n res += WH.sum() - X.sum()\n\n elif beta == 0:\n div = X_nonzero / WH_Xnonzero\n res = np.sum(div) - X.size - np.sum(np.log(div))\n else:\n res = (X_nonzero ** beta).sum()\n res += (beta - 1) * (WH ** beta).sum()\n res -= beta * (X_nonzero * (WH_Xnonzero ** (beta - 1))).sum()\n res /= beta * (beta - 1)\n\n return res\n\n\ndef test_beta_divergence():\n # Compare _beta_divergence with the reference _beta_divergence_dense\n n_samples = 20\n n_features = 10\n n_components = 5\n beta_losses = [0., 0.5, 1., 1.5, 2.]\n\n # initialization\n rng = np.random.mtrand.RandomState(42)\n X = rng.randn(n_samples, n_features)\n np.clip(X, 0, None, out=X)\n X_csr = sp.csr_matrix(X)\n W, H = nmf._initialize_nmf(X, n_components, init='random', random_state=42)\n\n for beta in beta_losses:\n ref = _beta_divergence_dense(X, W, H, beta)\n loss = nmf._beta_divergence(X, W, H, beta)\n loss_csr = nmf._beta_divergence(X_csr, W, H, beta)\n\n assert_almost_equal(ref, loss, decimal=7)\n assert_almost_equal(ref, loss_csr, decimal=7)\n\n\ndef test_special_sparse_dot():\n # Test the function that computes np.dot(W, H), only where X is non zero.\n n_samples = 10\n n_features = 5\n n_components = 3\n rng = np.random.mtrand.RandomState(42)\n X = rng.randn(n_samples, n_features)\n np.clip(X, 0, None, out=X)\n X_csr = sp.csr_matrix(X)\n\n W = np.abs(rng.randn(n_samples, n_components))\n H = np.abs(rng.randn(n_components, n_features))\n\n WH_safe = nmf._special_sparse_dot(W, H, X_csr)\n WH = nmf._special_sparse_dot(W, H, X)\n\n # test that both results have same values, in X_csr nonzero elements\n ii, jj = X_csr.nonzero()\n WH_safe_data = np.asarray(WH_safe[ii, jj]).ravel()\n assert_array_almost_equal(WH_safe_data, WH[ii, jj], decimal=10)\n\n # test that WH_safe and X_csr have the same sparse structure\n assert_array_equal(WH_safe.indices, X_csr.indices)\n assert_array_equal(WH_safe.indptr, X_csr.indptr)\n assert_array_equal(WH_safe.shape, X_csr.shape)\n\n\n@ignore_warnings(category=ConvergenceWarning)\ndef test_nmf_multiplicative_update_sparse():\n # Compare sparse and dense input in multiplicative update NMF\n # Also test continuity of the results with respect to beta_loss parameter\n n_samples = 20\n n_features = 10\n n_components = 5\n alpha = 0.1\n l1_ratio = 0.5\n n_iter = 20\n\n # initialization\n rng = np.random.mtrand.RandomState(1337)\n X = rng.randn(n_samples, n_features)\n X = np.abs(X)\n X_csr = sp.csr_matrix(X)\n W0, H0 = nmf._initialize_nmf(X, n_components, init='random',\n random_state=42)\n\n for beta_loss in (-1.2, 0, 0.2, 1., 2., 2.5):\n # Reference with dense array X\n W, H = W0.copy(), H0.copy()\n W1, H1, _ = non_negative_factorization(\n X, W, H, n_components, init='custom', update_H=True,\n solver='mu', beta_loss=beta_loss, max_iter=n_iter, alpha=alpha,\n l1_ratio=l1_ratio, regularization='both', random_state=42)\n\n # Compare with sparse X\n W, H = W0.copy(), H0.copy()\n W2, H2, _ = non_negative_factorization(\n X_csr, W, H, n_components, init='custom', update_H=True,\n solver='mu', beta_loss=beta_loss, max_iter=n_iter, alpha=alpha,\n l1_ratio=l1_ratio, regularization='both', random_state=42)\n\n assert_array_almost_equal(W1, W2, decimal=7)\n assert_array_almost_equal(H1, H2, decimal=7)\n\n # Compare with almost same beta_loss, since some values have a specific\n # behavior, but the results should be continuous w.r.t 
beta_loss\n beta_loss -= 1.e-5\n W, H = W0.copy(), H0.copy()\n W3, H3, _ = non_negative_factorization(\n X_csr, W, H, n_components, init='custom', update_H=True,\n solver='mu', beta_loss=beta_loss, max_iter=n_iter, alpha=alpha,\n l1_ratio=l1_ratio, regularization='both', random_state=42)\n\n assert_array_almost_equal(W1, W3, decimal=4)\n assert_array_almost_equal(H1, H3, decimal=4)\n\n\ndef test_nmf_negative_beta_loss():\n # Test that an error is raised if beta_loss < 0 and X contains zeros.\n # Test that the output has not NaN values when the input contains zeros.\n n_samples = 6\n n_features = 5\n n_components = 3\n\n rng = np.random.mtrand.RandomState(42)\n X = rng.randn(n_samples, n_features)\n np.clip(X, 0, None, out=X)\n X_csr = sp.csr_matrix(X)\n\n def _assert_nmf_no_nan(X, beta_loss):\n W, H, _ = non_negative_factorization(\n X, init='random', n_components=n_components, solver='mu',\n beta_loss=beta_loss, random_state=0, max_iter=1000)\n assert not np.any(np.isnan(W))\n assert not np.any(np.isnan(H))\n\n msg = \"When beta_loss <= 0 and X contains zeros, the solver may diverge.\"\n for beta_loss in (-0.6, 0.):\n assert_raise_message(ValueError, msg, _assert_nmf_no_nan, X, beta_loss)\n _assert_nmf_no_nan(X + 1e-9, beta_loss)\n\n for beta_loss in (0.2, 1., 1.2, 2., 2.5):\n _assert_nmf_no_nan(X, beta_loss)\n _assert_nmf_no_nan(X_csr, beta_loss)\n\n\ndef test_nmf_regularization():\n # Test the effect of L1 and L2 regularizations\n n_samples = 6\n n_features = 5\n n_components = 3\n rng = np.random.mtrand.RandomState(42)\n X = np.abs(rng.randn(n_samples, n_features))\n\n # FIXME : should be removed in 0.26\n init = 'nndsvda'\n # L1 regularization should increase the number of zeros\n l1_ratio = 1.\n for solver in ['cd', 'mu']:\n regul = nmf.NMF(n_components=n_components, solver=solver,\n alpha=0.5, l1_ratio=l1_ratio, random_state=42,\n init=init)\n model = nmf.NMF(n_components=n_components, solver=solver,\n alpha=0., l1_ratio=l1_ratio, random_state=42,\n init=init)\n\n W_regul = regul.fit_transform(X)\n W_model = model.fit_transform(X)\n\n H_regul = regul.components_\n H_model = model.components_\n\n W_regul_n_zeros = W_regul[W_regul == 0].size\n W_model_n_zeros = W_model[W_model == 0].size\n H_regul_n_zeros = H_regul[H_regul == 0].size\n H_model_n_zeros = H_model[H_model == 0].size\n\n assert W_regul_n_zeros > W_model_n_zeros\n assert H_regul_n_zeros > H_model_n_zeros\n\n # L2 regularization should decrease the mean of the coefficients\n l1_ratio = 0.\n for solver in ['cd', 'mu']:\n regul = nmf.NMF(n_components=n_components, solver=solver,\n alpha=0.5, l1_ratio=l1_ratio, random_state=42,\n init=init)\n model = nmf.NMF(n_components=n_components, solver=solver,\n alpha=0., l1_ratio=l1_ratio, random_state=42,\n init=init)\n\n W_regul = regul.fit_transform(X)\n W_model = model.fit_transform(X)\n\n H_regul = regul.components_\n H_model = model.components_\n\n assert (linalg.norm(W_model))**2. + (linalg.norm(H_model))**2. > \\\n (linalg.norm(W_regul))**2. 
+ (linalg.norm(H_regul))**2.\n\n\n@ignore_warnings(category=ConvergenceWarning)\ndef test_nmf_decreasing():\n # test that the objective function is decreasing at each iteration\n n_samples = 20\n n_features = 15\n n_components = 10\n alpha = 0.1\n l1_ratio = 0.5\n tol = 0.\n\n # initialization\n rng = np.random.mtrand.RandomState(42)\n X = rng.randn(n_samples, n_features)\n np.abs(X, X)\n W0, H0 = nmf._initialize_nmf(X, n_components, init='random',\n random_state=42)\n\n for beta_loss in (-1.2, 0, 0.2, 1., 2., 2.5):\n for solver in ('cd', 'mu'):\n if solver != 'mu' and beta_loss != 2:\n # not implemented\n continue\n W, H = W0.copy(), H0.copy()\n previous_loss = None\n for _ in range(30):\n # one more iteration starting from the previous results\n W, H, _ = non_negative_factorization(\n X, W, H, beta_loss=beta_loss, init='custom',\n n_components=n_components, max_iter=1, alpha=alpha,\n solver=solver, tol=tol, l1_ratio=l1_ratio, verbose=0,\n regularization='both', random_state=0, update_H=True)\n\n loss = nmf._beta_divergence(X, W, H, beta_loss)\n if previous_loss is not None:\n assert previous_loss > loss\n previous_loss = loss\n\n\ndef test_nmf_underflow():\n # Regression test for an underflow issue in _beta_divergence\n rng = np.random.RandomState(0)\n n_samples, n_features, n_components = 10, 2, 2\n X = np.abs(rng.randn(n_samples, n_features)) * 10\n W = np.abs(rng.randn(n_samples, n_components)) * 10\n H = np.abs(rng.randn(n_components, n_features))\n\n X[0, 0] = 0\n ref = nmf._beta_divergence(X, W, H, beta=1.0)\n X[0, 0] = 1e-323\n res = nmf._beta_divergence(X, W, H, beta=1.0)\n assert_almost_equal(res, ref)\n\n\[email protected](\"dtype_in, dtype_out\", [\n (np.float32, np.float32),\n (np.float64, np.float64),\n (np.int32, np.float64),\n (np.int64, np.float64)])\[email protected](\"solver\", [\"cd\", \"mu\"])\[email protected](\"regularization\",\n (None, \"both\", \"components\", \"transformation\"))\ndef test_nmf_dtype_match(dtype_in, dtype_out, solver, regularization):\n # Check that NMF preserves dtype (float32 and float64)\n X = np.random.RandomState(0).randn(20, 15).astype(dtype_in, copy=False)\n np.abs(X, out=X)\n # FIXME : should be removed in 0.26\n init = 'nndsvda'\n nmf = NMF(solver=solver, regularization=regularization, init=init)\n\n assert nmf.fit(X).transform(X).dtype == dtype_out\n assert nmf.fit_transform(X).dtype == dtype_out\n assert nmf.components_.dtype == dtype_out\n\n\[email protected](\"solver\", [\"cd\", \"mu\"])\[email protected](\"regularization\",\n (None, \"both\", \"components\", \"transformation\"))\ndef test_nmf_float32_float64_consistency(solver, regularization):\n # Check that the result of NMF is the same between float32 and float64\n X = np.random.RandomState(0).randn(50, 7)\n np.abs(X, out=X)\n # FIXME : should be removed in 0.26\n init = 'nndsvda'\n nmf32 = NMF(solver=solver, regularization=regularization, random_state=0,\n init=init)\n W32 = nmf32.fit_transform(X.astype(np.float32))\n nmf64 = NMF(solver=solver, regularization=regularization, random_state=0,\n init=init)\n W64 = nmf64.fit_transform(X)\n\n assert_allclose(W32, W64, rtol=1e-6, atol=1e-5)\n\n\ndef test_nmf_custom_init_dtype_error():\n # Check that an error is raise if custom H and/or W don't have the same\n # dtype as X.\n rng = np.random.RandomState(0)\n X = rng.random_sample((20, 15))\n H = rng.random_sample((15, 15)).astype(np.float32)\n W = rng.random_sample((20, 15))\n\n with pytest.raises(TypeError, match=\"should have the same dtype as X\"):\n NMF(init='custom').fit(X, H=H, 
W=W)\n\n with pytest.raises(TypeError, match=\"should have the same dtype as X\"):\n non_negative_factorization(X, H=H, update_H=False)\n\n\n# FIXME : should be removed in 0.26\ndef test_init_default_deprecation():\n # Test FutureWarning on init default\n msg = (\"The 'init' value, when 'init=None' and \"\n \"n_components is less than n_samples and \"\n \"n_features, will be changed from 'nndsvd' to \"\n \"'nndsvda' in 0.26.\")\n rng = np.random.mtrand.RandomState(42)\n A = np.abs(rng.randn(6, 5))\n with pytest.warns(FutureWarning, match=msg):\n nmf._initialize_nmf(A, 3)\n with pytest.warns(FutureWarning, match=msg):\n NMF().fit(A)\n with pytest.warns(FutureWarning, match=msg):\n non_negative_factorization(A)\n"
] |
[
[
"sklearn.ensemble.RandomForestRegressor",
"sklearn.datasets.make_classification",
"numpy.linspace",
"numpy.asarray",
"sklearn.datasets.load_diabetes",
"sklearn.base.clone",
"sklearn.tree.DecisionTreeClassifier",
"numpy.argmin",
"numpy.ones_like",
"sklearn.ensemble.RandomForestClassifier",
"numpy.unique",
"numpy.argmax",
"numpy.ravel",
"sklearn.utils._testing.assert_array_equal",
"numpy.zeros",
"sklearn.dummy.DummyClassifier",
"sklearn.dummy.DummyRegressor",
"sklearn.datasets.load_iris",
"sklearn.model_selection.train_test_split",
"sklearn.utils._testing.assert_raises_regexp",
"sklearn.svm.SVR",
"sklearn.ensemble.AdaBoostClassifier",
"sklearn.datasets.make_multilabel_classification",
"sklearn.svm.SVC",
"numpy.array",
"numpy.random.RandomState",
"sklearn.model_selection.GridSearchCV",
"sklearn.tree.DecisionTreeRegressor",
"numpy.isfinite",
"sklearn.utils.shuffle",
"sklearn.datasets.make_regression",
"sklearn.utils._testing.assert_array_almost_equal",
"sklearn.linear_model.LinearRegression",
"sklearn.datasets.load_digits",
"sklearn.ensemble.AdaBoostRegressor"
],
[
"numpy.dot",
"numpy.asarray",
"numpy.cumsum",
"numpy.all",
"numpy.any",
"numpy.searchsorted",
"numpy.trapz",
"numpy.ones_like",
"numpy.unique",
"numpy.arange",
"numpy.diff",
"numpy.interp",
"numpy.repeat",
"numpy.zeros",
"numpy.logical_not",
"numpy.log",
"numpy.isnan",
"scipy.sparse.csr_matrix",
"numpy.logical_or",
"numpy.append",
"numpy.errstate",
"numpy.argsort",
"numpy.array",
"numpy.sum",
"numpy.add.at",
"numpy.array_equal",
"scipy.stats.rankdata",
"numpy.setdiff1d",
"numpy.average"
],
[
"sklearn.cluster.DBSCAN",
"numpy.round",
"numpy.max",
"sklearn.cluster.OPTICS",
"sklearn.cluster.tests.common.generate_clustered_data",
"numpy.where",
"sklearn.datasets.make_blobs",
"sklearn.utils._testing.assert_allclose",
"numpy.unique",
"sklearn.cluster._optics._extract_xi_labels",
"sklearn.metrics.cluster.contingency_matrix",
"sklearn.utils._testing.assert_raise_message",
"sklearn.utils._testing.assert_array_equal",
"sklearn.cluster._optics._extend_region",
"numpy.array",
"sklearn.metrics.pairwise.pairwise_distances",
"numpy.random.RandomState",
"sklearn.utils.shuffle",
"numpy.bincount",
"numpy.vstack"
],
[
"matplotlib.pyplot.figaspect",
"matplotlib.pyplot.tight_layout",
"sklearn.preprocessing.QuantileTransformer",
"sklearn.model_selection.train_test_split",
"numpy.concatenate",
"sklearn.preprocessing.PowerTransformer",
"numpy.random.RandomState",
"matplotlib.pyplot.show"
],
[
"numpy.dot",
"sklearn.utils._testing.ignore_warnings",
"numpy.asarray",
"sklearn.base.clone",
"sklearn.utils._testing.assert_almost_equal",
"sklearn.utils._testing.assert_allclose",
"numpy.clip",
"numpy.arange",
"sklearn.decomposition.non_negative_factorization",
"sklearn.utils.extmath.squared_norm",
"scipy.linalg.norm",
"sklearn.utils._testing.assert_array_equal",
"sklearn.utils._testing.assert_raise_message",
"numpy.random.mtrand.RandomState",
"scipy.sparse.csc_matrix",
"numpy.log",
"numpy.isnan",
"scipy.sparse.csr_matrix",
"numpy.random.RandomState",
"numpy.sum",
"sklearn.decomposition.NMF",
"numpy.maximum",
"numpy.abs",
"numpy.ones",
"sklearn.utils._testing.assert_array_almost_equal"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.7",
"1.0",
"0.17",
"1.2",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.12",
"0.10"
],
"tensorflow": []
}
] |
dzenanz/MONAI
|
[
"524c99d90891c061768766b787fe11dc9551d401",
"524c99d90891c061768766b787fe11dc9551d401"
] |
[
"tests/test_crop_foregroundd.py",
"examples/segmentation_3d/unet_training_dict.py"
] |
[
"# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nimport numpy as np\nfrom parameterized import parameterized\n\nfrom monai.transforms import CropForegroundd\n\nTEST_CASE_1 = [\n {\n \"keys\": [\"img\", \"label\"],\n \"source_key\": \"label\",\n \"select_fn\": lambda x: x > 0,\n \"channel_indices\": None,\n \"margin\": 0,\n },\n {\n \"img\": np.array([[[1, 0, 2, 0, 1], [0, 1, 2, 1, 0], [2, 2, 3, 2, 2], [0, 1, 2, 1, 0], [1, 0, 2, 0, 1]]]),\n \"label\": np.array([[[0, 0, 0, 0, 0], [0, 1, 0, 1, 0], [0, 0, 1, 0, 0], [0, 1, 0, 1, 0], [0, 0, 0, 0, 0]]]),\n },\n np.array([[[1, 2, 1], [2, 3, 2], [1, 2, 1]]]),\n]\n\nTEST_CASE_2 = [\n {\"keys\": [\"img\"], \"source_key\": \"img\", \"select_fn\": lambda x: x > 1, \"channel_indices\": None, \"margin\": 0},\n {\"img\": np.array([[[0, 0, 0, 0, 0], [0, 1, 1, 1, 0], [0, 1, 3, 1, 0], [0, 1, 1, 1, 0], [0, 0, 0, 0, 0]]])},\n np.array([[[3]]]),\n]\n\nTEST_CASE_3 = [\n {\"keys\": [\"img\"], \"source_key\": \"img\", \"select_fn\": lambda x: x > 0, \"channel_indices\": 0, \"margin\": 0},\n {\"img\": np.array([[[0, 0, 0, 0, 0], [0, 1, 2, 1, 0], [0, 2, 3, 2, 0], [0, 1, 2, 1, 0], [0, 0, 0, 0, 0]]])},\n np.array([[[1, 2, 1], [2, 3, 2], [1, 2, 1]]]),\n]\n\nTEST_CASE_4 = [\n {\"keys\": [\"img\"], \"source_key\": \"img\", \"select_fn\": lambda x: x > 0, \"channel_indices\": None, \"margin\": 1},\n {\"img\": np.array([[[0, 0, 0, 0, 0], [0, 1, 2, 1, 0], [0, 2, 3, 2, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]])},\n np.array([[[0, 0, 0, 0, 0], [0, 1, 2, 1, 0], [0, 2, 3, 2, 0], [0, 0, 0, 0, 0]]]),\n]\n\n\nclass TestCropForegroundd(unittest.TestCase):\n @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4])\n def test_value(self, argments, image, expected_data):\n result = CropForegroundd(**argments)(image)\n np.testing.assert_allclose(result[\"img\"], expected_data)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport os\nimport sys\nimport tempfile\nfrom glob import glob\n\nimport nibabel as nib\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\n\nimport monai\nfrom monai.data import create_test_image_3d, list_data_collate\nfrom monai.inferers import sliding_window_inference\nfrom monai.metrics import DiceMetric\nfrom monai.transforms import (\n AsChannelFirstd,\n Compose,\n LoadNiftid,\n RandCropByPosNegLabeld,\n RandRotate90d,\n ScaleIntensityd,\n ToTensord,\n)\nfrom monai.visualize import plot_2d_or_3d_image\n\n\ndef main(tempdir):\n monai.config.print_config()\n logging.basicConfig(stream=sys.stdout, level=logging.INFO)\n\n # # create a temporary directory and 40 random image, mask pairs\n if (not os.path.exists(tempdir)):\n print(f\"generating synthetic data to {tempdir}\")\n for i in range(40):\n im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=3, channel_dim=-1)\n\n n = nib.Nifti1Image(im, np.eye(4))\n nib.save(n, os.path.join(tempdir, f\"img{i:d}.nii.gz\"))\n\n n = nib.Nifti1Image(seg, np.eye(4))\n nib.save(n, os.path.join(tempdir, f\"seg{i:d}.nii.gz\"))\n else:\n print(f\"Found data in {tempdir}\")\n\n images = sorted(glob(os.path.join(tempdir, \"img*.nii.gz\")))\n segs = sorted(glob(os.path.join(tempdir, \"seg*.nii.gz\")))\n train_files = [{\"img\": img, \"seg\": seg} for img, seg in zip(images[:20], segs[:20])]\n val_files = [{\"img\": img, \"seg\": seg} for img, seg in zip(images[-20:], segs[-20:])]\n\n # define transforms for image and segmentation\n train_transforms = Compose(\n [\n LoadNiftid(keys=[\"img\", \"seg\"]),\n AsChannelFirstd(keys=[\"img\", \"seg\"], channel_dim=-1),\n ScaleIntensityd(keys=\"img\"),\n RandCropByPosNegLabeld(\n keys=[\"img\", \"seg\"], label_key=\"seg\", spatial_size=[64, 64, 64], pos=1, neg=1, num_samples=4\n ),\n RandRotate90d(keys=[\"img\", \"seg\"], prob=0.35, spatial_axes=[0, 2]),\n ToTensord(keys=[\"img\", \"seg\"]),\n ]\n )\n val_transforms = Compose(\n [\n LoadNiftid(keys=[\"img\", \"seg\"]),\n AsChannelFirstd(keys=[\"img\", \"seg\"], channel_dim=-1),\n ScaleIntensityd(keys=\"img\"),\n ToTensord(keys=[\"img\", \"seg\"]),\n ]\n )\n\n # define dataset, data loader\n check_ds = monai.data.Dataset(data=train_files, transform=train_transforms)\n # use batch_size=2 to load images and use RandCropByPosNegLabeld to generate 2 x 4 images for network training\n check_loader = DataLoader(check_ds, batch_size=2, num_workers=4, collate_fn=list_data_collate)\n check_data = monai.utils.misc.first(check_loader)\n print(check_data[\"img\"].shape, check_data[\"seg\"].shape)\n\n # create a training data loader\n train_ds = monai.data.Dataset(data=train_files, transform=train_transforms)\n # use batch_size=2 to load images and use RandCropByPosNegLabeld to generate 2 x 4 images for network training\n train_loader = DataLoader(\n train_ds,\n batch_size=2,\n shuffle=True,\n num_workers=4,\n 
collate_fn=list_data_collate,\n pin_memory=torch.cuda.is_available(),\n )\n # create a validation data loader\n val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)\n val_loader = DataLoader(val_ds, batch_size=1, num_workers=4, collate_fn=list_data_collate)\n dice_metric = DiceMetric(include_background=True, to_onehot_y=False, sigmoid=True, reduction=\"mean\")\n\n # create UNet, DiceLoss and Adam optimizer\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n model = monai.networks.nets.UNet(\n dimensions=3,\n in_channels=1,\n out_channels=1,\n channels=(16, 32, 64, 128, 256),\n strides=(2, 2, 2, 2),\n num_res_units=2,\n ).to(device)\n model_path = \"best_metric_model_segmentation3d_dict.pth\"\n if (os.path.exists(model_path)):\n model.load_state_dict(torch.load(model_path))\n print(f\"Loaded model from file '{model_path}'\")\n\n loss_function = monai.losses.DiceLoss(sigmoid=True)\n optimizer = torch.optim.Adam(model.parameters(), 1e-3)\n\n # start a typical PyTorch training\n val_interval = 2\n best_metric = -1\n best_metric_epoch = -1\n epoch_loss_values = list()\n metric_values = list()\n writer = SummaryWriter()\n num_epochs = 10\n for epoch in range(num_epochs):\n print(\"-\" * 10)\n print(f\"epoch {epoch + 1}/{num_epochs}\")\n model.train()\n epoch_loss = 0\n step = 0\n for batch_data in train_loader:\n step += 1\n inputs, labels = batch_data[\"img\"].to(device), batch_data[\"seg\"].to(device)\n optimizer.zero_grad()\n outputs = model(inputs)\n loss = loss_function(outputs, labels)\n loss.backward()\n optimizer.step()\n epoch_loss += loss.item()\n epoch_len = len(train_ds) // train_loader.batch_size\n print(f\"{step}/{epoch_len}, train_loss: {loss.item():.4f}\")\n writer.add_scalar(\"train_loss\", loss.item(), epoch_len * epoch + step)\n epoch_loss /= step\n epoch_loss_values.append(epoch_loss)\n print(f\"epoch {epoch + 1} average loss: {epoch_loss:.4f}\")\n\n if (epoch + 1) % val_interval == 0:\n model.eval()\n with torch.no_grad():\n metric_sum = 0.0\n metric_count = 0\n val_images = None\n val_labels = None\n val_outputs = None\n for val_data in val_loader:\n val_images, val_labels = val_data[\"img\"].to(device), val_data[\"seg\"].to(device)\n roi_size = (64, 64, 64)\n sw_batch_size = 4\n val_outputs = sliding_window_inference(val_images, roi_size, sw_batch_size, model)\n value = dice_metric(y_pred=val_outputs, y=val_labels)\n metric_count += len(value)\n metric_sum += value.item() * len(value)\n metric = metric_sum / metric_count\n metric_values.append(metric)\n if metric > best_metric:\n best_metric = metric\n best_metric_epoch = epoch + 1\n torch.save(model.state_dict(), model_path)\n print(\"saved new best metric model\")\n print(\n \"current epoch: {} current mean dice: {:.4f} best mean dice: {:.4f} at epoch {}\".format(\n epoch + 1, metric, best_metric, best_metric_epoch\n )\n )\n writer.add_scalar(\"val_mean_dice\", metric, epoch + 1)\n # plot the last model output as GIF image in TensorBoard with the corresponding image and label\n plot_2d_or_3d_image(val_images, epoch + 1, writer, index=0, tag=\"image\")\n plot_2d_or_3d_image(val_labels, epoch + 1, writer, index=0, tag=\"label\")\n plot_2d_or_3d_image(val_outputs, epoch + 1, writer, index=0, tag=\"output\")\n\n print(f\"train completed, best_metric: {best_metric:.4f} at epoch: {best_metric_epoch}\")\n writer.close()\n\n\nif __name__ == \"__main__\":\n main(\"./workspace/data/medical/tempMy\")\n"
] |
[
[
"numpy.array",
"numpy.testing.assert_allclose"
],
[
"torch.load",
"numpy.eye",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.utils.tensorboard.SummaryWriter",
"torch.cuda.is_available"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lws803/Crime-detect
|
[
"e71e04032589229372124d9895131bdcdeb3fbb9"
] |
[
"utils/common.py"
] |
[
"import numpy as np\nimport cv2\n\n\ndef draw_flow(img, flow, step=8):\n h, w = img.shape[:2]\n y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2,-1).astype(int)\n fx, fy = flow[y,x].T\n lines = np.vstack([x, y, x+fx, y+fy]).T.reshape(-1, 2, 2)\n lines = np.int32(lines + 0.5)\n # vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n vis = img\n cv2.polylines(vis, lines, 0, (0, 255, 0))\n for (x1, y1), (x2, y2) in lines:\n cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)\n return vis\n\n\ndef isIntersect(human, knife):\n\n amin_x = human[0]\n amax_x = human[2]\n amin_y = human[1]\n amax_y = human[3]\n\n othermin_x = knife[0]\n othermax_x = knife[2]\n othermin_y = knife[1]\n othermax_y = knife[3]\n\n\n if amin_x > othermax_x or amax_x < othermin_x:\n return False\n if amin_y > othermax_y or amax_y < othermin_y:\n return False\n return True\n"
] |
[
[
"numpy.int32",
"numpy.vstack"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kidscash/Deep_Learning
|
[
"d25d98176270fc2e871c11d0baa567634e8a269b"
] |
[
"ch03/mnist_show.py"
] |
[
"import sys, os\nsys.path.append(os.pardir)\nfrom mnist import load_mnist\nimport numpy as np\nfrom PIL import Image\n\ndef img_show(img):\n pil_img = Image.fromarray(np.uint8(img))\n pil_img.show()\n\n\n(x_train, t_train), (x_test, t_test) = \\\n load_mnist(flatten=True, normalize=False)\n \n\nimg = x_train[1]\nlabel = t_train[1]\nprint(label)\n\nprint('img shape : ', img.shape)\nimg = img.reshape(28, 28)\nprint('reshaped img : ', img.shape)\n\nimg_show(img)"
] |
[
[
"numpy.uint8"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zack-vii/mdsplus
|
[
"7c631281d22e993599dbdf7bd31782035af92688"
] |
[
"python/MDSplus/apd.py"
] |
[
"#\n# Copyright (c) 2017, Massachusetts Institute of Technology All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# Redistributions in binary form must reproduce the above copyright notice, this\n# list of conditions and the following disclaimer in the documentation and/or\n# other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n\ndef _mimport(name, level=1):\n try:\n return __import__(name, globals(), level=level)\n except:\n return __import__(name, globals())\n\nimport numpy as _N\nimport ctypes as _C\n\n_dsc=_mimport('descriptor')\n_dat=_mimport('mdsdata')\n_scr=_mimport('mdsscalar')\n_arr=_mimport('mdsarray')\n_ver=_mimport('version')\n\nclass Apd(_dat.TreeRefX,_arr.Array):\n \"\"\"The Apd class represents the Array of Pointers to Descriptors structure.\n This structure provides a mechanism for storing an array of non-primitive items.\n \"\"\"\n mdsclass=196\n dtype_id=24\n maxdesc = 1<<31\n @property\n def _descriptor(self):\n descs=self.descs\n _compound=_mimport('compound')\n d=_dsc.Descriptor_apd()\n d.scale=0\n d.digits=0\n d.aflags=0\n d.dtype=self.dtype_id\n d.dimct=1\n d.length=_C.sizeof(_C.c_void_p)\n ndesc = len(descs)\n d.array=[None]*ndesc\n if ndesc:\n d.arsize=d.length*ndesc\n descs_ptrs=(_dsc.Descriptor.PTR*ndesc)()\n for idx,desc in enumerate(descs):\n if desc is None:\n descs_ptrs[idx] = None\n else: # keys in dicts have to be python types\n if isinstance(desc,_dsc.Descriptor):\n d.array[idx] = desc\n else:\n d.array[idx] = _dat.Data(desc)._descriptor\n descs_ptrs[idx] = d.array[idx].ptr_\n d.pointer=_C.cast(_C.pointer(descs_ptrs),_C.c_void_p)\n d.a0=d.pointer\n return _compound.Compound._descriptorWithProps(self,d)\n\n @classmethod\n def fromDescriptor(cls,d):\n num = d.arsize//d.length\n dptrs = _C.cast(d.pointer,_C.POINTER(_C.c_void_p*num)).contents\n descs = [_dsc.pointerToObject(dptr,d.tree) for dptr in dptrs]\n return cls(descs)._setTree(d.tree)\n\n def __init__(self,value=None,dtype=0):\n \"\"\"Initializes a Apd instance\n \"\"\"\n if value is self: return\n self.dtype_id = dtype\n self._descs = []\n if value is not None:\n if isinstance(value,(Apd,tuple,list,_ver.mapclass,_ver.generator,_N.ndarray)):\n for val in value:\n self.append(_dat.Data(val))\n else:\n raise TypeError(\"must provide tuple of items when creating ApdData: %s\"%(type(value),))\n\n def __len__(self):\n \"\"\"Return the number of descriptors in the apd\"\"\"\n return self.getNumDescs()\n\n\n def append(self,value):\n \"\"\"Append a value to apd\"\"\"\n 
self[len(self)]=_dat.Data(value)\n return self\n\n @property\n def value(self):\n return _N.array(self.descs,object)\n\n\n @property\n def _value(self):\n \"\"\"Returns native representation of the List\"\"\"\n return _N.asarray(tuple(d.value for d in self._descs),'object')\n\n\nclass Dictionary(dict,Apd):\n \"\"\"dictionary class\"\"\"\n class dict_np(_N.ndarray):\n def __new__(cls,items):\n return _N.asarray(tuple(d for d in items),'object').view(Dictionary.dict_np)\n def tolist(self):\n return dict(super(Dictionary.dict_np,self).tolist())\n\n _key_value_exception = Exception('A dictionary requires an even number of elements read as key-value pairs.')\n\n dtype_id=216\n\n def __init__(self,value=None):\n if value is self: return\n if value is not None:\n if isinstance(value,dict):\n for key,val in value.items():\n self.setdefault(key,val)\n elif isinstance(value,(Apd,tuple,list,_ver.mapclass,_N.ndarray)):\n if isinstance(value,(_ver.mapclass,)) and not isinstance(value,(tuple,)):\n value = tuple(value)\n if len(value)&1:\n raise Dictionary._key_value_exception\n for idx in range(0,len(value),2):\n self.setdefault(value[idx],value[idx+1])\n elif isinstance(value,(_ver.generator)):\n for key in value:\n self.setdefault(key,next(value))\n else:\n raise TypeError('Cannot create Dictionary from type: '+str(type(value)))\n\n @staticmethod\n def toKey(key):\n if isinstance(key,(_scr.Scalar,)):\n key = key.value\n if isinstance(key,(_ver.npbytes,_ver.npunicode)):\n return _ver.tostr(key)\n if isinstance(key,(_N.int32,)):\n return int(key)\n if isinstance(key,(_N.float32,_N.float64)):\n return float(key)\n return _dat.Data(key).data().tolist()\n\n def setdefault(self,key,val):\n \"\"\"check keys and converts values to instances of Data\"\"\"\n key = Dictionary.toKey(key)\n if not isinstance(val,_dat.Data):\n val=_dat.Data(val)\n super(Dictionary,self).setdefault(key,val)\n\n def remove(self,key):\n \"\"\"remove pair with key\"\"\"\n del(self[Dictionary.toKey(key)])\n\n def __setitem__(self,name,value):\n \"\"\"sets values as instances of Data\"\"\"\n self.setdefault(name,value)\n\n def __getitem__(self,name):\n \"\"\"gets values as instances of Data\"\"\"\n return super(Dictionary,self).__getitem__(Dictionary.toKey(name))\n\n @property\n def value(self):\n \"\"\"Return native representation of data item\"\"\"\n return Dictionary.dict_np(self.items())\n\n def toApd(self):\n return Apd(self.descs,self.dtype_id)\n\n @property\n def descs(self):\n \"\"\"Returns the descs of the Apd.\n @rtype: tuple\n \"\"\"\n return self._descs\n @property\n def _descs(self): return sum(self.items(),())\n\nclass List(list,Apd):\n \"\"\"list class\"\"\"\n\n dtype_id=214\n\n def __init__(self,value=None):\n if value is self: return\n if value is not None:\n if isinstance(value,(Apd,tuple,list,_ver.mapclass,_ver.generator,_N.ndarray)):\n for val in value:\n List.append(self,_dat.Data(val))\n else:\n raise TypeError('Cannot create List from type: '+str(type(value)))\n\n @property\n def descs(self):\n \"\"\"Returns the descs of the Apd.\n @rtype: tuple\n \"\"\"\n return tuple(self)\n\n @property\n def _descs(self): return self\n\ndescriptor=_mimport('descriptor')\ndescriptor.dtypeToClass[Apd.dtype_id]=Apd\ndescriptor.dtypeToClass[List.dtype_id]=List\ndescriptor.dtypeToClass[Dictionary.dtype_id]=Dictionary\ndescriptor.dtypeToArrayClass[Apd.dtype_id]=Apd\ndescriptor.dtypeToArrayClass[List.dtype_id]=List\ndescriptor.dtypeToArrayClass[Dictionary.dtype_id]=Dictionary\n_tre=_mimport('tree')\n"
] |
[
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
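
The apd.py source in the row above defines Apd, List, and Dictionary as array-of-descriptor containers that wrap arbitrary Python values as MDSplus Data objects. A minimal usage sketch follows; the package-level import is an assumption about a typical MDSplus installation, not something shown in the row itself.

```python
# Illustrative sketch only. It assumes MDSplus is installed and re-exports the
# classes defined in apd.py at package level (an assumption, not shown above).
from MDSplus import Dictionary, List

# List accepts any iterable; each element is wrapped as an MDSplus Data object.
lst = List([1, 2.5, "three"])
print(len(lst))                     # 3

# Dictionary accepts a dict, or a flat key/value sequence of even length.
d = Dictionary({"shot": 12345, "comment": "test"})
d["gain"] = 2.0                     # __setitem__ converts the value to Data
print(d["shot"])                    # keys are normalized via Dictionary.toKey
```
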
c-voegele/PowerSimData
|
[
"5b1500e573f00a34571316796ff442bfa753871a"
] |
[
"powersimdata/input/tests/test_transform_grid.py"
] |
[
"import copy\n\nimport numpy as np\nimport pytest\n\nfrom powersimdata.input.change_table import ChangeTable\nfrom powersimdata.input.grid import Grid\nfrom powersimdata.input.transform_grid import TransformGrid\n\ngrid = Grid([\"USA\"])\n\n\[email protected]\ndef ct():\n return ChangeTable(grid)\n\n\ndef get_plant_id(zone_id, gen_type):\n plant_id = (\n grid.plant.groupby([\"zone_id\", \"type\"])\n .get_group((zone_id, gen_type))\n .index.values.tolist()\n )\n return plant_id\n\n\ndef get_branch_id(zone_id):\n branch_id = (\n grid.branch.groupby([\"from_zone_id\", \"to_zone_id\"])\n .get_group((zone_id, zone_id))\n .index.values.tolist()\n )\n return branch_id\n\n\ndef test_that_only_capacities_are_modified_when_scaling_renewable_gen(ct):\n gen_type = \"solar\"\n zone = \"Utah\"\n factor = 1.41\n ct.scale_plant_capacity(gen_type, zone_name={zone: factor})\n new_grid = TransformGrid(grid, ct.ct).get_grid()\n\n ref_grid = copy.deepcopy(grid)\n plant_id = get_plant_id(grid.zone2id[zone], gen_type)\n\n assert new_grid != ref_grid\n ref_grid.plant.loc[plant_id, [\"Pmax\", \"Pmin\"]] *= factor\n assert new_grid == ref_grid\n\n\ndef test_scale_gen_capacity_one_zone(ct):\n gen_type = \"coal\"\n zone = \"Colorado\"\n factor = 2.0\n ct.scale_plant_capacity(gen_type, zone_name={zone: factor})\n new_grid = TransformGrid(grid, ct.ct).get_grid()\n plant_id = get_plant_id(grid.zone2id[zone], gen_type)\n\n pmax = grid.plant.Pmax\n new_pmax = new_grid.plant.Pmax\n\n assert new_grid != grid\n assert not new_pmax.equals(factor * pmax)\n assert new_pmax.loc[plant_id].equals(factor * pmax.loc[plant_id])\n\n\ndef test_scale_thermal_gen_gencost_two_types_two_zones(ct):\n gen_type = [\"ng\", \"coal\"]\n zone = [\"Louisiana\", \"Montana Eastern\"]\n factor = [0.8, 1.25]\n for i, r in enumerate(gen_type):\n ct.scale_plant_capacity(r, zone_name={zone[i]: factor[i]})\n new_grid = TransformGrid(grid, ct.ct).get_grid()\n plant_id = []\n for z, r in zip(zone, gen_type):\n plant_id.append(get_plant_id(grid.zone2id[z], r))\n\n c0 = grid.gencost[\"before\"].c0\n new_c0 = new_grid.gencost[\"before\"].c0\n c1 = grid.gencost[\"before\"].c1\n new_c1 = new_grid.gencost[\"before\"].c1\n c2 = grid.gencost[\"before\"].c2\n new_c2 = new_grid.gencost[\"before\"].c2\n\n assert new_grid != grid\n assert new_c1.equals(c1)\n for f, i in zip(factor, plant_id):\n assert new_c0.loc[i].equals(f * c0.loc[i])\n assert new_c2.loc[i].equals(c2.loc[i] / f)\n\n\ndef test_scale_renewable_gen_gencost_one_zone(ct):\n ct.scale_plant_capacity(\"wind\", zone_name={\"Washington\": 2.3})\n new_grid = TransformGrid(grid, ct.ct).get_grid()\n\n assert new_grid != grid\n assert new_grid.gencost[\"before\"].c0.equals(grid.gencost[\"before\"].c0)\n assert new_grid.gencost[\"before\"].c1.equals(grid.gencost[\"before\"].c1)\n assert new_grid.gencost[\"before\"].c2.equals(grid.gencost[\"before\"].c2)\n\n\ndef test_scale_gen_one_plant(ct):\n plant_id = 3000\n gen_type = grid.plant.loc[plant_id].type\n factor = 0.33\n ct.scale_plant_capacity(gen_type, plant_id={plant_id: factor})\n new_grid = TransformGrid(grid, ct.ct).get_grid()\n\n pmax = grid.plant.Pmax\n new_pmax = new_grid.plant.Pmax\n pmin = grid.plant.Pmin\n new_pmin = new_grid.plant.Pmin\n\n assert new_grid != grid\n assert not new_pmax.equals(factor * pmax)\n assert not new_pmin.equals(factor * pmin)\n assert new_pmax.loc[plant_id] == factor * pmax.loc[plant_id]\n assert new_pmin.loc[plant_id] == factor * pmin.loc[plant_id]\n\n if gen_type in [\"coal\", \"dfo\", \"geothermal\", \"ng\", \"nuclear\"]:\n c0 
= grid.gencost[\"before\"].c0\n new_c0 = new_grid.gencost[\"before\"].c0\n assert not new_c0.equals(factor * c0)\n assert new_c0.loc[plant_id] == factor * c0.loc[plant_id]\n\n c1 = grid.gencost[\"before\"].c1\n new_c1 = new_grid.gencost[\"before\"].c1\n assert new_c1.equals(c1)\n\n c2 = grid.gencost[\"before\"].c2\n new_c2 = new_grid.gencost[\"before\"].c2\n assert not new_c2.equals(c2 / factor)\n assert new_c2.loc[plant_id] == c2.loc[plant_id] / factor\n\n\ndef test_scale_gencost_one_plant(ct):\n # This must be the plant ID of a non-zero-cost resource\n plant_id = 3000\n gen_type = grid.plant.loc[plant_id].type\n factor = 1.5\n ct.scale_plant_cost(gen_type, plant_id={plant_id: factor})\n new_grid = TransformGrid(grid, ct.ct).get_grid()\n\n old_gencost = grid.gencost[\"before\"]\n new_gencost = new_grid.gencost[\"before\"]\n modified_columns = [\"c0\", \"c1\", \"c2\"]\n non_modified_columns = set(old_gencost.columns) - set(modified_columns)\n\n assert new_grid != grid\n # Make sure we don't mess with the plant dataframe\n assert new_grid.plant.equals(grid.plant)\n # Make sure we modify cost coefficient columns and only those columns\n assert new_gencost.loc[plant_id, modified_columns].equals(\n old_gencost.loc[plant_id, modified_columns] * factor\n )\n assert new_gencost.loc[plant_id, non_modified_columns].equals(\n old_gencost.loc[plant_id, non_modified_columns]\n )\n\n\ndef test_scale_gencost_two_types_two_zones(ct):\n gen_type = [\"ng\", \"coal\"]\n zone = [\"Louisiana\", \"Montana Eastern\"]\n factor = [0.8, 1.25]\n for i, r in enumerate(gen_type):\n ct.scale_plant_cost(r, zone_name={zone[i]: factor[i]})\n new_grid = TransformGrid(grid, ct.ct).get_grid()\n plant_id = []\n for z, r in zip(zone, gen_type):\n plant_id.append(get_plant_id(grid.zone2id[z], r))\n\n old_gencost = grid.gencost[\"before\"]\n new_gencost = new_grid.gencost[\"before\"]\n modified_columns = [\"c0\", \"c1\", \"c2\"]\n non_modified_columns = set(old_gencost.columns) - set(modified_columns)\n\n assert new_grid != grid\n # Make sure we don't mess with the plant dataframe\n assert new_grid.plant.equals(grid.plant)\n # Make sure we didn't mess with any other plants\n changed_plants = set().union(*plant_id)\n unchanged_plants = set(grid.plant.index.tolist()) - changed_plants\n assert old_gencost.loc[unchanged_plants].equals(new_gencost.loc[unchanged_plants])\n for f, i in zip(factor, plant_id):\n # Make sure we modify cost coefficient columns and only those columns\n assert new_gencost.loc[i, modified_columns].equals(\n old_gencost.loc[i, modified_columns] * f\n )\n assert new_gencost.loc[i, non_modified_columns].equals(\n old_gencost.loc[i, non_modified_columns]\n )\n for f, i in zip(factor, plant_id):\n # Make sure we modify cost coefficient columns and only those columns\n assert new_gencost.loc[i, modified_columns].equals(\n old_gencost.loc[i, modified_columns] * f\n )\n assert new_gencost.loc[i, non_modified_columns].equals(\n old_gencost.loc[i, non_modified_columns]\n )\n\n\ndef test_scale_gen_pmin_one_plant(ct):\n # This must be the plant ID of a non-zero-cost resource\n plant_id = 3000\n gen_type = grid.plant.loc[plant_id].type\n factor = 1.5\n ct.scale_plant_pmin(gen_type, plant_id={plant_id: factor})\n new_grid = TransformGrid(grid, ct.ct).get_grid()\n\n old_plant = grid.plant\n new_plant = new_grid.plant\n modified_columns = [\"Pmin\"]\n non_modified_columns = set(old_plant.columns) - set(modified_columns)\n\n assert not new_plant.equals(old_plant)\n # Make sure we don't mess with the gencost dataframe\n 
assert new_grid.gencost[\"before\"].equals(grid.gencost[\"before\"])\n # Make sure we modify Pmin and only Pmin\n assert new_plant.loc[plant_id, modified_columns].equals(\n old_plant.loc[plant_id, modified_columns] * factor\n )\n assert new_plant.loc[plant_id, non_modified_columns].equals(\n old_plant.loc[plant_id, non_modified_columns]\n )\n\n\ndef test_scale_gen_pmin_two_types_two_zones(ct):\n gen_type = [\"ng\", \"coal\"]\n zone = [\"Louisiana\", \"Montana Eastern\"]\n factor = [0.8, 1.25]\n for i, r in enumerate(gen_type):\n ct.scale_plant_pmin(r, zone_name={zone[i]: factor[i]})\n new_grid = TransformGrid(grid, ct.ct).get_grid()\n plant_id = []\n for z, r in zip(zone, gen_type):\n plant_id.append(get_plant_id(grid.zone2id[z], r))\n\n old_plant = grid.plant\n new_plant = new_grid.plant\n modified_columns = [\"Pmin\"]\n non_modified_columns = set(old_plant.columns) - set(modified_columns)\n\n assert not new_plant.equals(old_plant)\n # Make sure we don't mess with the gencost dataframe\n assert new_grid.gencost[\"before\"].equals(grid.gencost[\"before\"])\n # Make sure we modify Pmin and only Pmin\n changed_plants = set().union(*plant_id)\n unchanged_plants = set(grid.plant.index.tolist()) - changed_plants\n assert old_plant.loc[unchanged_plants].equals(new_plant.loc[unchanged_plants])\n for f, i in zip(factor, plant_id):\n # Make sure we modify cost coefficient columns and only those columns\n assert new_plant.loc[i, modified_columns].equals(\n old_plant.loc[i, modified_columns] * f\n )\n assert new_plant.loc[i, non_modified_columns].equals(\n old_plant.loc[i, non_modified_columns]\n )\n\n\ndef test_scale_branch_one_zone(ct):\n factor = 4\n zone = \"Washington\"\n ct.scale_branch_capacity(zone_name={\"Washington\": factor})\n new_grid = TransformGrid(grid, ct.ct).get_grid()\n\n branch_id = get_branch_id(grid.zone2id[zone])\n\n capacity = grid.branch.rateA\n new_capacity = new_grid.branch.rateA\n x = grid.branch.x\n new_x = new_grid.branch.x\n\n assert new_grid != grid\n assert not new_capacity.equals(factor * capacity)\n assert new_capacity.loc[branch_id].equals(factor * capacity.loc[branch_id])\n assert not new_x.equals(x / factor)\n assert new_x.loc[branch_id].equals(x.loc[branch_id] / factor)\n\n\ndef test_scale_branch_two_zones(ct):\n factor = [0.3, 1.25]\n zone = [\"West Virginia\", \"Nevada\"]\n ct.scale_branch_capacity(zone_name={z: f for z, f in zip(zone, factor)})\n new_grid = TransformGrid(grid, ct.ct).get_grid()\n\n branch_id = []\n for z in zone:\n branch_id.append(get_branch_id(grid.zone2id[z]))\n\n capacity = grid.branch.rateA\n new_capacity = new_grid.branch.rateA\n x = grid.branch.x\n new_x = new_grid.branch.x\n\n assert new_grid.plant.equals(grid.plant)\n for f, i in zip(factor, branch_id):\n assert new_capacity.loc[i].equals(f * capacity.loc[i])\n assert new_x.loc[i].equals(x.loc[i] / f)\n\n\ndef test_scale_one_branch(ct):\n branch_id = 11111\n factor = 1.62\n ct.scale_branch_capacity(branch_id={branch_id: factor})\n new_grid = TransformGrid(grid, ct.ct).get_grid()\n\n capacity = grid.branch.rateA\n new_capacity = new_grid.branch.rateA\n x = grid.branch.x\n new_x = new_grid.branch.x\n\n assert new_grid != grid\n assert new_grid.dcline.equals(grid.dcline)\n assert not new_capacity.equals(factor * capacity)\n assert new_capacity.loc[branch_id] == factor * capacity.loc[branch_id]\n assert not new_x.equals(x / factor)\n assert new_x.loc[branch_id] == x.loc[branch_id] / factor\n\n\ndef test_scale_dcline(ct):\n dcline_id = [2, 4, 6]\n factor = [1.2, 1.6, 0]\n 
ct.scale_dcline_capacity({i: f for i, f in zip(dcline_id, factor)})\n new_grid = TransformGrid(grid, ct.ct).get_grid()\n\n pmin = grid.dcline.Pmin\n new_pmin = new_grid.dcline.Pmin\n pmax = grid.dcline.Pmax\n new_pmax = new_grid.dcline.Pmax\n status = grid.dcline.status\n new_status = new_grid.dcline.status\n\n assert new_grid != grid\n assert not new_status.equals(status)\n assert not new_pmin.equals(pmin)\n assert not new_pmax.equals(pmax)\n for i, f in zip(dcline_id, factor):\n assert new_pmin.loc[i] == f * pmin.loc[i]\n assert new_pmax.loc[i] == f * pmax.loc[i]\n assert status.loc[i] == 1\n assert new_status.loc[i] == 0 if f == 0 else 1\n\n\ndef test_add_branch(ct):\n new_branch = [\n {\"capacity\": 150, \"from_bus_id\": 8, \"to_bus_id\": 100},\n {\"capacity\": 250, \"from_bus_id\": 8000, \"to_bus_id\": 30000},\n {\"capacity\": 50, \"from_bus_id\": 1, \"to_bus_id\": 655},\n {\"capacity\": 125, \"from_bus_id\": 3001005, \"to_bus_id\": 3008157},\n ]\n ct.add_branch(new_branch)\n new_grid = TransformGrid(grid, ct.ct).get_grid()\n new_capacity = new_grid.branch.rateA.values\n new_index = new_grid.branch.index\n old_index = grid.branch.index\n\n assert new_grid.branch.shape[0] != grid.branch.shape[0]\n assert np.array_equal(\n new_index[-len(new_branch) :],\n range(old_index[-1] + 1, old_index[-1] + 1 + len(new_branch)),\n )\n assert np.array_equal(\n new_capacity[-len(new_branch) :],\n np.array([ac[\"capacity\"] for ac in new_branch]),\n )\n\n\ndef test_added_branch_scaled(ct):\n new_branch = [\n {\"capacity\": 150, \"from_bus_id\": 8, \"to_bus_id\": 100},\n {\"capacity\": 250, \"from_bus_id\": 8000, \"to_bus_id\": 30000},\n {\"capacity\": 50, \"from_bus_id\": 1, \"to_bus_id\": 655},\n {\"capacity\": 125, \"from_bus_id\": 3001005, \"to_bus_id\": 3008157},\n ]\n ct.add_branch(new_branch)\n prev_max_branch_id = grid.branch.index.max()\n new_branch_ids = list(\n range(prev_max_branch_id + 1, prev_max_branch_id + 1 + len(new_branch))\n )\n ct.scale_branch_capacity(branch_id={new_branch_ids[0]: 2})\n new_grid = TransformGrid(grid, ct.ct).get_grid()\n new_capacity = new_grid.branch.rateA\n\n for i, new_id in enumerate(new_branch_ids):\n if i == 0:\n assert new_capacity.loc[new_branch_ids[i]] == new_branch[i][\"capacity\"] * 2\n else:\n assert new_capacity.loc[new_id] == new_branch[i][\"capacity\"]\n\n\ndef test_add_dcline(ct):\n new_dcline = [\n {\"capacity\": 2000, \"from_bus_id\": 200, \"to_bus_id\": 2000},\n {\"capacity\": 1000, \"from_bus_id\": 3001001, \"to_bus_id\": 1},\n {\"capacity\": 8000, \"from_bus_id\": 12000, \"to_bus_id\": 5996},\n ]\n ct.add_dcline(new_dcline)\n new_grid = TransformGrid(grid, ct.ct).get_grid()\n new_pmin = new_grid.dcline.Pmin.values\n new_pmax = new_grid.dcline.Pmax.values\n new_index = new_grid.dcline.index\n old_index = grid.dcline.index\n\n assert new_grid.dcline.shape[0] != grid.dcline.shape[0]\n assert np.array_equal(\n new_index[-len(new_dcline) :],\n range(old_index[-1] + 1, old_index[-1] + 1 + len(new_dcline)),\n )\n assert np.array_equal(\n new_pmin[-len(new_dcline) :],\n np.array([-1 * dc[\"capacity\"] for dc in new_dcline]),\n )\n assert np.array_equal(\n new_pmax[-len(new_dcline) :],\n np.array([dc[\"capacity\"] for dc in new_dcline]),\n )\n\n\ndef test_add_gen_add_entries_in_plant_data_frame(ct):\n new_plant = [\n {\"type\": \"solar\", \"bus_id\": 2050363, \"Pmax\": 85},\n {\"type\": \"wind\", \"bus_id\": 9, \"Pmin\": 5, \"Pmax\": 60},\n {\"type\": \"wind_offshore\", \"bus_id\": 13802, \"Pmax\": 175},\n {\n \"type\": \"ng\",\n \"bus_id\": 2010687,\n 
\"Pmin\": 25,\n \"Pmax\": 400,\n \"c0\": 1500,\n \"c1\": 50,\n \"c2\": 0.5,\n },\n ]\n ct.add_plant(new_plant)\n new_grid = TransformGrid(grid, ct.ct).get_grid()\n\n new_pmin = new_grid.plant.Pmin.values\n new_pmax = new_grid.plant.Pmax.values\n new_status = new_grid.plant.status.values\n new_index = new_grid.plant.index\n old_index = grid.plant.index\n\n assert new_grid.plant.shape[0] != grid.plant.shape[0]\n assert np.array_equal(\n new_index[-len(new_plant) :],\n range(old_index[-1] + 1, old_index[-1] + 1 + len(new_plant)),\n )\n assert np.array_equal(\n new_pmin[-len(new_plant) :],\n np.array([p[\"Pmin\"] if \"Pmin\" in p.keys() else 0 for p in new_plant]),\n )\n assert np.array_equal(\n new_pmax[-len(new_plant) :], np.array([p[\"Pmax\"] for p in new_plant])\n )\n assert np.array_equal(new_status[-len(new_plant) :], np.array([1] * len(new_plant)))\n\n\ndef test_add_gen_add_entries_in_gencost_data_frame(ct):\n new_plant = [\n {\"type\": \"solar\", \"bus_id\": 2050363, \"Pmax\": 15},\n {\"type\": \"wind\", \"bus_id\": 555, \"Pmin\": 5, \"Pmax\": 60},\n {\"type\": \"wind_offshore\", \"bus_id\": 60123, \"Pmax\": 175},\n {\n \"type\": \"ng\",\n \"bus_id\": 2010687,\n \"Pmin\": 25,\n \"Pmax\": 400,\n \"c0\": 1500,\n \"c1\": 50,\n \"c2\": 0.5,\n },\n {\"type\": \"solar\", \"bus_id\": 2050363, \"Pmax\": 15},\n ]\n ct.add_plant(new_plant)\n new_grid = TransformGrid(grid, ct.ct).get_grid()\n\n new_c0 = new_grid.gencost[\"before\"].c0.values\n new_c1 = new_grid.gencost[\"before\"].c1.values\n new_c2 = new_grid.gencost[\"before\"].c2.values\n new_type = new_grid.gencost[\"before\"].type.values\n new_startup = new_grid.gencost[\"before\"].startup.values\n new_shutdown = new_grid.gencost[\"before\"].shutdown.values\n new_n = new_grid.gencost[\"before\"].n.values\n new_index = new_grid.gencost[\"before\"].index\n old_index = grid.gencost[\"before\"].index\n\n assert new_grid.gencost[\"before\"] is new_grid.gencost[\"after\"]\n assert new_grid.gencost[\"before\"].shape[0] != grid.gencost[\"before\"].shape[0]\n assert np.array_equal(\n new_index[-len(new_plant) :],\n range(old_index[-1] + 1, old_index[-1] + 1 + len(new_plant)),\n )\n assert np.array_equal(\n new_c0[-len(new_plant) :],\n np.array([p[\"c0\"] if \"c0\" in p.keys() else 0 for p in new_plant]),\n )\n assert np.array_equal(\n new_c1[-len(new_plant) :],\n np.array([p[\"c1\"] if \"c1\" in p.keys() else 0 for p in new_plant]),\n )\n assert np.array_equal(\n new_c2[-len(new_plant) :],\n np.array([p[\"c2\"] if \"c2\" in p.keys() else 0 for p in new_plant]),\n )\n assert np.array_equal(new_type[-len(new_plant) :], np.array([2] * len(new_plant)))\n assert np.array_equal(\n new_startup[-len(new_plant) :], np.array([0] * len(new_plant))\n )\n assert np.array_equal(\n new_shutdown[-len(new_plant) :], np.array([0] * len(new_plant))\n )\n assert np.array_equal(new_n[-len(new_plant) :], np.array([3] * len(new_plant)))\n\n\ndef test_add_storage(ct):\n storage = [\n {\"bus_id\": 2021005, \"capacity\": 116.0},\n {\"bus_id\": 2028827, \"capacity\": 82.5},\n {\"bus_id\": 2028060, \"capacity\": 82.5},\n ]\n ct.add_storage_capacity(storage)\n new_grid = TransformGrid(grid, ct.ct).get_grid()\n\n pmin = new_grid.storage[\"gen\"].Pmin.values\n pmax = new_grid.storage[\"gen\"].Pmax.values\n\n assert new_grid.storage[\"gen\"].shape[0] != grid.storage[\"gen\"].shape[0]\n assert np.array_equal(pmin, -1 * np.array([d[\"capacity\"] for d in storage]))\n assert np.array_equal(pmax, np.array([d[\"capacity\"] for d in storage]))\n\n\ndef test_add_bus(ct):\n prev_num_buses 
= len(grid.bus.index)\n prev_max_bus = grid.bus.index.max()\n prev_num_subs = len(grid.sub.index)\n ct.ct[\"new_bus\"] = [\n # These three are buses at new locations\n {\"lat\": 40, \"lon\": 50.5, \"zone_id\": 2, \"Pd\": 0, \"baseKV\": 69},\n {\"lat\": -40.5, \"lon\": -50, \"zone_id\": 201, \"Pd\": 10, \"baseKV\": 230},\n # We want to test that we can add two new buses at the same lat/lon\n {\"lat\": -40.5, \"lon\": -50, \"zone_id\": 201, \"Pd\": 5, \"baseKV\": 69},\n # This one is at the lat/lon of an existing substation\n {\"lat\": 36.0155, \"lon\": -114.738, \"zone_id\": 208, \"Pd\": 0, \"baseKV\": 345},\n ]\n expected_interconnects = (\"Eastern\", \"Western\", \"Western\", \"Western\")\n new_grid = TransformGrid(grid, ct.ct).get_grid()\n assert len(new_grid.bus.index) == prev_num_buses + len(ct.ct[\"new_bus\"])\n for i, new_bus in enumerate(ct.ct[\"new_bus\"]):\n new_bus_id = prev_max_bus + 1 + i\n for k, v in new_bus.items():\n assert new_grid.bus.loc[new_bus_id, k] == v\n assert new_grid.bus.loc[new_bus_id, \"interconnect\"] == expected_interconnects[i]\n # Ensure that we still match with the other dataframes that matter\n assert len(new_grid.bus) == len(new_grid.bus2sub)\n assert len(new_grid.bus2sub.sub_id.unique()) == len(new_grid.sub)\n # Even though we add three new buses, there are only two unique lat/lon pairs\n assert len(new_grid.sub) == prev_num_subs + 2\n assert new_grid.bus.index.dtype == grid.bus.index.dtype\n assert new_grid.bus2sub.index.dtype == grid.bus2sub.index.dtype\n assert new_grid.sub.index.dtype == grid.sub.index.dtype\n\n\ndef test_remove_branch(ct):\n assert 0 in grid.branch.index\n ct.ct[\"remove_branch\"] = {0}\n new_grid = TransformGrid(grid, ct.ct).get_grid()\n assert 0 not in new_grid.branch.index\n ct.ct[\"remove_branch\"] = {1, 2}\n new_grid = TransformGrid(grid, ct.ct).get_grid()\n assert all(i not in new_grid.branch.index for i in [1, 2])\n\n\ndef test_remove_bus(ct):\n assert 1 in grid.bus.index\n ct.ct[\"remove_bus\"] = {1}\n new_grid = TransformGrid(grid, ct.ct).get_grid()\n assert 1 not in new_grid.bus.index\n ct.ct[\"remove_bus\"] = {2, 3}\n new_grid = TransformGrid(grid, ct.ct).get_grid()\n assert all(i not in new_grid.bus.index for i in [2, 3])\n"
] |
[
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
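
The pytest module in the row above repeatedly exercises the same three-step pattern: build a ChangeTable against a base Grid, record a change, and materialize it with TransformGrid. A condensed sketch of that pattern, lifted from the first test and assuming the USA base grid data is available locally (as the tests themselves require):

```python
from powersimdata.input.change_table import ChangeTable
from powersimdata.input.grid import Grid
from powersimdata.input.transform_grid import TransformGrid

grid = Grid(["USA"])                 # base grid; requires local grid data
ct = ChangeTable(grid)

# Record a change: scale Utah's solar capacity by 1.41, as in the first test.
ct.scale_plant_capacity("solar", zone_name={"Utah": 1.41})

# Apply the recorded changes to produce a new, independent Grid object.
new_grid = TransformGrid(grid, ct.ct).get_grid()
assert new_grid != grid              # only Pmax/Pmin of Utah solar plants differ
```
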
codacy-badger/toughio
|
[
"8d4f3d8408d5507a83f65e7f393b13be08d42aca"
] |
[
"toughio/mesh/avsucd/_avsucd.py"
] |
[
"from __future__ import division, unicode_literals, with_statement\n\nimport logging\n\nimport numpy\n\nfrom ...__about__ import __version__ as version\nfrom .._common import meshio_data\nfrom .._mesh import Cells, Mesh\n\n__all__ = [\n \"read\",\n \"write\",\n]\n\n\nmeshio_to_avsucd_type = {\n \"vertex\": \"pt\",\n \"line\": \"line\",\n \"triangle\": \"tri\",\n \"quad\": \"quad\",\n \"tetra\": \"tet\",\n \"pyramid\": \"pyr\",\n \"wedge\": \"prism\",\n \"hexahedron\": \"hex\",\n}\navsucd_to_meshio_type = {v: k for k, v in meshio_to_avsucd_type.items()}\n\n\nmeshio_to_avsucd_order = {\n \"vertex\": [0],\n \"line\": [0, 1],\n \"triangle\": [0, 1, 2],\n \"quad\": [0, 1, 2, 3],\n \"tetra\": [0, 1, 3, 2],\n \"pyramid\": [4, 0, 1, 2, 3],\n \"wedge\": [3, 4, 5, 0, 1, 2],\n \"hexahedron\": [4, 5, 6, 7, 0, 1, 2, 3],\n}\n\n\navsucd_to_meshio_order = {\n k: (v if k != \"pyramid\" else [1, 2, 3, 4, 0])\n for k, v in meshio_to_avsucd_order.items()\n}\n\n\ndef read(filename):\n \"\"\"Read AVS-UCD ASCII file.\"\"\"\n with open(filename, \"r\") as f:\n out = read_buffer(f)\n return out\n\n\ndef read_buffer(f):\n # Skip comments and unpack first line\n num_nodes, num_cells, num_node_data, num_cell_data, _ = numpy.genfromtxt(\n f, max_rows=1, dtype=int, comments=\"#\"\n )\n\n # Read nodes\n point_ids, points = _read_nodes(f, num_nodes)\n\n # Read cells\n cell_ids, cells, cell_data = _read_cells(f, num_cells, point_ids)\n\n # Read node data\n if num_node_data:\n point_data = _read_data(f, num_nodes, point_ids)\n else:\n point_data = {}\n\n # Read cell data\n if num_cell_data:\n cdata = _read_data(f, num_cells, cell_ids)\n sections = numpy.cumsum([len(c[1]) for c in cells[:-1]])\n for k, v in cdata.items():\n cell_data[k] = numpy.split(v, sections)\n\n return Mesh(points, cells, point_data=point_data, cell_data=cell_data)\n\n\ndef _read_nodes(f, num_nodes):\n data = numpy.genfromtxt(f, max_rows=num_nodes)\n points_ids = {int(pid): i for i, pid in enumerate(data[:, 0])}\n return points_ids, data[:, 1:]\n\n\ndef _read_cells(f, num_cells, point_ids):\n cells = []\n cell_ids = {}\n cell_data = {\"avsucd:material\": []}\n count = 0\n for _ in range(num_cells):\n line = f.readline().strip().split()\n cell_id = int(line[0])\n cell_mat = int(line[1])\n cell_type = avsucd_to_meshio_type[line[2]]\n corner = [point_ids[int(pid)] for pid in line[3:]]\n\n if len(cells) > 0 and cells[-1].type == cell_type:\n cells[-1].data.append(corner)\n cell_data[\"avsucd:material\"][-1].append(cell_mat)\n else:\n cells.append(Cells(cell_type, [corner]))\n cell_data[\"avsucd:material\"].append([cell_mat])\n\n cell_ids[cell_id] = count\n count += 1\n\n # Convert to numpy arrays\n for k, c in enumerate(cells):\n cells[k] = Cells(c.type, numpy.array(c.data)[:, avsucd_to_meshio_order[c.type]])\n cell_data[\"avsucd:material\"][k] = numpy.array(cell_data[\"avsucd:material\"][k])\n return cell_ids, cells, cell_data\n\n\ndef _read_data(f, num_entities, entity_ids):\n line = f.readline().strip().split()\n data_size = [int(i) for i in line[1:]]\n\n labels = {}\n data = {}\n for i, dsize in enumerate(data_size):\n line = f.readline().strip().split(\",\")\n labels[i] = line[0].strip().replace(\" \", \"_\")\n data[labels[i]] = (\n numpy.empty(num_entities)\n if dsize == 1\n else numpy.empty((num_entities, dsize))\n )\n\n for _ in range(num_entities):\n line = f.readline().strip().split()\n eid = entity_ids[int(line[0])]\n j = 0\n for i, dsize in enumerate(data_size):\n if dsize == 1:\n data[labels[i]][eid] = float(line[j + 1])\n else:\n 
data[labels[i]][eid] = [\n float(val) for val in line[j + 1 : j + 1 + dsize]\n ]\n j += dsize\n return data\n\n\ndef write(filename, mesh):\n \"\"\"Write AVS-UCD ASCII file.\"\"\"\n if mesh.points.shape[1] == 2:\n logging.warning(\n \"AVS-UCD requires 3D points, but 2D points given. \"\n \"Appending 0 third component.\"\n )\n mesh.points = numpy.column_stack(\n [mesh.points[:, 0], mesh.points[:, 1], numpy.zeros(mesh.points.shape[0])]\n )\n\n with open(filename, \"w\") as f:\n # Write meshio version\n f.write(\"# Written by toughio v{}\\n\".format(version))\n\n # Write first line\n num_nodes = len(mesh.points)\n num_cells = sum(len(c.data) for c in mesh.cells)\n num_node_data = [\n 1 if v.ndim == 1 else v.shape[1] for v in mesh.point_data.values()\n ]\n num_cell_data = [\n 1 if vv.ndim == 1 else vv.shape[1]\n for k, v in mesh.cell_data.items()\n for vv in v\n if k not in meshio_data\n ]\n num_node_data_sum = sum(num_node_data)\n num_cell_data_sum = sum(num_cell_data)\n f.write(\n \"{} {} {} {} 0\\n\".format(\n num_nodes, num_cells, num_node_data_sum, num_cell_data_sum\n )\n )\n\n # Write nodes\n _write_nodes(f, mesh.points)\n\n # Write cells\n _write_cells(f, mesh.cells, mesh.cell_data, num_cells)\n\n # Write node data\n if num_node_data_sum:\n labels = mesh.point_data.keys()\n data_array = numpy.column_stack([v for v in mesh.point_data.values()])\n _write_data(\n f, labels, data_array, num_nodes, num_node_data, num_node_data_sum\n )\n\n # Write cell data\n if num_cell_data_sum:\n labels = [k for k in mesh.cell_data.keys() if k not in meshio_data]\n data_array = numpy.column_stack(\n [\n numpy.concatenate(v)\n for k, v in mesh.cell_data.items()\n if k not in meshio_data\n ]\n )\n _write_data(\n f, labels, data_array, num_cells, num_cell_data, num_cell_data_sum\n )\n\n\ndef _write_nodes(f, points):\n for i, (x, y, z) in enumerate(points):\n f.write(\"{} {} {} {}\\n\".format(i + 1, x, y, z))\n\n\ndef _write_cells(f, cells, cell_data, num_cells):\n # Interoperability with other formats\n mat_data = None\n for k in cell_data.keys():\n if k in meshio_data:\n mat_data = k\n break\n\n # Material array\n if mat_data:\n material = numpy.concatenate(cell_data[mat_data])\n else:\n material = numpy.zeros(num_cells, dtype=int)\n\n # Loop over cells\n i = 0\n for k, v in cells:\n for cell in v[:, meshio_to_avsucd_order[k]]:\n cell_str = \" \".join(str(c + 1) for c in cell)\n f.write(\n \"{} {} {} {}\\n\".format(\n i + 1, int(material[i]), meshio_to_avsucd_type[k], cell_str\n )\n )\n i += 1\n\n\ndef _write_data(f, labels, data_array, num_entities, num_data, num_data_sum):\n num_data_str = \" \".join(str(i) for i in num_data)\n f.write(\"{} {}\\n\".format(len(num_data), num_data_str))\n\n for label in labels:\n f.write(\"{}, real\\n\".format(label))\n\n data_array = numpy.column_stack((numpy.arange(1, num_entities + 1), data_array))\n numpy.savetxt(f, data_array, delimiter=\" \", fmt=[\"%d\"] + [\"%.14e\"] * num_data_sum)\n"
] |
[
[
"numpy.split",
"numpy.arange",
"numpy.genfromtxt",
"numpy.concatenate",
"numpy.savetxt",
"numpy.array",
"numpy.zeros",
"numpy.empty"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
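
The _avsucd.py module in the row above exposes read and write for ASCII AVS-UCD meshes. A round-trip sketch follows; the import path and the file name model.avs are assumptions chosen to mirror the file path recorded in this row and may differ between toughio versions.

```python
# Sketch only: the import path mirrors the file path in this row, and the
# file name "model.avs" is a placeholder rather than something shown above.
from toughio.mesh.avsucd import read, write

mesh = read("model.avs")                      # parse nodes, cells, node/cell data
print({c.type: len(c.data) for c in mesh.cells})
print(sorted(mesh.cell_data))                 # includes "avsucd:material"
write("model_copy.avs", mesh)                 # write the mesh back out as AVS-UCD
```
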
embedded-machine-learning/eml-tools
|
[
"9c9d12f9b970a42360bc6ca350f3b67ad822b141",
"9c9d12f9b970a42360bc6ca350f3b67ad822b141",
"9c9d12f9b970a42360bc6ca350f3b67ad822b141"
] |
[
"hardwaremodules/nvidia/convert_tf2_to_trt.py",
"inference_evaluation/inference_utils/image_utils.py",
"training/tf2oda_evaluate_ckpt_performance.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nConvert and infer into Tensor-rt models\n\nLicense_info:\n# ==============================================================================\n# ISC License (ISC)\n# Copyright 2020 Christian Doppler Laboratory for Embedded Machine Learning\n#\n# Permission to use, copy, modify, and/or distribute this software for any\n# purpose with or without fee is hereby granted, provided that the above\n# copyright notice and this permission notice appear in all copies.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\n# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND\n# FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\n# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\n# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE\n# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\n# PERFORMANCE OF THIS SOFTWARE.\n# ==============================================================================\n\n# The following script uses several method fragments the following guide\n# Source: https://towardsai.net/p/deep-learning/cvml-annotation%e2%80%8a-%e2%80%8awhat-it-is-and-how-to-convert-it\n\n\"\"\"\n\n# Futures\n#from __future__ import print_function\n#from __future__ import absolute_import, division, print_function, unicode_literals\n\n# Built-in/Generic Imports\nimport csv\nimport datetime\nimport json\nimport os\nimport time\nimport argparse\n\n# Libs\nimport numpy as np\n#import matplotlib.pyplot as plt\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.python.compiler.tensorrt import trt_convert as trt\nfrom tensorflow.python.saved_model import tag_constants\nfrom tensorflow.keras.preprocessing import image\n#from tensorflow.keras.applications.inception_v3 import InceptionV3\nfrom tensorflow.keras.applications.inception_v3 import preprocess_input, decode_predictions\n\n# Own modules\n\n__author__ = 'Amid Mozelli'\n__copyright__ = 'Copyright 2021, Christian Doppler Laboratory for ' \\\n 'Embedded Machine Learning'\n__credits__ = ['Alexander Wendt', 'Rohit Verma', 'Snehan Kekre']\n__license__ = 'ISC'\n__version__ = '0.1.0'\n__maintainer__ = 'Amid Mozelli'\n__email__ = '[email protected]'\n__status__ = 'Experiental'\n\nparser = argparse.ArgumentParser(description='Benchmarking TRT-Optimized TF-Models')\n\t\t\t\nparser.add_argument('-t', '--tensorflow_model', default='./inceptionv3_saved_model',\n help='Unoptimized Tensorflow model', required=False)\n\t\t\t\t\t\nparser.add_argument('-b', '--batch_size', type=int, default=32,\n help='Batch Size', required=False)\n\t\t\t\t\t\nparser.add_argument('-s', '--image_size', type=str, default='[299, 299]',\n help='List of two coordinates: [Height, Width]', required=False)\n\t\t\t\t\t\nparser.add_argument('-p', '--precision', default='FP32',\n help='TensorRT precision mode: FP32, FP16 or INT8 and input data type.', required=False)\n\nparser.add_argument('-e', '--dtype', default='uint8',\n help='Data type for the input from float32, float16 or uint8.', required=False)\n\nparser.add_argument('-d', '--data_dir', default='./images/validation',\n help='Location of the dataset.', required=False)\n\nparser.add_argument('-out', '--output_dir', default='./exported-models-trt/model_name_trt',\n help='Export location of converted models.', required=False)\n\t\t\t\t\t\n\t\t\t\t\t\nargs, unknown = parser.parse_known_args()\nprint(args)\n\n\ndef 
batch_input(batch_size, data_path, d_type, hw, is_keras=False):\n if d_type == 'float32':\n datatype = np.float32\n elif d_type == 'float16':\n datatype = np.float16\n elif d_type == 'uint8':\n datatype = np.uint8\n else:\n raise ValueError(\"No valid data type provided: \" + d_type + \". It has to be float32, float16 or uint8\")\n\n\n batched_input = np.zeros((batch_size, hw[0], hw[1], 3), dtype=datatype)\n pics = os.listdir(data_path)\n n = len(pics)\n\n for i in range(batch_size):\n img_path = os.path.join(data_path, pics[i % n]) #generating batches\n img = image.load_img(img_path, target_size=(hw[0], hw[1]))\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n if is_keras:\n x = preprocess_input(x) #for models loaded from Keras applications, preprocess should be imported\n batched_input[i, :] = x\n\n batched_input = tf.constant(batched_input)\n return batched_input\n\n\ndef load_tf_saved_model(input_saved_model_dir):\n print(f'Loading saved model {input_saved_model_dir}...')\n start_time = time.time()\n saved_model_loaded = tf.saved_model.load(input_saved_model_dir, tags=[tag_constants.SERVING])\n end_time = time.time()\n print('Loading model took {:4.1f}s'.format(end_time - start_time))\n return saved_model_loaded\n\n\ndef convert_to_trt_graph_and_save(precision_mode, input_saved_model_dir, calibration_data, output_dir='./converted_model'):\n\n if not os.path.isdir(output_dir):\n os.makedirs(output_dir)\n print(\"Created \", output_dir)\n\n if precision_mode == 'FP32':\n precision_mode = trt.TrtPrecisionMode.FP32\n converted_saved__suffix = '_TRTFP32'\n #converted_saved__prefix = 'TRTFP32_'\n\n if precision_mode == 'FP16':\n precision_mode = trt.TrtPrecisionMode.FP16\n converted_saved__suffix = '_TRTFP16'\n #converted_saved__prefix = 'TRTFP16_'\n\n if precision_mode == 'INT8':\n precision_mode = trt.TrtPrecisionMode.INT8\n converted_saved__suffix ='_TRTINT8'\n #converted_saved__prefix = 'TRTINT8_'\n\n \n #r = input_saved_model_dir.split('_')\n #final = input_saved_model_dir[len(r[0])+1:]\n #output_saved_model_dir = converted_saved__prefix + final \n\n #r = input_saved_model_dir.split('/')\n #header = r[0]\n #output_saved_model_dir = os.path.join(output_dir, header + converted_saved__suffix)\n\n conversion_params = trt.DEFAULT_TRT_CONVERSION_PARAMS._replace(\n precision_mode=precision_mode,\n max_workspace_size_bytes=8000000000\n )\n\n converter = trt.TrtGraphConverterV2(\n input_saved_model_dir=input_saved_model_dir,\n conversion_params=conversion_params\n )\n\n print(f'Converting {input_saved_model_dir} to TF-TRT graph precision mode {precision_mode}...')\n\n if precision_mode == trt.TrtPrecisionMode.INT8:\n def calibration_input_fn():\n yield (calibration_data, )\n start_time = time.time()\n converter.convert(calibration_input_fn=calibration_input_fn)\n end_time = time.time()\n else:\n start_time = time.time()\n converter.convert()\n end_time = time.time()\n\n print('Conversion took {:4.1f}s.'.format(end_time - start_time))\n print(f'Saving converted model to {output_dir}')\n converter.save(output_saved_model_dir=output_dir)\n print('Conversion Complete')\n\n\ndef main():\n\n #Image size\n images_size = json.loads(args.image_size)\n\n #making batched inputs\n print(\"=== Prepare batch input ===\")\n batched_input = batch_input(args.batch_size, args.data_dir, args.dtype, images_size, is_keras=False)\n print(\"=== Batch input prepared ===\")\n\n\t#conversion\n print(\"=== Convert input model to trt. 
Model={}, Precision={} ===\".format(args.tensorflow_model, args.precision))\n convert_to_trt_graph_and_save(args.precision, args.tensorflow_model, batched_input, args.output_dir)\n print(\"=== Conversion complete ===\")\n\t\n\nif __name__ == \"__main__\":\n\n main()\n\n print(\"=== Finished ===\")",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nImage handling utilities\n\nLicense_info:\n# ==============================================================================\n# ISC License (ISC)\n# Copyright 2020 Christian Doppler Laboratory for Embedded Machine Learning\n#\n# Permission to use, copy, modify, and/or distribute this software for any\n# purpose with or without fee is hereby granted, provided that the above\n# copyright notice and this permission notice appear in all copies.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\n# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND\n# FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\n# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\n# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE\n# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\n# PERFORMANCE OF THIS SOFTWARE.\n\n# The following script uses several method fragments from Tensorflow\nhttps://github.com/tensorflow/models/blob/master/research/object_detection/dataset_tools/create_pascal_tf_record.py\n\nTensorflow has the following licence:\n# ==============================================================================\n# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"\n\n# Futures\nfrom __future__ import print_function\n\n# Built-in/Generic Imports\nimport os\n\n# Libs\nimport numpy as np\nimport re\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport tensorflow as tf\nfrom six import BytesIO\n\n# Own modules\n\n__author__ = 'Alexander Wendt'\n__copyright__ = 'Copyright 2020, Christian Doppler Laboratory for ' \\\n 'Embedded Machine Learning'\n__credits__ = ['']\n__license__ = 'ISC'\n__version__ = '0.2.0'\n__maintainer__ = 'Alexander Wendt'\n__email__ = '[email protected]'\n__status__ = 'Experiental'\n\ndef load_image_into_numpy_array(path):\n \"\"\"Load an image from file into a numpy array.\n\n Puts image into numpy array to feed into tensorflow graph.\n Note that by convention we put it into a numpy array with shape\n (height, width, channels), where channels=3 for RGB.\n\n Args:\n path: a file path (this can be local or on colossus)\n\n Returns:\n uint8 numpy array with shape (img_height, img_width, 3)\n \"\"\"\n img_data = tf.io.gfile.GFile(path, 'rb').read()\n image = Image.open(BytesIO(img_data))\n (im_width, im_height) = image.size\n return np.array(image.getdata()).reshape(\n (im_height, im_width, 3)).astype(np.uint8)\n\n\ndef get_images_name(image_folder):\n image_folder = image_folder.replace('\\\\', '/')\n image_names = [f for f in os.listdir(image_folder)\n if re.search(r'([a-zA-Z0-9\\s_\\\\.\\-\\(\\):])+(.jpg|.jpeg|.png)$', f)]\n\n return image_names\n\ndef show_save_figure(fig, output_dir=None, filename=None, show_image=True):\n 
'''\n Show and save an image\n\n :param\n output_dir: Directory to put image\n filename: Filename to use. No file ending necessary. Png will be used. If None, then image is not saved.\n If image filename and outputdir is is set, the image will be saved\n show_image: Show image as non blocking. Default: True\n\n\n '''\n if filename:\n if not os.path.isdir(output_dir):\n os.makedirs(output_dir)\n fig.savefig(os.path.join(output_dir, filename))\n if show_image:\n plt.show(block=False)\n plt.pause(0.1)\n plt.close()",
"# Lint as: python3\n# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nr\"\"\"Creates and runs TF2 object detection models.\n\nFor local training/evaluation run:\nPIPELINE_CONFIG_PATH=path/to/pipeline.config\nMODEL_DIR=/tmp/model_outputs\nNUM_TRAIN_STEPS=10000\nSAMPLE_1_OF_N_EVAL_EXAMPLES=1\npython model_main_tf2.py -- \\\n --model_dir=$MODEL_DIR --num_train_steps=$NUM_TRAIN_STEPS \\\n --sample_1_of_n_eval_examples=$SAMPLE_1_OF_N_EVAL_EXAMPLES \\\n --pipeline_config_path=$PIPELINE_CONFIG_PATH \\\n --alsologtostderr\n\"\"\"\nimport glob\nimport os\nimport sys\n\nfrom absl import flags\nimport tensorflow.compat.v2 as tf\nfrom object_detection import model_lib_v2\n\nflags.DEFINE_string('pipeline_config_path', None, 'Path to pipeline config '\n 'file.')\n#flags.DEFINE_integer('num_train_steps', None, 'Number of train steps.')\nflags.DEFINE_bool('eval_on_train_data', False, 'Enable evaluating on train '\n 'data (only supported in distributed training).')\nflags.DEFINE_integer('sample_1_of_n_eval_examples', None, 'Will sample one of '\n 'every n eval input examples, where n is provided.')\nflags.DEFINE_integer('sample_1_of_n_eval_on_train_examples', 5, 'Will sample '\n 'one of every n train input examples for evaluation, '\n 'where n is provided. This is only used if '\n '`eval_training_data` is True.')\nflags.DEFINE_string(\n 'model_dir', None, 'Path to output model directory '\n 'where event and checkpoint files will be written.')\nflags.DEFINE_string(\n 'checkpoint_dir', None, 'Path to directory holding a checkpoint. If '\n '`checkpoint_dir` is provided, this binary operates in eval-only mode, '\n 'writing resulting metrics to `model_dir`.')\nflags.DEFINE_bool('clear_eval', True, 'Clear the evaluation folder from evaluations and create a new one')\n\n#flags.DEFINE_integer('eval_timeout', 3600, 'Number of seconds to wait for an'\n# 'evaluation checkpoint before exiting.')\n\n#flags.DEFINE_bool('use_tpu', False, 'Whether the job is executing on a TPU.')\n#flags.DEFINE_string(\n# 'tpu_name',\n# default=None,\n# help='Name of the Cloud TPU for Cluster Resolvers.')\nflags.DEFINE_integer(\n 'num_workers', 1, 'When num_workers > 1, training uses '\n 'MultiWorkerMirroredStrategy. 
When num_workers = 1 it uses '\n 'MirroredStrategy.')\nflags.DEFINE_integer(\n 'checkpoint_every_n', 1000, 'Integer defining how often we checkpoint.')\nflags.DEFINE_boolean('record_summaries', True,\n ('Whether or not to record summaries during'\n ' training.'))\n\nFLAGS = flags.FLAGS\n\n\ndef main(unused_argv):\n flags.mark_flag_as_required('model_dir')\n flags.mark_flag_as_required('pipeline_config_path')\n flags.mark_flag_as_required('checkpoint_dir')\n\n #Clear evaluations folder\n if FLAGS.clear_eval:\n eval_dir = os.path.join(FLAGS.checkpoint_dir, \"eval\")\n files = glob.glob(eval_dir + \"/*\")\n for f in files:\n print(\"Remove \", f)\n os.remove(f)\n\n tf.config.set_soft_device_placement(True)\n\n #if FLAGS.checkpoint_dir:\n model_lib_v2.eval_continuously(\n pipeline_config_path=FLAGS.pipeline_config_path,\n model_dir=FLAGS.model_dir,\n train_steps=None,\n sample_1_of_n_eval_examples=FLAGS.sample_1_of_n_eval_examples,\n sample_1_of_n_eval_on_train_examples=(FLAGS.sample_1_of_n_eval_on_train_examples),\n checkpoint_dir=FLAGS.checkpoint_dir,\n wait_interval=1, timeout=1)\n\n print(\"Tensorboard evaluation file created in \", FLAGS.checkpoint_dir + \"/eval\")\n print(\"Program End\")\n sys.exit(0) #Exit code, else program throws error\n\nif __name__ == '__main__':\n tf.compat.v1.app.run()\n"
] |
[
[
"tensorflow.keras.preprocessing.image.img_to_array",
"tensorflow.constant",
"tensorflow.saved_model.load",
"numpy.expand_dims",
"tensorflow.keras.preprocessing.image.load_img",
"tensorflow.keras.applications.inception_v3.preprocess_input",
"tensorflow.python.compiler.tensorrt.trt_convert.DEFAULT_TRT_CONVERSION_PARAMS._replace",
"numpy.zeros",
"tensorflow.python.compiler.tensorrt.trt_convert.TrtGraphConverterV2"
],
[
"tensorflow.io.gfile.GFile",
"matplotlib.pyplot.show",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.close"
],
[
"tensorflow.compat.v2.config.set_soft_device_placement",
"tensorflow.compat.v2.compat.v1.app.run"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
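
convert_tf2_to_trt.py in the row above is written as a CLI, but its two helpers can also be driven directly, mirroring its main(). In the sketch below, the module name and paths are assumptions about the local setup taken from the script's own argparse defaults; importing the file executes its argument parsing, which tolerates an unrelated sys.argv because it uses parse_known_args.

```python
# Sketch only: assumes convert_tf2_to_trt.py is importable from the working
# directory; the paths below echo the script's own argparse defaults.
import convert_tf2_to_trt as conv

# Build a batch of calibration/warm-up images with the requested dtype.
batched = conv.batch_input(batch_size=8,
                           data_path="./images/validation",
                           d_type="uint8",
                           hw=[299, 299],
                           is_keras=False)

# Convert the SavedModel to a TF-TRT graph and save it.
conv.convert_to_trt_graph_and_save(precision_mode="FP16",
                                   input_saved_model_dir="./inceptionv3_saved_model",
                                   calibration_data=batched,
                                   output_dir="./exported-models-trt/inceptionv3_trt")
```
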
man-sean/pscgan
|
[
"be87e519cf789dc28b052afcea6c135a74cdbaaa"
] |
[
"nets/inception.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision\n\ntry:\n from torchvision.models.utils import load_state_dict_from_url\nexcept ImportError:\n from torch.utils.model_zoo import load_url as load_state_dict_from_url\n\n# Inception weights ported to Pytorch from\n# http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz\nFID_WEIGHTS_URL = 'https://github.com/mseitzer/pytorch-fid/releases/download/fid_weights/pt_inception-2015-12-05-6726825d.pth' # noqa: E501\n\n\nclass InceptionV3(nn.Module):\n \"\"\"Pretrained InceptionV3 network returning feature maps\"\"\"\n\n # Index of default block of inception to return,\n # corresponds to output of final average pooling\n DEFAULT_BLOCK_INDEX = 3\n\n # Maps feature dimensionality to their output blocks indices\n BLOCK_INDEX_BY_DIM = {\n 64: 0, # First max pooling features\n 192: 1, # Second max pooling featurs\n 768: 2, # Pre-aux classifier features\n 2048: 3 # Final average pooling features\n }\n\n def __init__(self,\n output_blocks=(DEFAULT_BLOCK_INDEX,),\n resize_input=True,\n normalize_input=True,\n requires_grad=False,\n use_fid_inception=True):\n \"\"\"Build pretrained InceptionV3\n Parameters\n ----------\n output_blocks : list of int\n Indices of blocks to return features of. Possible values are:\n - 0: corresponds to output of first max pooling\n - 1: corresponds to output of second max pooling\n - 2: corresponds to output which is fed to aux classifier\n - 3: corresponds to output of final average pooling\n resize_input : bool\n If true, bilinearly resizes input to width and height 299 before\n feeding input to model. As the network without fully connected\n layers is fully convolutional, it should be able to handle inputs\n of arbitrary size, so resizing might not be strictly needed\n normalize_input : bool\n If true, scales the input from range (0, 1) to the range the\n pretrained Inception network expects, namely (-1, 1)\n requires_grad : bool\n If true, parameters of the model require gradients. Possibly useful\n for finetuning the network\n use_fid_inception : bool\n If true, uses the pretrained Inception model used in Tensorflow's\n FID implementation. If false, uses the pretrained Inception model\n available in torchvision. The FID Inception model has different\n weights and a slightly different structure from torchvision's\n Inception model. 
If you want to compute FID scores, you are\n strongly advised to set this parameter to true to get comparable\n results.\n \"\"\"\n super(InceptionV3, self).__init__()\n\n self.resize_input = resize_input\n self.normalize_input = normalize_input\n self.output_blocks = sorted(output_blocks)\n self.last_needed_block = max(output_blocks)\n\n assert self.last_needed_block <= 3, \\\n 'Last possible output block index is 3'\n\n self.blocks = nn.ModuleList()\n\n if use_fid_inception:\n inception = fid_inception_v3()\n else:\n inception = _inception_v3(pretrained=True)\n\n # Block 0: input to maxpool1\n block0 = [\n inception.Conv2d_1a_3x3,\n inception.Conv2d_2a_3x3,\n inception.Conv2d_2b_3x3,\n nn.MaxPool2d(kernel_size=3, stride=2)\n ]\n self.blocks.append(nn.Sequential(*block0))\n\n # Block 1: maxpool1 to maxpool2\n if self.last_needed_block >= 1:\n block1 = [\n inception.Conv2d_3b_1x1,\n inception.Conv2d_4a_3x3,\n nn.MaxPool2d(kernel_size=3, stride=2)\n ]\n self.blocks.append(nn.Sequential(*block1))\n\n # Block 2: maxpool2 to aux classifier\n if self.last_needed_block >= 2:\n block2 = [\n inception.Mixed_5b,\n inception.Mixed_5c,\n inception.Mixed_5d,\n inception.Mixed_6a,\n inception.Mixed_6b,\n inception.Mixed_6c,\n inception.Mixed_6d,\n inception.Mixed_6e,\n ]\n self.blocks.append(nn.Sequential(*block2))\n\n # Block 3: aux classifier to final avgpool\n if self.last_needed_block >= 3:\n block3 = [\n inception.Mixed_7a,\n inception.Mixed_7b,\n inception.Mixed_7c,\n nn.AdaptiveAvgPool2d(output_size=(1, 1))\n ]\n self.blocks.append(nn.Sequential(*block3))\n\n for param in self.parameters():\n param.requires_grad = requires_grad\n\n def forward(self, inp):\n \"\"\"Get Inception feature maps\n Parameters\n ----------\n inp : torch.autograd.Variable\n Input tensor of shape Bx3xHxW. 
Values are expected to be in\n range (0, 1)\n Returns\n -------\n List of torch.autograd.Variable, corresponding to the selected output\n block, sorted ascending by index\n \"\"\"\n outp = []\n x = inp\n\n if self.resize_input:\n x = F.interpolate(x,\n size=(299, 299),\n mode='bilinear',\n align_corners=False)\n\n if self.normalize_input:\n x = 2 * x - 1 # Scale from range (0, 1) to range (-1, 1)\n\n for idx, block in enumerate(self.blocks):\n x = block(x)\n if idx in self.output_blocks:\n outp.append(x)\n\n if idx == self.last_needed_block:\n break\n\n return outp\n\n\ndef _inception_v3(*args, **kwargs):\n \"\"\"Wraps `torchvision.models.inception_v3`\n Skips default weight inititialization if supported by torchvision version.\n See https://github.com/mseitzer/pytorch-fid/issues/28.\n \"\"\"\n try:\n version = tuple(map(int, torchvision.__version__.split('.')[:2]))\n except ValueError:\n # Just a caution against weird version strings\n version = (0,)\n\n if version >= (0, 6):\n kwargs['init_weights'] = False\n\n return torchvision.models.inception_v3(*args, **kwargs)\n\n\ndef fid_inception_v3():\n \"\"\"Build pretrained Inception model for FID computation\n The Inception model for FID computation uses a different set of weights\n and has a slightly different structure than torchvision's Inception.\n This method first constructs torchvision's Inception and then patches the\n necessary parts that are different in the FID Inception model.\n \"\"\"\n inception = _inception_v3(num_classes=1008,\n aux_logits=False,\n pretrained=False)\n inception.Mixed_5b = FIDInceptionA(192, pool_features=32)\n inception.Mixed_5c = FIDInceptionA(256, pool_features=64)\n inception.Mixed_5d = FIDInceptionA(288, pool_features=64)\n inception.Mixed_6b = FIDInceptionC(768, channels_7x7=128)\n inception.Mixed_6c = FIDInceptionC(768, channels_7x7=160)\n inception.Mixed_6d = FIDInceptionC(768, channels_7x7=160)\n inception.Mixed_6e = FIDInceptionC(768, channels_7x7=192)\n inception.Mixed_7b = FIDInceptionE_1(1280)\n inception.Mixed_7c = FIDInceptionE_2(2048)\n\n state_dict = load_state_dict_from_url(FID_WEIGHTS_URL, progress=True)\n inception.load_state_dict(state_dict)\n return inception\n\n\nclass FIDInceptionA(torchvision.models.inception.InceptionA):\n \"\"\"InceptionA block patched for FID computation\"\"\"\n def __init__(self, in_channels, pool_features):\n super(FIDInceptionA, self).__init__(in_channels, pool_features)\n\n def forward(self, x):\n branch1x1 = self.branch1x1(x)\n\n branch5x5 = self.branch5x5_1(x)\n branch5x5 = self.branch5x5_2(branch5x5)\n\n branch3x3dbl = self.branch3x3dbl_1(x)\n branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)\n branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)\n\n # Patch: Tensorflow's average pool does not use the padded zero's in\n # its average calculation\n branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,\n count_include_pad=False)\n branch_pool = self.branch_pool(branch_pool)\n\n outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]\n return torch.cat(outputs, 1)\n\n\nclass FIDInceptionC(torchvision.models.inception.InceptionC):\n \"\"\"InceptionC block patched for FID computation\"\"\"\n def __init__(self, in_channels, channels_7x7):\n super(FIDInceptionC, self).__init__(in_channels, channels_7x7)\n\n def forward(self, x):\n branch1x1 = self.branch1x1(x)\n\n branch7x7 = self.branch7x7_1(x)\n branch7x7 = self.branch7x7_2(branch7x7)\n branch7x7 = self.branch7x7_3(branch7x7)\n\n branch7x7dbl = self.branch7x7dbl_1(x)\n branch7x7dbl = 
self.branch7x7dbl_2(branch7x7dbl)\n branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)\n branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)\n branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)\n\n # Patch: Tensorflow's average pool does not use the padded zero's in\n # its average calculation\n branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,\n count_include_pad=False)\n branch_pool = self.branch_pool(branch_pool)\n\n outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]\n return torch.cat(outputs, 1)\n\n\nclass FIDInceptionE_1(torchvision.models.inception.InceptionE):\n \"\"\"First InceptionE block patched for FID computation\"\"\"\n def __init__(self, in_channels):\n super(FIDInceptionE_1, self).__init__(in_channels)\n\n def forward(self, x):\n branch1x1 = self.branch1x1(x)\n\n branch3x3 = self.branch3x3_1(x)\n branch3x3 = [\n self.branch3x3_2a(branch3x3),\n self.branch3x3_2b(branch3x3),\n ]\n branch3x3 = torch.cat(branch3x3, 1)\n\n branch3x3dbl = self.branch3x3dbl_1(x)\n branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)\n branch3x3dbl = [\n self.branch3x3dbl_3a(branch3x3dbl),\n self.branch3x3dbl_3b(branch3x3dbl),\n ]\n branch3x3dbl = torch.cat(branch3x3dbl, 1)\n\n # Patch: Tensorflow's average pool does not use the padded zero's in\n # its average calculation\n branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,\n count_include_pad=False)\n branch_pool = self.branch_pool(branch_pool)\n\n outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]\n return torch.cat(outputs, 1)\n\n\nclass FIDInceptionE_2(torchvision.models.inception.InceptionE):\n \"\"\"Second InceptionE block patched for FID computation\"\"\"\n def __init__(self, in_channels):\n super(FIDInceptionE_2, self).__init__(in_channels)\n\n def forward(self, x):\n branch1x1 = self.branch1x1(x)\n\n branch3x3 = self.branch3x3_1(x)\n branch3x3 = [\n self.branch3x3_2a(branch3x3),\n self.branch3x3_2b(branch3x3),\n ]\n branch3x3 = torch.cat(branch3x3, 1)\n\n branch3x3dbl = self.branch3x3dbl_1(x)\n branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)\n branch3x3dbl = [\n self.branch3x3dbl_3a(branch3x3dbl),\n self.branch3x3dbl_3b(branch3x3dbl),\n ]\n branch3x3dbl = torch.cat(branch3x3dbl, 1)\n\n # Patch: The FID Inception model uses max pooling instead of average\n # pooling. This is likely an error in this specific Inception\n # implementation, as other Inception models use average pooling here\n # (which matches the description in the paper).\n branch_pool = F.max_pool2d(x, kernel_size=3, stride=1, padding=1)\n branch_pool = self.branch_pool(branch_pool)\n\n outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]\n return torch.cat(outputs, 1)"
] |
[
[
"torch.nn.Sequential",
"torch.cat",
"torch.utils.model_zoo.load_url",
"torch.nn.ModuleList",
"torch.nn.functional.avg_pool2d",
"torch.nn.MaxPool2d",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.functional.interpolate",
"torch.nn.functional.max_pool2d"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
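The FIDInception blocks in the row above all carry the same patch: average pooling with count_include_pad=False, so the zero padding does not dilute border averages and the result matches TensorFlow's pooling. A minimal sketch of that difference, assuming only that torch is installed; the tensor values are illustrative, not from the dataset:

    import torch
    import torch.nn.functional as F

    x = torch.ones(1, 1, 3, 3)                      # constant image of ones
    default_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
    tf_like_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
                                count_include_pad=False)
    # At a corner the 3x3 window covers 4 real pixels and 5 padded zeros:
    print(default_pool[0, 0, 0, 0].item())          # 4/9 ~= 0.444 (zeros counted)
    print(tf_like_pool[0, 0, 0, 0].item())          # 1.0   (zeros ignored)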
thuydotm/xarray-spatial
|
[
"faa144bdfd6d9615c2e7a08ed30a56a6db16b11d"
] |
[
"xrspatial/curvature.py"
] |
[
"# std lib\r\nfrom functools import partial\r\nfrom typing import Union\r\n\r\n# 3rd-party\r\ntry:\r\n import cupy\r\nexcept ImportError:\r\n class cupy(object):\r\n ndarray = False\r\n\r\nimport dask.array as da\r\n\r\nfrom numba import cuda\r\n\r\nimport numpy as np\r\nimport xarray as xr\r\n\r\n# local modules\r\nfrom xrspatial.utils import cuda_args\r\nfrom xrspatial.utils import get_dataarray_resolution\r\nfrom xrspatial.utils import has_cuda\r\nfrom xrspatial.utils import ngjit\r\nfrom xrspatial.utils import is_cupy_backed\r\n\r\nfrom typing import Optional\r\n\r\n\r\n@ngjit\r\ndef _cpu(data, cellsize):\r\n out = np.empty(data.shape, np.float64)\r\n out[:, :] = np.nan\r\n rows, cols = data.shape\r\n for y in range(1, rows - 1):\r\n for x in range(1, cols - 1):\r\n d = (data[y + 1, x] + data[y - 1, x]) / 2 - data[y, x]\r\n e = (data[y, x + 1] + data[y, x - 1]) / 2 - data[y, x]\r\n out[y, x] = -2 * (d + e) * 100 / (cellsize * cellsize)\r\n return out\r\n\r\n\r\ndef _run_numpy(data: np.ndarray,\r\n cellsize: Union[int, float]) -> np.ndarray:\r\n # TODO: handle border edge effect\r\n out = _cpu(data, cellsize)\r\n return out\r\n\r\n\r\ndef _run_dask_numpy(data: da.Array,\r\n cellsize: Union[int, float]) -> da.Array:\r\n _func = partial(_cpu,\r\n cellsize=cellsize)\r\n\r\n out = data.map_overlap(_func,\r\n depth=(1, 1),\r\n boundary=np.nan,\r\n meta=np.array(()))\r\n return out\r\n\r\n\r\[email protected](device=True)\r\ndef _gpu(arr, cellsize):\r\n d = (arr[1, 0] + arr[1, 2]) / 2 - arr[1, 1]\r\n e = (arr[0, 1] + arr[2, 1]) / 2 - arr[1, 1]\r\n curv = -2 * (d + e) * 100 / (cellsize[0] * cellsize[0])\r\n return curv\r\n\r\n\r\[email protected]\r\ndef _run_gpu(arr, cellsize, out):\r\n i, j = cuda.grid(2)\r\n di = 1\r\n dj = 1\r\n if (i - di >= 0 and i + di <= out.shape[0] - 1 and\r\n j - dj >= 0 and j + dj <= out.shape[1] - 1):\r\n out[i, j] = _gpu(arr[i - di:i + di + 1, j - dj:j + dj + 1], cellsize)\r\n\r\n\r\ndef _run_cupy(data: cupy.ndarray,\r\n cellsize: Union[int, float]) -> cupy.ndarray:\r\n\r\n cellsize_arr = cupy.array([float(cellsize)], dtype='f4')\r\n\r\n # TODO: add padding\r\n griddim, blockdim = cuda_args(data.shape)\r\n out = cupy.empty(data.shape, dtype='f4')\r\n out[:] = cupy.nan\r\n\r\n _run_gpu[griddim, blockdim](data, cellsize_arr, out)\r\n\r\n return out\r\n\r\n\r\ndef _run_dask_cupy(data: da.Array,\r\n cellsize: Union[int, float]) -> da.Array:\r\n msg = 'Upstream bug in dask prevents cupy backed arrays'\r\n raise NotImplementedError(msg)\r\n\r\n\r\ndef curvature(agg: xr.DataArray,\r\n name: Optional[str] = 'curvature') -> xr.DataArray:\r\n \"\"\"\r\n Calculates, for all cells in the array, the curvature\r\n (second derivative) of each cell based on the elevation\r\n of its neighbors in a 3x3 grid. A positive curvature\r\n indicates the surface is upwardly convex. A negative\r\n value indicates it is upwardly concave. 
A value of 0\r\n indicates a flat surface.\r\n\r\n Units of the curvature output raster are one hundredth (1/100) of a z-unit.\r\n\r\n Parameters:\r\n ----------\r\n agg: xarray.DataArray\r\n 2D array of elevation values\r\n NumPy, CuPy, NumPy-backed Dask, or Cupy-backed Dask array.\r\n Must contain \"res\" attribute.\r\n name: str (default = \"curvature\")\r\n Name of output DataArray.\r\n\r\n Returns:\r\n ----------\r\n curvature: xarray.DataArray\r\n 2D array, of the same type as the input, of calculated curvature values\r\n All other input attributes are preserved.\r\n\r\n Notes:\r\n ----------\r\n Algorithm References:\r\n - esri, How Curvature works, https://pro.arcgis.com/en/pro-app/latest/tool-reference/spatial-analyst/how-curvature-works.htm, Accessed Apr. 21, 2021. # noqa\r\n\r\n Examples:\r\n ----------\r\n Imports\r\n >>> import numpy as np\r\n >>> import xarray as xr\r\n >>> from xrspatial import curvature\r\n\r\n Create Initial DataArray\r\n >>> agg = xr.DataArray(np.array([[0, 1, 0, 0],\r\n >>> [1, 1, 0, 0],\r\n >>> [0, 1, 2, 2],\r\n >>> [1, 0, 2, 0],\r\n >>> [0, 2, 2, 2]]),\r\n >>> dims = [\"lat\", \"lon\"],\r\n >>> attrs = dict(res = 1))\r\n >>> height, width = agg.shape\r\n >>> _lon = np.linspace(0, width - 1, width)\r\n >>> _lat = np.linspace(0, height - 1, height)\r\n >>> agg[\"lon\"] = _lon\r\n >>> agg[\"lat\"] = _lat\r\n >>> print(agg)\r\n <xarray.DataArray (lat: 5, lon: 4)>\r\n array([[0, 1, 0, 0],\r\n [1, 1, 0, 0],\r\n [0, 1, 2, 2],\r\n [1, 0, 2, 0],\r\n [0, 2, 2, 2]])\r\n Coordinates:\r\n * lon (lon) float64 0.0 1.0 2.0 3.0\r\n * lat (lat) float64 0.0 1.0 2.0 3.0 4.0\r\n Attributes:\r\n res: 1\r\n\r\n Create Curvature DataArray\r\n >>> print(curvature(agg))\r\n <xarray.DataArray 'curvature' (lat: 5, lon: 4)>\r\n array([[ nan, nan, nan, nan],\r\n [ nan, 100., -300., nan],\r\n [ nan, 100., 300., nan],\r\n [ nan, -600., 400., nan],\r\n [ nan, nan, nan, nan]])\r\n Coordinates:\r\n * lon (lon) float64 0.0 1.0 2.0 3.0\r\n * lat (lat) float64 0.0 1.0 2.0 3.0 4.0\r\n Attributes:\r\n res: 1\r\n \"\"\"\r\n\r\n cellsize_x, cellsize_y = get_dataarray_resolution(agg)\r\n cellsize = (cellsize_x + cellsize_y) / 2\r\n\r\n # numpy case\r\n if isinstance(agg.data, np.ndarray):\r\n out = _run_numpy(agg.data, cellsize)\r\n\r\n # cupy case\r\n elif has_cuda() and isinstance(agg.data, cupy.ndarray):\r\n out = _run_cupy(agg.data, cellsize)\r\n\r\n # dask + cupy case\r\n elif has_cuda() and isinstance(agg.data, da.Array) and is_cupy_backed(agg):\r\n out = _run_dask_cupy(agg.data, cellsize)\r\n\r\n # dask + numpy case\r\n elif isinstance(agg.data, da.Array):\r\n out = _run_dask_numpy(agg.data, cellsize)\r\n\r\n else:\r\n raise TypeError('Unsupported Array Type: {}'.format(type(agg.data)))\r\n\r\n return xr.DataArray(out,\r\n name=name,\r\n coords=agg.coords,\r\n dims=agg.dims,\r\n attrs=agg.attrs)\r\n"
] |
[
[
"numpy.array",
"numpy.empty"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
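The _cpu kernel in the row above evaluates a 3x3 curvature stencil, -2 * (d + e) * 100 / cellsize^2, where d and e are the vertical and horizontal second differences. Restating it in plain NumPy and checking it against the docstring's example grid (cellsize = 1):

    import numpy as np

    data = np.array([[0, 1, 0, 0],
                     [1, 1, 0, 0],
                     [0, 1, 2, 2],
                     [1, 0, 2, 0],
                     [0, 2, 2, 2]], dtype=float)
    cellsize = 1.0

    def curvature_cell(y, x):
        d = (data[y + 1, x] + data[y - 1, x]) / 2 - data[y, x]
        e = (data[y, x + 1] + data[y, x - 1]) / 2 - data[y, x]
        return -2 * (d + e) * 100 / (cellsize * cellsize)

    print(curvature_cell(1, 1))   # 100.0, as in the docstring output
    print(curvature_cell(1, 2))   # -300.0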
Chrisfsj2051/ssl_detection
|
[
"00d52272f61b56eade8d5ace18213cba6c74f6d8"
] |
[
"third_party/FasterRCNN/FasterRCNN/eval.py"
] |
[
"# -*- coding: utf-8 -*-\n# File: eval.py\n\nimport itertools\nimport json\nimport numpy as np\nimport os\nimport sys\nimport tensorflow as tf\nfrom collections import namedtuple\nfrom concurrent.futures import ThreadPoolExecutor\nfrom contextlib import ExitStack\nimport cv2\nimport pycocotools.mask as cocomask\nimport tqdm\nfrom scipy import interpolate\n\nfrom tensorpack.callbacks import Callback\nfrom tensorpack.tfutils.common import get_tf_version_tuple\nfrom tensorpack.utils import logger, get_tqdm\n\nfrom .common import CustomResize, clip_boxes\nfrom .config import config as cfg\nfrom .data import get_eval_dataflow\nfrom .dataset import DatasetRegistry\n\ntry:\n import horovod.tensorflow as hvd\nexcept ImportError:\n pass\n\nDetectionResult = namedtuple('DetectionResult',\n ['box', 'score', 'class_id', 'mask'])\n\"\"\"box: 4 float score: float class_id: int, 1~NUM_CLASS mask: None, or a binary image of the original image shape\"\"\"\n\nPseudoInferenceResult = namedtuple('PseudoInferenceResult', [\n 'proposal_box', 'proposal_score', 'frcnn_score', 'box', 'score', 'class_id',\n 'mask'\n])\n\"\"\"box: 4 float score: float class_id: int, 1~NUM_CLASS mask: None, or a binary image of the original image shape\"\"\"\n\n\ndef _scale_box(box, scale):\n w_half = (box[2] - box[0]) * 0.5\n h_half = (box[3] - box[1]) * 0.5\n x_c = (box[2] + box[0]) * 0.5\n y_c = (box[3] + box[1]) * 0.5\n\n w_half *= scale\n h_half *= scale\n\n scaled_box = np.zeros_like(box)\n scaled_box[0] = x_c - w_half\n scaled_box[2] = x_c + w_half\n scaled_box[1] = y_c - h_half\n scaled_box[3] = y_c + h_half\n return scaled_box\n\n\ndef _paste_mask(box, mask, shape):\n \"\"\"\n Args:\n box: 4 float\n mask: MxM floats\n shape: h,w\n\n Returns:\n A uint8 binary image of hxw.\n \"\"\"\n assert mask.shape[0] == mask.shape[1], mask.shape\n\n if cfg.MRCNN.ACCURATE_PASTE:\n # This method is accurate but much slower.\n mask = np.pad(mask, [(1, 1), (1, 1)], mode='constant')\n box = _scale_box(box, float(mask.shape[0]) / (mask.shape[0] - 2))\n\n mask_pixels = np.arange(0.0, mask.shape[0]) + 0.5\n mask_continuous = interpolate.interp2d(\n mask_pixels, mask_pixels, mask, fill_value=0.0)\n h, w = shape\n ys = np.arange(0.0, h) + 0.5\n xs = np.arange(0.0, w) + 0.5\n ys = (ys - box[1]) / (box[3] - box[1]) * mask.shape[0]\n xs = (xs - box[0]) / (box[2] - box[0]) * mask.shape[1]\n # Waste a lot of compute since most indices are out-of-border\n res = mask_continuous(xs, ys)\n return (res >= 0.5).astype('uint8')\n else:\n # This method (inspired by Detectron) is less accurate but fast.\n\n # int() is floor\n # box fpcoor=0.0 -> intcoor=0.0\n x0, y0 = list(map(int, box[:2] + 0.5))\n # box fpcoor=h -> intcoor=h-1, inclusive\n x1, y1 = list(map(int, box[2:] - 0.5)) # inclusive\n x1 = max(x0, x1) # require at least 1x1\n y1 = max(y0, y1)\n\n w = x1 + 1 - x0\n h = y1 + 1 - y0\n\n # rounding errors could happen here, because masks were not originally computed for this shape.\n # but it's hard to do better, because the network does not know the \"original\" scale\n mask = (cv2.resize(mask, (w, h)) > 0.5).astype('uint8')\n ret = np.zeros(shape, dtype='uint8')\n ret[y0:y1 + 1, x0:x1 + 1] = mask\n return ret\n\n\ndef predict_image(img, model_func):\n \"\"\"\n Run detection on one image, using the TF callable.\n This function should handle the preprocessing internally.\n\n Args:\n img: an image\n model_func: a callable from the TF model. 
It takes image and returns\n (boxes, probs, labels, [masks])\n\n Returns:\n [DetectionResult]\n \"\"\"\n orig_shape = img.shape[:2]\n if not cfg.EVAL.PSEUDO_INFERENCE:\n # added upon original package by STAC\n resizer = CustomResize(cfg.PREPROC.TEST_SHORT_EDGE_SIZE,\n cfg.PREPROC.MAX_SIZE)\n resized_img = resizer.augment(img)\n else:\n resized_img = img\n scale = np.sqrt(resized_img.shape[0] * 1.0 / img.shape[0] *\n resized_img.shape[1] / img.shape[1])\n\n res = model_func(resized_img)\n if len(res) <= 4:\n boxes, probs, labels, *masks = res\n else:\n # zizhaoz: support all results returns\n proposals_boxes, proposals_scores, fastrcnn_all_scores, boxes, probs, labels, *masks = res\n\n # Some slow numpy postprocessing:\n boxes = boxes / scale\n # boxes are already clipped inside the graph, but after the floating point scaling, this may not be true any more.\n boxes = clip_boxes(boxes, orig_shape)\n if masks:\n full_masks = [\n _paste_mask(box, mask, orig_shape)\n for box, mask in zip(boxes, masks[0])\n ]\n masks = full_masks\n else:\n # fill with none\n masks = [None] * len(boxes)\n if len(res) <= 4:\n results = [\n DetectionResult(*args)\n for args in zip(boxes, probs, labels.tolist(), masks)\n ]\n else:\n results = [\n PseudoInferenceResult(proposals_boxes, proposals_scores,\n fastrcnn_all_scores, boxes, probs, labels, masks)\n ]\n return results\n\n\ndef predict_dataflow(df, model_func, tqdm_bar=None):\n \"\"\"\n Args:\n df: a DataFlow which produces (image, image_id)\n model_func: a callable from the TF model. It takes image and returns\n (boxes, probs, labels, [masks])\n tqdm_bar: a tqdm object to be shared among multiple evaluation\n instances. If None, will create a new one.\n\n Returns:\n list of dict, in the format used by\n `DatasetSplit.eval_inference_results`\n \"\"\"\n df.reset_state()\n all_results = []\n with ExitStack() as stack:\n # tqdm is not quite thread-safe: https://github.com/tqdm/tqdm/issues/323\n if tqdm_bar is None:\n tqdm_bar = stack.enter_context(get_tqdm(total=df.size()))\n for img, img_id in df:\n results = predict_image(img, model_func)\n for r in results:\n # int()/float() to make it json-serializable\n\n # adding new outputs\n if hasattr(r, 'proposal_box'):\n res = {\n 'image_id': img_id,\n 'category_id': r.class_id,\n 'bbox': r.box,\n 'score': r.score,\n }\n res.update({\n 'proposal_box': r.proposal_box,\n 'proposal_score': r.proposal_score,\n 'frcnn_score': r.frcnn_score\n })\n else:\n res = {\n 'image_id': img_id,\n 'category_id': int(r.class_id),\n 'bbox': [round(float(x), 4) for x in r.box],\n 'score': round(float(r.score), 4),\n }\n # also append segmentation to results\n if r.mask is not None:\n rle = cocomask.encode(np.array(r.mask[:, :, None], order='F'))[0]\n rle['counts'] = rle['counts'].decode('ascii')\n res['segmentation'] = rle\n\n all_results.append(res)\n tqdm_bar.update(1)\n return all_results\n\n\ndef multithread_predict_dataflow(dataflows, model_funcs):\n \"\"\"\n Running multiple `predict_dataflow` in multiple threads, and aggregate the\n results.\n\n Args:\n dataflows: a list of DataFlow to be used in :func:`predict_dataflow`\n model_funcs: a list of callable to be used in :func:`predict_dataflow`\n\n Returns:\n list of dict, in the format used by\n `DatasetSplit.eval_inference_results`\n \"\"\"\n num_worker = len(model_funcs)\n assert len(dataflows) == num_worker\n if num_worker == 1:\n return predict_dataflow(dataflows[0], model_funcs[0])\n kwargs = {\n 'thread_name_prefix': 'EvalWorker'\n } if sys.version_info.minor >= 6 else {}\n 
with ThreadPoolExecutor(max_workers=num_worker, **kwargs) as executor, \\\n tqdm.tqdm(total=sum([df.size() for df in dataflows])) as pbar:\n futures = []\n for dataflow, pred in zip(dataflows, model_funcs):\n futures.append(executor.submit(predict_dataflow, dataflow, pred, pbar))\n all_results = list(itertools.chain(*[fut.result() for fut in futures]))\n return all_results\n\n\nclass EvalCallback(Callback):\n \"\"\"\n A callback that runs evaluation once a while.\n It supports multi-gpu evaluation.\n \"\"\"\n\n _chief_only = False\n\n def __init__(self,\n eval_dataset,\n in_names,\n out_names,\n output_dir,\n eval_start=False):\n self._eval_dataset = eval_dataset\n self._in_names, self._out_names = in_names, out_names\n self._output_dir = output_dir\n self.eval_start = eval_start\n\n def _setup_graph(self):\n num_gpu = cfg.TRAIN.NUM_GPUS\n if cfg.TRAINER == 'replicated':\n # TF bug in version 1.11, 1.12: https://github.com/tensorflow/tensorflow/issues/22750\n buggy_tf = get_tf_version_tuple() in [(1, 11), (1, 12)]\n\n # Use two predictor threads per GPU to get better throughput\n self.num_predictor = num_gpu if buggy_tf else num_gpu * 2\n self.predictors = [\n self._build_predictor(k % num_gpu) for k in range(self.num_predictor)\n ]\n self.dataflows = [\n get_eval_dataflow(\n self._eval_dataset, shard=k, num_shards=self.num_predictor)\n for k in range(self.num_predictor)\n ]\n else:\n # Only eval on the first machine,\n # Because evaluation assumes that all horovod workers share the filesystem.\n # Alternatively, can eval on all ranks and use allgather, but allgather sometimes hangs\n self._horovod_run_eval = hvd.rank() == hvd.local_rank()\n if self._horovod_run_eval:\n self.predictor = self._build_predictor(0)\n self.dataflow = get_eval_dataflow(\n self._eval_dataset,\n shard=hvd.local_rank(),\n num_shards=hvd.local_size())\n\n self.barrier = hvd.allreduce(tf.random_normal(shape=[1]))\n\n def _build_predictor(self, idx):\n return self.trainer.get_predictor(\n self._in_names, self._out_names, device=idx)\n\n def _before_train(self):\n eval_period = cfg.TRAIN.EVAL_PERIOD\n self.epochs_to_eval = set()\n for k in itertools.count(1):\n if k * eval_period > self.trainer.max_epoch:\n break\n self.epochs_to_eval.add(k * eval_period)\n self.epochs_to_eval.add(self.trainer.max_epoch)\n logger.info(\n '[EvalCallback] Will evaluate every {} epochs'.format(eval_period))\n if self.eval_start:\n logger.info('[EvalCallback] Call eval before training')\n self._eval()\n\n def _eval(self):\n logdir = self._output_dir\n if cfg.TRAINER == 'replicated':\n all_results = multithread_predict_dataflow(self.dataflows,\n self.predictors)\n else:\n filenames = [\n os.path.join(logdir,\n 'outputs{}-part{}.json'.format(self.global_step, rank))\n for rank in range(hvd.local_size())\n ]\n\n if self._horovod_run_eval:\n local_results = predict_dataflow(self.dataflow, self.predictor)\n fname = filenames[hvd.local_rank()]\n with open(fname, 'w') as f:\n json.dump(local_results, f)\n self.barrier.eval()\n if hvd.rank() > 0:\n return\n all_results = []\n for fname in filenames:\n with open(fname, 'r') as f:\n obj = json.load(f)\n all_results.extend(obj)\n os.unlink(fname)\n\n scores = DatasetRegistry.get(\n self._eval_dataset).eval_inference_results(all_results)\n for k, v in scores.items():\n self.trainer.monitors.put_scalar(self._eval_dataset + '-' + k, v)\n\n def _trigger_epoch(self):\n if self.epoch_num in self.epochs_to_eval:\n logger.info('Running evaluation ...')\n self._eval()\n"
] |
[
[
"numpy.sqrt",
"numpy.pad",
"numpy.arange",
"numpy.zeros_like",
"scipy.interpolate.interp2d",
"numpy.array",
"numpy.zeros",
"tensorflow.random_normal"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
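_paste_mask's accurate path in the row above pads the MxM mask by one pixel and then grows the box about its centre by the matching ratio before interpolating. A small sketch of that centre-preserving box scaling, with a hypothetical 28x28 mask and box coordinates chosen only for illustration:

    import numpy as np

    def scale_box(box, scale):
        # Same centre-preserving scaling as _scale_box in the row above.
        w_half = (box[2] - box[0]) * 0.5 * scale
        h_half = (box[3] - box[1]) * 0.5 * scale
        x_c = (box[2] + box[0]) * 0.5
        y_c = (box[3] + box[1]) * 0.5
        return np.array([x_c - w_half, y_c - h_half, x_c + w_half, y_c + h_half])

    box = np.array([10.0, 20.0, 40.0, 60.0])        # x0, y0, x1, y1
    mask_size = 28                                  # assumed mask resolution
    padded = mask_size + 2                          # after np.pad with 1 pixel per side
    print(scale_box(box, padded / (padded - 2)))    # box grown ~7% about its centre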
bupt-nlp/name-entity-recongnition-tensorflow2
|
[
"98f7939a04ef9a45f4c53afe1c7b7721b7def85c"
] |
[
"bert_ner/bert.py"
] |
[
"from __future__ import annotations\nimport os\nimport json\nimport tensorflow as tf\nfrom nltk import word_tokenize\n\nfrom bert_ner.tokenization import FullTokenizer\nfrom bert_ner.model import BertNer\n\n\nclass NER:\n def __init__(self, model_dir: str):\n self.model, self.tokenizer, self.model_config = self.load_model(model_dir)\n self.label_map = self.model_config['label_map']\n self.max_seq_length = self.model_config['max_seq_length']\n self.label_map = {int(key): value for key, value in self.label_map.items()}\n\n @staticmethod\n def load_model(model_dir: str, model_config_file: str = 'model_config.json'):\n \"\"\"load the model\"\"\"\n model_config_file: str = os.path.join(model_dir, model_config_file)\n model_config = json.load(open(model_config_file, 'r'))\n bert_config = json.load(open(os.path.join(model_dir, 'bert_config.json'), 'r'))\n model = BertNer(\n bert_model=bert_config, float_type=tf.float32, num_labels=model_config['num_labels'],\n max_seq_length=model_config['max_seq_length']\n )\n ids = tf.ones(shape=(1, 128))\n model(ids, ids, ids, ids, training=False)\n model.load_weights(os.path.join(model_dir, 'model.h5'))\n vocab = os.path.join(model_dir, 'vocab.txt')\n tokenizer = FullTokenizer(vocab_file=vocab, do_lower_case=True)\n return model, tokenizer, model_config\n\n def tokenize(self, text: str):\n \"\"\"tokenize the text with full tokenizer\"\"\"\n words = word_tokenize(text)\n tokens, valid_positions = [], []\n for index, word in enumerate(words):\n token = self.tokenizer.tokenize(word)\n tokens.extend(token)\n for i in range(len(tokens)):\n if i == 0:\n valid_positions.append(1)\n else:\n valid_positions.append(0)\n return tokens, valid_positions\n\n def preprocess(self, text: str):\n \"\"\"preprocess the text\"\"\"\n\n # 1. tokenize the text\n tokens, valid_positions = self.tokenize(text)\n\n # 2. insert CLS\n tokens.insert(0, \"[CLS]\")\n valid_positions.insert(0, 1)\n\n # 3. insert SEP\n tokens.append('[SEP]')\n valid_positions.append(1)\n\n # 4. build segment id\n segment_id = [0] * len(tokens)\n\n # 5. generate input_ids\n input_ids = self.tokenizer.convert_tokens_to_ids(tokens)\n\n # 6. input_mask\n input_mask = [1] * len(input_ids)\n\n while len(input_ids) < self.max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_id.append(0)\n valid_positions.append(0)\n\n return input_ids, input_mask, segment_id, valid_positions\n\n def predict(self, text: str):\n \"\"\"predict by text\"\"\"\n input_ids, input_mask, segment_id, valid_positions = self.preprocess(text)\n input_ids = tf.Variable([input_ids], dtype=tf.int32)\n input_mask = tf.Variable([input_mask], dtype=tf.int32)\n segment_id = tf.Variable([segment_id], dtype=tf.int32)\n valid_positions = tf.Variable([valid_positions], dtype=tf.int32)\n logits: tf.Tensor = self.model([input_ids, segment_id, input_mask, valid_positions])\n logits_labels: tf.Tensor = tf.argmax(logits, axis=-1)\n logits_labels = logits_labels.numpy().tolist()[0]\n # 此过程肯定会有更加高效的写法\n # 后续可针对此处进行优化\n"
] |
[
[
"tensorflow.argmax",
"tensorflow.ones",
"tensorflow.Variable"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
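NER.preprocess in the row above wraps the tokens in [CLS]/[SEP], builds parallel mask and segment lists, and zero-pads everything to max_seq_length. A toy trace of the padding step, with a hypothetical max_seq_length of 8 and example ids for a BERT-style vocabulary:

    max_seq_length = 8
    input_ids = [101, 7592, 2088, 102]   # e.g. [CLS] hello world [SEP]
    input_mask = [1] * len(input_ids)
    segment_id = [0] * len(input_ids)

    while len(input_ids) < max_seq_length:
        input_ids.append(0)
        input_mask.append(0)
        segment_id.append(0)

    print(input_ids)    # [101, 7592, 2088, 102, 0, 0, 0, 0]
    print(input_mask)   # [1, 1, 1, 1, 0, 0, 0, 0]
    print(segment_id)   # [0, 0, 0, 0, 0, 0, 0, 0]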
jmrf/active-qa
|
[
"c30af7be435ccbbec5a7c6b35659256473503e5d"
] |
[
"third_party/bi_att_flow/my/tensorflow/nn.py"
] |
[
"\n\n\n\nimport tensorflow as tf\n\nfrom third_party.bi_att_flow.my.tensorflow.general import add_wd\nfrom third_party.bi_att_flow.my.tensorflow.general import exp_mask\nfrom third_party.bi_att_flow.my.tensorflow.general import flatten\nfrom third_party.bi_att_flow.my.tensorflow.general import reconstruct\n\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import variable_scope as vs\nfrom tensorflow.python.util import nest\n\n\n# Copied & pasted from 0.11 implementation of RNN cells:\n# https://github.com/tensorflow/tensorflow/blob/r0.11/tensorflow/python/ops/rnn_cell.py\ndef _linear(args, output_size, bias, bias_start=0.0, scope=None):\n \"\"\"Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.\n Args:\n args: a 2D Tensor or a list of 2D, batch x n, Tensors.\n output_size: int, second dimension of W[i].\n bias: boolean, whether to add a bias term or not.\n bias_start: starting value to initialize the bias; 0 by default.\n scope: VariableScope for the created subgraph; defaults to \"Linear\".\n Returns:\n A 2D Tensor with shape [batch x output_size] equal to\n sum_i(args[i] * W[i]), where W[i]s are newly created matrices.\n Raises:\n ValueError: if some of the arguments has unspecified or wrong shape.\n \"\"\"\n if args is None or (nest.is_sequence(args) and not args):\n raise ValueError(\"`args` must be specified\")\n if not nest.is_sequence(args):\n args = [args]\n\n # Calculate the total size of arguments on dimension 1.\n total_arg_size = 0\n shapes = [a.get_shape().as_list() for a in args]\n for shape in shapes:\n if len(shape) != 2:\n raise ValueError(\"Linear is expecting 2D arguments: %s\" % str(shapes))\n if not shape[1]:\n raise ValueError(\"Linear expects shape[1] of arguments: %s\" % str(shapes))\n else:\n total_arg_size += shape[1]\n\n dtype = [a.dtype for a in args][0]\n\n # Now the computation.\n with vs.variable_scope(scope or \"Linear\"):\n matrix = vs.get_variable(\n \"Matrix\", [total_arg_size, output_size], dtype=dtype)\n if len(args) == 1:\n res = math_ops.matmul(args[0], matrix)\n else:\n res = math_ops.matmul(array_ops.concat(args, 1), matrix)\n if not bias:\n return res\n bias_term = vs.get_variable(\n \"Bias\", [output_size],\n dtype=dtype,\n initializer=init_ops.constant_initializer(\n bias_start, dtype=dtype))\n return res + bias_term\n\n\ndef linear(args,\n output_size,\n bias,\n bias_start=0.0,\n scope=None,\n squeeze=False,\n wd=0.0,\n input_keep_prob=1.0,\n is_train=None):\n if args is None or (nest.is_sequence(args) and not args):\n raise ValueError(\"`args` must be specified\")\n if not nest.is_sequence(args):\n args = [args]\n\n flat_args = [flatten(arg, 1) for arg in args]\n if input_keep_prob < 1.0:\n assert is_train is not None\n flat_args = [\n tf.cond(is_train, lambda: tf.nn.dropout(arg, input_keep_prob),\n lambda: arg) for arg in flat_args\n ]\n\n flat_out = _linear(\n flat_args, output_size, bias, bias_start=bias_start, scope=scope)\n out = reconstruct(flat_out, args[0], 1)\n if squeeze:\n out = tf.squeeze(out, [len(args[0].get_shape().as_list()) - 1])\n if wd:\n add_wd(wd)\n\n return out\n\n\ndef dropout(x, keep_prob, is_train, noise_shape=None, seed=None, name=None):\n with tf.name_scope(name or \"dropout\"):\n if keep_prob < 1.0:\n d = tf.nn.dropout(x, keep_prob, noise_shape=noise_shape, seed=seed)\n out = tf.cond(is_train, lambda: d, lambda: x)\n return out\n return x\n\n\ndef softmax(logits, mask=None, scope=None):\n with 
tf.name_scope(scope or \"Softmax\"):\n if mask is not None:\n logits = exp_mask(logits, mask)\n flat_logits = flatten(logits, 1)\n flat_out = tf.nn.softmax(flat_logits)\n out = reconstruct(flat_out, logits, 1)\n\n return out\n\n\ndef softsel(target, logits, mask=None, scope=None):\n \"\"\"\n\n :param target: [ ..., J, d] dtype=float\n :param logits: [ ..., J], dtype=float\n :param mask: [ ..., J], dtype=bool\n :param scope:\n :return: [..., d], dtype=float\n \"\"\"\n with tf.name_scope(scope or \"Softsel\"):\n a = softmax(logits, mask=mask)\n target_rank = len(target.get_shape().as_list())\n out = tf.reduce_sum(tf.expand_dims(a, -1) * target, target_rank - 2)\n return out\n\n\ndef double_linear_logits(args,\n size,\n bias,\n bias_start=0.0,\n scope=None,\n mask=None,\n wd=0.0,\n input_keep_prob=1.0,\n is_train=None):\n with tf.variable_scope(scope or \"Double_Linear_Logits\"):\n first = tf.tanh(\n linear(\n args,\n size,\n bias,\n bias_start=bias_start,\n scope=\"first\",\n wd=wd,\n input_keep_prob=input_keep_prob,\n is_train=is_train))\n second = linear(\n first,\n 1,\n bias,\n bias_start=bias_start,\n squeeze=True,\n scope=\"second\",\n wd=wd,\n input_keep_prob=input_keep_prob,\n is_train=is_train)\n if mask is not None:\n second = exp_mask(second, mask)\n return second\n\n\ndef linear_logits(args,\n bias,\n bias_start=0.0,\n scope=None,\n mask=None,\n wd=0.0,\n input_keep_prob=1.0,\n is_train=None):\n with tf.variable_scope(scope or \"Linear_Logits\"):\n logits = linear(\n args,\n 1,\n bias,\n bias_start=bias_start,\n squeeze=True,\n scope=\"first\",\n wd=wd,\n input_keep_prob=input_keep_prob,\n is_train=is_train)\n if mask is not None:\n logits = exp_mask(logits, mask)\n return logits\n\n\ndef sum_logits(args, mask=None, name=None):\n with tf.name_scope(name or \"sum_logits\"):\n if args is None or (nest.is_sequence(args) and not args):\n raise ValueError(\"`args` must be specified\")\n if not nest.is_sequence(args):\n args = [args]\n rank = len(args[0].get_shape())\n logits = sum(tf.reduce_sum(arg, rank - 1) for arg in args)\n if mask is not None:\n logits = exp_mask(logits, mask)\n return logits\n\n\ndef get_logits(args,\n size,\n bias,\n bias_start=0.0,\n scope=None,\n mask=None,\n wd=0.0,\n input_keep_prob=1.0,\n is_train=None,\n func=None):\n if func is None:\n func = \"sum\"\n if func == \"sum\":\n return sum_logits(args, mask=mask, name=scope)\n elif func == \"linear\":\n return linear_logits(\n args,\n bias,\n bias_start=bias_start,\n scope=scope,\n mask=mask,\n wd=wd,\n input_keep_prob=input_keep_prob,\n is_train=is_train)\n elif func == \"double\":\n return double_linear_logits(\n args,\n size,\n bias,\n bias_start=bias_start,\n scope=scope,\n mask=mask,\n wd=wd,\n input_keep_prob=input_keep_prob,\n is_train=is_train)\n elif func == \"dot\":\n assert len(args) == 2\n arg = args[0] * args[1]\n return sum_logits([arg], mask=mask, name=scope)\n elif func == \"mul_linear\":\n assert len(args) == 2\n arg = args[0] * args[1]\n return linear_logits(\n [arg],\n bias,\n bias_start=bias_start,\n scope=scope,\n mask=mask,\n wd=wd,\n input_keep_prob=input_keep_prob,\n is_train=is_train)\n elif func == \"proj\":\n assert len(args) == 2\n d = args[1].get_shape()[-1]\n proj = linear(\n [args[0]],\n d,\n False,\n bias_start=bias_start,\n scope=scope,\n wd=wd,\n input_keep_prob=input_keep_prob,\n is_train=is_train)\n return sum_logits([proj * args[1]], mask=mask)\n elif func == \"tri_linear\":\n assert len(args) == 2\n new_arg = args[0] * args[1]\n return linear_logits(\n [args[0], args[1], 
new_arg],\n bias,\n bias_start=bias_start,\n scope=scope,\n mask=mask,\n wd=wd,\n input_keep_prob=input_keep_prob,\n is_train=is_train)\n else:\n raise Exception()\n\n\ndef highway_layer(arg,\n bias,\n bias_start=0.0,\n scope=None,\n wd=0.0,\n input_keep_prob=1.0,\n is_train=None):\n with tf.variable_scope(scope or \"highway_layer\"):\n d = arg.get_shape()[-1]\n trans = linear(\n [arg],\n d,\n bias,\n bias_start=bias_start,\n scope=\"trans\",\n wd=wd,\n input_keep_prob=input_keep_prob,\n is_train=is_train)\n trans = tf.nn.relu(trans)\n gate = linear(\n [arg],\n d,\n bias,\n bias_start=bias_start,\n scope=\"gate\",\n wd=wd,\n input_keep_prob=input_keep_prob,\n is_train=is_train)\n gate = tf.nn.sigmoid(gate)\n out = gate * trans + (1 - gate) * arg\n return out\n\n\ndef highway_network(arg,\n num_layers,\n bias,\n bias_start=0.0,\n scope=None,\n wd=0.0,\n input_keep_prob=1.0,\n is_train=None):\n with tf.variable_scope(scope or \"highway_network\"):\n prev = arg\n cur = None\n for layer_idx in range(num_layers):\n cur = highway_layer(\n prev,\n bias,\n bias_start=bias_start,\n scope=\"layer_{}\".format(layer_idx),\n wd=wd,\n input_keep_prob=input_keep_prob,\n is_train=is_train)\n prev = cur\n return cur\n\n\ndef conv1d(in_,\n filter_size,\n height,\n padding,\n is_train=None,\n keep_prob=1.0,\n scope=None):\n with tf.variable_scope(scope or \"conv1d\"):\n num_channels = in_.get_shape()[-1]\n filter_ = tf.get_variable(\n \"filter\", shape=[1, height, num_channels, filter_size], dtype=\"float\")\n bias = tf.get_variable(\"bias\", shape=[filter_size], dtype=\"float\")\n strides = [1, 1, 1, 1]\n if is_train is not None and keep_prob < 1.0:\n in_ = dropout(in_, keep_prob, is_train)\n xxc = tf.nn.conv2d(in_, filter_, strides,\n padding) + bias # [N*M, JX, W/filter_stride, d]\n out = tf.reduce_max(tf.nn.relu(xxc), 2) # [-1, JX, d]\n return out\n\n\ndef multi_conv1d(in_,\n filter_sizes,\n heights,\n padding,\n is_train=None,\n keep_prob=1.0,\n scope=None):\n with tf.variable_scope(scope or \"multi_conv1d\"):\n assert len(filter_sizes) == len(heights)\n outs = []\n for filter_size, height in zip(filter_sizes, heights):\n if filter_size == 0:\n continue\n out = conv1d(\n in_,\n filter_size,\n height,\n padding,\n is_train=is_train,\n keep_prob=keep_prob,\n scope=\"conv1d_{}\".format(height))\n outs.append(out)\n concat_out = tf.concat(outs, 2)\n return concat_out\n"
] |
[
[
"tensorflow.nn.relu",
"tensorflow.get_variable",
"tensorflow.nn.softmax",
"tensorflow.nn.sigmoid",
"tensorflow.concat",
"tensorflow.python.util.nest.is_sequence",
"tensorflow.cond",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.reduce_sum",
"tensorflow.python.ops.variable_scope.get_variable",
"tensorflow.expand_dims",
"tensorflow.python.ops.init_ops.constant_initializer",
"tensorflow.python.ops.variable_scope.variable_scope",
"tensorflow.name_scope",
"tensorflow.variable_scope",
"tensorflow.python.ops.math_ops.matmul",
"tensorflow.nn.conv2d",
"tensorflow.nn.dropout"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
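highway_layer in the row above mixes a ReLU-transformed projection with the untouched input through a sigmoid gate: out = gate * trans + (1 - gate) * arg. A NumPy sketch of that combination, where the scalar weights stand in for the learned linear maps and are purely illustrative:

    import numpy as np

    def sigmoid(x):
        return 1.0 / (1.0 + np.exp(-x))

    arg = np.array([0.5, -1.0, 2.0])
    trans = np.maximum(0.0, 1.3 * arg + 0.1)   # stand-in for linear + ReLU
    gate = sigmoid(0.7 * arg - 0.2)            # stand-in for linear + sigmoid
    out = gate * trans + (1.0 - gate) * arg
    print(out)   # gate near 1 keeps the transform, near 0 passes arg through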
teald/vplanet
|
[
"ab640af7d2bbf80c5ea647d5bf971e2ce2c40631"
] |
[
"examples/TidalEarth/makeplot.py"
] |
[
"import vplanet\nimport vplot\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport numpy as np\nimport pathlib\nimport sys\n\n# Path hacks\npath = pathlib.Path(__file__).parents[0].absolute()\nsys.path.insert(1, str(path.parents[0]))\nfrom get_args import get_args\n\n# Tweaks\nplt.rcParams.update({\"font.size\": 16, \"legend.fontsize\": 16})\n\n#\ns_yr = 3600.0 * 24 * 365\nfilepref = \"TidalEarth\"\n\n# Run vplanet\nout0 = vplanet.run(path / \"au0.01\" / \"vpl.in\", units=False)\nout1 = vplanet.run(path / \"au0.02\" / \"vpl.in\", units=False)\nout2 = vplanet.run(path / \"au0.05\" / \"vpl.in\", units=False)\n\n\ndef fig2x3(out, nfig, color=\"k\", legendon=False):\n fig = plt.figure(nfig, figsize=(10, 15))\n panel = 1\n plt.subplot(rows, cols, panel)\n plt.plot(\n out.tidalearth.Time,\n out.tidalearth.TMan,\n linestyle=\"-\",\n color=color,\n label=r\"$T_{M}$\",\n )\n plt.plot(\n out.tidalearth.Time, out.tidalearth.TCore, \"--\", color=color, label=r\"$T_{C}$\"\n )\n if legendon:\n plt.legend(loc=\"best\", ncol=2, frameon=True)\n plt.ylabel(\"Temperature (K)\")\n plt.xlabel(\"Time (Gyr)\")\n plt.ylim(0, 6e3)\n plt.xscale(\"log\")\n panel += 1\n plt.subplot(rows, cols, panel)\n plt.plot(\n out.tidalearth.Time,\n out.tidalearth.HflowUMan,\n linestyle=\"-\",\n color=color,\n label=r\"$Q_{UMan}$\",\n )\n plt.plot(\n out.tidalearth.Time,\n out.tidalearth.HflowMeltMan,\n linestyle=(0, (3, 5, 1, 5, 1, 5)),\n color=color,\n label=r\"$Q_{Melt,Man}$\",\n )\n plt.plot(\n out.tidalearth.Time,\n out.tidalearth.HflowCMB,\n linestyle=\"--\",\n color=color,\n label=r\"$Q_{CMB}$\",\n )\n plt.plot(\n out.tidalearth.Time,\n out.tidalearth.RadPowerMan,\n linestyle=(0, (1, 5)),\n color=color,\n label=r\"$Q_{Rad,Man}$\",\n )\n plt.plot(\n out.tidalearth.Time,\n out.tidalearth.PowerEqtide,\n \"-.\",\n color=color,\n label=r\"$Q_{Tide}$\",\n )\n plt.yscale(\"log\")\n plt.xscale(\"log\")\n if legendon:\n plt.legend(loc=\"upper right\", frameon=True, ncol=2, columnspacing=1)\n plt.ylabel(\"Power (TW)\")\n plt.xlabel(\"Time (Gyr)\")\n ymax = np.max(\n [out.tidalearth.PowerEqtide[50:].max(), out.tidalearth.HflowUMan[50:].max()]\n )\n ymax = 1e10\n plt.ylim(1e-4, ymax)\n panel += 1\n plt.subplot(rows, cols, panel)\n plt.plot(\n out.tidalearth.Time,\n out.tidalearth.SemiMajorAxis,\n color=color,\n label=\"$a_0$=%.2f\" % out.tidalearth.SemiMajorAxis[0],\n )\n plt.ylabel(r\"Semi-Major Axis (AU)\")\n plt.xlabel(\"Time (Gyr)\")\n plt.ylim(0, 0.10)\n plt.xscale(\"log\")\n plt.legend(frameon=True, loc=\"upper left\")\n panel += 1\n plt.subplot(rows, cols, panel)\n plt.loglog(\n out.tidalearth.Time, out.tidalearth.Eccentricity, color=color, label=\"ecc\"\n )\n plt.ylabel(r\"Eccentricity\")\n plt.xlabel(\"Time (Gyr)\")\n plt.ylim(1e-5, 1)\n plt.yscale(\"log\")\n plt.xscale(\"log\")\n panel += 1\n plt.subplot(rows, cols, panel)\n plt.plot(out.tidalearth.Time, out.tidalearth.MagMom, color=color, label=\"MagMom\")\n plt.plot(\n out.tidalearth.Time, out.tidalearth.RIC / 3481, color=color, linestyle=\"--\"\n )\n plt.ylim(0, 1.5)\n plt.ylabel(\"Mag. 
Mom., R$_{ic}$ ($\\oplus$ Units)\")\n plt.xlabel(\"Time (Gyr)\")\n plt.xscale(\"log\")\n panel += 1\n plt.subplot(rows, cols, panel)\n plt.plot(out.tidalearth.Time, out.tidalearth.MeltMassFluxMan * s_yr, color=color)\n plt.ylabel(r\"Melt Mass Flux Mantle (kg$/$yr)\")\n plt.xlabel(\"Time (Gyr)\")\n plt.ylim(1e12, 1e18)\n plt.yscale(\"log\")\n plt.xscale(\"log\")\n\n # Save\n ext = get_args().ext\n fig.savefig(path / f\"{filepref}{nfig}.{ext}\")\n\n\n# Plots\nrows = 3\ncols = 2\n\n# Mantle Figure\nnfig = 1\ncolors = [\"red\", \"black\", \"blue\"]\nfig = fig2x3(out1, nfig, colors[1], legendon=True)\nfig = fig2x3(out0, nfig, colors[0], legendon=False)\nfig = fig2x3(out2, nfig, colors[2], legendon=False)\n\n# Temperature-dep Orbital evo\nnfig += 1\nfig = plt.figure(nfig, figsize=(10, 15))\npanel = 1\nplt.subplot(rows, cols, panel)\nplanet = out0.tidalearth\nplt.semilogy(\n planet.TUMan,\n planet.PowerEqtide,\n color=colors[0],\n label=\"$a_0$=%.2f\" % planet.SemiMajorAxis[0],\n)\nplanet = out1.tidalearth\nplt.semilogy(\n planet.TUMan,\n planet.PowerEqtide,\n color=colors[1],\n label=\"$a_0$=%.2f\" % planet.SemiMajorAxis[0],\n)\nplanet = out2.tidalearth\nplt.semilogy(\n planet.TUMan,\n planet.PowerEqtide,\n color=colors[2],\n label=\"$a_0$=%.2f\" % planet.SemiMajorAxis[0],\n)\nplt.ylabel(r\"Tidal Power [TW]\")\nplt.xlabel(\"Upper Mantle Temp. [K]\")\nplt.ylim(1e-8, 1e4)\nplt.xlim(1600, 2400)\nplt.legend()\npanel += 1\nplt.subplot(rows, cols, panel)\nplanet = out0.tidalearth\nplt.semilogy(planet.TUMan, planet.Eccentricity, color=colors[0], label=\"Ecc\")\nplanet = out1.tidalearth\nplt.semilogy(planet.TUMan, planet.Eccentricity, color=colors[1], label=\"Ecc\")\nplanet = out2.tidalearth\nplt.semilogy(planet.TUMan, planet.Eccentricity, color=colors[2], label=\"Ecc\")\nplt.ylabel(r\"Eccentricity\")\nplt.xlabel(\"Upper Mantle Temp. [K]\")\nplt.ylim(1e-8, 1e0)\nplt.xlim(1600, 2400)\n\n# Save\next = get_args().ext\nfig.savefig(path / f\"{filepref}{nfig}.{ext}\", bbox_inches=\"tight\", dpi=600)\n"
] |
[
[
"matplotlib.pyplot.semilogy",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xscale",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.loglog",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.ylabel"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
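The plotting script above converts MeltMassFluxMan from kg/s to kg/yr with s_yr = 3600 * 24 * 365 before plotting it on a log axis. A quick check of that factor on an arbitrary flux value (not taken from any run):

    s_yr = 3600.0 * 24 * 365        # seconds per (non-leap) year
    flux_kg_per_s = 3.2e7           # illustrative value only
    print(s_yr)                     # 31536000.0
    print(flux_kg_per_s * s_yr)     # ~1.0e15 kg/yr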
morpheusthewhite/vae-vampprior
|
[
"6e16de09db8da4529124f8b84d650de0554fc764"
] |
[
"vampprior/layers.py"
] |
[
"import tensorflow as tf\nimport tensorflow_probability as tfp\nfrom tensorflow.keras import layers\nimport numpy as np\n\n\nclass GatedDense(tf.keras.layers.Layer):\n def __init__(self, units, hactivation='sigmoid', **kwargs):\n super(GatedDense, self).__init__(**kwargs)\n self.hactivation = hactivation\n self.units = units\n\n def build(self, inputs_shape):\n self.g = tf.keras.layers.Dense(self.units, activation='sigmoid')\n if self.hactivation == 'sigmoid':\n self.h = tf.keras.layers.Dense(self.units, activation='sigmoid')\n else:\n self.h = tf.keras.layers.Dense(self.units)\n\n def call(self, inputs):\n hout = self.h(inputs)\n gout = self.g(inputs)\n\n if self.hactivation != 'sigmoid':\n hout = self.hactivation(hout)\n\n return tf.math.multiply(hout, gout)\n\n\nclass Encoder(tf.keras.layers.Layer):\n def __init__(self, D, **kwargs):\n super(Encoder, self).__init__(**kwargs)\n self.D = D\n\n def build(self, inputs_shape):\n self.flatten = layers.Flatten(input_shape=(inputs_shape[1], inputs_shape[2]),\n name='enc-flatten')\n self.dense0 = GatedDense(300, 'sigmoid', name='enc-dense1')\n self.dense1 = GatedDense(300, 'sigmoid', name='enc-dense1')\n\n self.dense_mu = layers.Dense(self.D, name='enc-out-mu')\n self.dense_logvar = layers.Dense(self.D, name='enc-out-lo', activation=Clamp(min_value=-4., max_value=2.))\n\n def call(self, inputs):\n flattened = self.flatten(inputs)\n\n x = self.dense0(flattened)\n x = self.dense1(x)\n\n mu = self.dense_mu(x)\n logvar = self.dense_logvar(x)\n return mu, logvar\n\n\nclass Sampling(tf.keras.layers.Layer):\n \"\"\"\n When called returns L samples of dimension D from the gaussians with the\n mu and logvar passed as input, using the reparametrization trick\n \"\"\"\n\n def __init__(self, D, L, single=False, **kwargs):\n super(Sampling, self).__init__(**kwargs)\n self.L = L\n self.D = D\n self.single = single\n\n # the standard distribution to be used when sampling\n # needed for the reparametrization trick\n self.normal_standard = tfp.distributions.MultivariateNormalDiag(\n tf.zeros(shape=(self.D,)),\n tf.ones(shape=(self.D,)))\n\n def call(self, inputs):\n mu, logvar = inputs\n\n # samples with the reparametrization trick\n # N(0, I) * sigma + mu\n\n latent_samples = self.normal_standard.sample((self.L, mu.shape[0])) * \\\n tf.sqrt(tf.exp(logvar)) + mu\n\n # the returned samples will have shape (N, L, D)\n # where N is the size of the batch\n if self.single:\n return tf.reshape(latent_samples, (-1, self.D))\n else:\n return tf.reshape(latent_samples, (-1, self.L, self.D))\n\n\nclass Decoder(tf.keras.layers.Layer):\n def __init__(self, output_shape, binary=False, **kwargs):\n super(Decoder, self).__init__(**kwargs)\n self.output_shape_ = output_shape\n\n self.dense0 = GatedDense(300, 'sigmoid', name='dec-dense0')\n self.dense1 = GatedDense(300, 'sigmoid', name='dec-dense1')\n\n self.binary = binary\n if binary:\n self.p_x_mean = layers.Dense(output_shape[0] * output_shape[1], name='dec-out-mean', activation='sigmoid')\n else:\n self.p_x_mean = layers.Dense(output_shape[0] * output_shape[1], name='dec-out-mean', activation='sigmoid')\n self.p_x_logvar = layers.Dense(output_shape[0] * output_shape[1], name='dec-out-logvar',\n activation=Clamp(min_value=-4.5, max_value=0.))\n\n def build(self, inputs_shape):\n # transform the result into a square matrix\n # the result of a single input will be a (L, M, M) tensor\n # where M is the size of the original image\n self.reshape = layers.Reshape((-1, self.output_shape_[0],\n self.output_shape_[1]),\n 
name='dec-out-reshaped')\n\n def call(self, inputs):\n # inputs will have shape (N, L, D)\n x = self.dense0(inputs)\n x = self.dense1(x)\n\n x_mean = self.p_x_mean(x)\n if self.binary:\n x_logvar = None\n else:\n x_logvar = self.p_x_logvar(x)\n x_logvar = self.reshape(x_logvar)\n\n # (N, L, M, M)\n return self.reshape(x_mean), x_logvar\n\n\nclass MeanReducer(tf.keras.layers.Layer):\n \"\"\"\n Reduce with mean along the L axis. Meant to be used on the result of the\n decoder to aggregate the decoded L samples\n \"\"\"\n\n def __init__(self, **kwargs):\n super(MeanReducer, self).__init__(**kwargs)\n\n def call(self, inputs):\n # inputs has shape (N, L, M, M)\n # output will have shape (N, M, M)\n return tf.reduce_mean(inputs, axis=1)\n\n\nclass HierarchicalEncoder(layers.Layer): # MLP block #1 # layer insieme di altri layer\n \"\"\"Maps MNIST digits to a triplet (z_mean, z_log_var, z) for z1 and z2.\"\"\"\n\n # DEFINE LAYER OUTPUT DIMENSIONALITY: \"attributes\"\n # NOTE: @param units: Positive integer, dimensionality of the output space.\n def __init__(self, D, name=\"encoder\", **kwargs):\n super(HierarchicalEncoder, self).__init__(name=name, **kwargs)\n self.flatten = layers.Flatten(name='enc-flatten')\n # layers for z2,\n self.dense_1 = GatedDense(300, hactivation=\"sigmoid\", name=\"dense_1\")\n self.dense_2 = GatedDense(300, hactivation=\"sigmoid\", name=\"dense_2\")\n self.dense_z2_mean = layers.Dense(D, name=\"dense_z2_mean\")\n self.dense_z2_logvar = layers.Dense(D, name=\"dense_z2_logvar\",\n activation=Clamp(-6., +2.))\n\n # layers for z1,\n self.dense_z1_z2 = GatedDense(300, hactivation=\"sigmoid\", name=\"dense_z1_z2\")\n self.dense_z1_x = GatedDense(300, hactivation=\"sigmoid\", name=\"dense_z1_x\")\n self.dense_joint = GatedDense(300, hactivation=\"sigmoid\", name=\"dense_joint\")\n self.dense_z1_mean = layers.Dense(D, name=\"dense_z1_mean\")\n self.dense_z1_logvar = layers.Dense(D,\n name=\"dense_z1_logvar\",\n activation=Clamp(-6., +2.)) # todo: chnge activation HARD tan #### CONSTRAINT CLASS\n # sampling\n self.sampling = Sampling(D, 1, single=True) # don't consider L\n\n self.D = D\n\n # CONNECT LAYERS\n def call(self, inputs):\n # q(z2|x)\n flat_inputs = self.flatten(inputs)\n res = self.dense_1(flat_inputs)\n res = self.dense_2(res)\n z2_mean = self.dense_z2_mean(res)\n z2_logvar = self.dense_z2_logvar(res)\n\n z2 = self.sampling((z2_mean, z2_logvar)) # (N, L, D)\n\n # q(z1|x,z2)\n res = self.dense_z1_z2(z2) # (N, L, 300)\n res2 = self.dense_z1_x(flat_inputs) # (N, 1, 300)\n # var = Lambda(concat_test, name='concat_test')([var_1, var_2])\n concat_input = layers.Concatenate()([res, res2])\n res = self.dense_joint(concat_input) # concat_input_dim = 600, a_dim = 300\n z1_mean = self.dense_z1_mean(res)\n z1_logvar = self.dense_z1_logvar(res)\n z1 = self.sampling((z1_mean, z1_logvar))\n\n return z1_mean, z1_logvar, z1, z2_mean, z2_logvar, z2\n\n\nclass HierarchicalDecoder(layers.Layer): # MLP block #2 # layer insieme di altri layer\n \"\"\"Converts z1,z2, the encoded digit vectors, back into a readable digit x.\"\"\"\n\n def __init__(self, output_shape, D, binary, name=\"decoder\", **kwargs):\n super(HierarchicalDecoder, self).__init__(name=name, **kwargs)\n # decoder: p(z1 | z2)\n self.dense_1 = GatedDense(300, hactivation=\"sigmoid\", name=\"dense_1\")\n self.dense_z1new_z2 = GatedDense(300, hactivation=\"sigmoid\", name=\"dense_z1new_z2\")\n self.dense_z1new_mean = layers.Dense(D, name=\"dense_z1new_mean\")\n self.dense_z1new_logvar = layers.Dense(D, 
name=\"dense_z1new_logvar\",\n activation=Clamp(-6., 2.))\n # sampling\n self.sampling = Sampling(D, 1, single=True)\n\n # decoder: p(x | z1, z2)\n self.dense_x_z1new = GatedDense(300, hactivation=\"sigmoid\", name=\"dense_x_z1new\")\n self.dense_x_z2 = GatedDense(300, hactivation=\"sigmoid\", name=\"dense_x_z2\")\n self.dense_joint = GatedDense(300, hactivation=\"sigmoid\", name=\"dense_x_joint\")\n self.dense_x_mean = layers.Dense(np.prod(output_shape),\n activation='sigmoid',\n name=\"dense_x_mean\")\n self.dense_x_logvar = layers.Dense(np.prod(output_shape),\n name=\"dense_x_logvar\",\n activation=Clamp(-6., 2.))\n self.output_shape_ = output_shape\n self.binary = binary\n\n self.mean_reducer = MeanReducer()\n\n def build(self, inputs_shape):\n # transform the result into a square matrix\n # the result of a single input will be a (M, M) tensor\n # where M is the size of the original image\n # input shape (N, D)\n self.reshape = layers.Reshape((self.output_shape_[0],\n self.output_shape_[1]),\n name='dec-out-reshaped')\n\n def call(self, inputs):\n z1_q, z2_q = inputs\n # decoder: p(z1 | z2)\n res = self.dense_1(z2_q)\n res = self.dense_z1new_z2(res)\n z1_p_mean = self.dense_z1new_mean(res)\n z1_p_logvar = self.dense_z1new_logvar(res)\n # there is no sampling for the new z1_p\n\n # decoder: p(x | z1, z2)\n res = self.dense_x_z1new(z1_q)\n res2 = self.dense_x_z2(z2_q)\n\n # joint\n # concat_input = Lambda(concat_test, name='concat_test')([var_1, var_2])\n concat_input = layers.Concatenate()([res, res2])\n joint = self.dense_joint(concat_input)\n\n # p_x_mean X (no sampling)\n x_mean = self.dense_x_mean(joint)\n x_logvar = self.dense_x_logvar(joint)\n\n x_mean_reshaped = self.reshape(x_mean)\n\n if not self.binary:\n x_logvar_reshaped = self.reshape(x_logvar)\n else:\n # not important\n x_logvar_reshaped = x_logvar\n\n return x_mean_reshaped, x_logvar_reshaped, z1_p_mean, z1_p_logvar\n\n def p_z1(self, z2):\n # decoder: p(z1 | z2)\n res = self.dense_1(z2)\n res = self.dense_z1new_z2(res)\n z1_p_mean = self.dense_z1new_mean(res)\n z1_p_logvar = self.dense_z1new_logvar(res)\n return z1_p_mean, z1_p_logvar\n\n # called ONLY when generating phase ??\n def p_x(self, z1, z2):\n # decoder: p(x | z1, z2)\n res = self.dense_x_z1new(z1)\n res = self.mean_reducer(res) ### added to correct shape (100, 1, ?) to (100, ?)\n res2 = self.dense_x_z2(z2)\n # joint\n # concat_input = Lambda(concat_test, name='concat_test')([var_1, var_2])\n concat_input = layers.Concatenate()([res, res2])\n joint = self.dense_joint(concat_input)\n\n # p_x_mean X (no sampling)\n x_mean = self.dense_x_mean(joint)\n x_logvar = self.dense_x_logvar(joint)\n\n x_mean = self.reshape(x_mean)\n x_logvar = self.reshape(x_logvar)\n\n return x_mean, x_logvar\n\n\nclass MinMaxConstraint(tf.keras.constraints.Constraint):\n def __init__(self, min_value, max_value):\n self.min = min_value\n self.max = max_value\n\n def __call__(self, w):\n return tf.clip_by_value(w, self.min, self.max, name=\"min_value-max-constr\")\n\n\nclass Clamp:\n def __init__(self, min_value=0., max_value=1.):\n self.min_value = min_value\n self.max_value = max_value\n\n def __call__(self, x):\n return tf.clip_by_value(x, self.min_value, self.max_value, name='hardtanh')\n"
] |
[
[
"tensorflow.clip_by_value",
"tensorflow.keras.layers.Concatenate",
"tensorflow.reduce_mean",
"tensorflow.zeros",
"tensorflow.keras.layers.Dense",
"tensorflow.reshape",
"tensorflow.ones",
"tensorflow.exp",
"tensorflow.math.multiply",
"numpy.prod",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.Flatten"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
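Sampling.call in the row above draws eps from a standard normal and forms z = eps * sqrt(exp(logvar)) + mu, the reparametrization trick that keeps the sample differentiable with respect to mu and logvar. The same computation in plain NumPy, with made-up mu and logvar values:

    import numpy as np

    rng = np.random.default_rng(0)
    mu = np.array([0.0, 1.0])
    logvar = np.array([0.0, -2.0])            # log of the variance

    eps = rng.standard_normal(size=(5, 2))    # 5 draws from N(0, I)
    z = eps * np.sqrt(np.exp(logvar)) + mu
    print(z.shape)                            # (5, 2): L samples of dimension D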
liuhd073/mmgeneration
|
[
"2e09a6b63c5f0ddee850d429c5b739ae1e0cc76d",
"2e09a6b63c5f0ddee850d429c5b739ae1e0cc76d"
] |
[
"mmgen/ops/stylegan3/custom_ops.py",
"mmgen/models/diffusions/utils.py"
] |
[
"# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n#\n# NVIDIA CORPORATION and its licensors retain all intellectual property\n# and proprietary rights in and to this software, related documentation\n# and any modifications thereto. Any use, reproduction, disclosure or\n# distribution of this software and related documentation without an express\n# license agreement from NVIDIA CORPORATION is strictly prohibited.\n\nimport glob\nimport hashlib\nimport importlib\nimport os\nimport re\nimport shutil\nimport uuid\n\nimport torch\nimport torch.utils.cpp_extension\n\n# Global options.\n\nverbosity = 'brief' # Verbosity level: 'none', 'brief', 'full'\n\n# Internal helper funcs.\n\n\ndef _find_compiler_bindir():\n patterns = [\n 'C:/Program Files (x86)/Microsoft Visual Studio/*/Professional/VC/Tools/MSVC/*/bin/Hostx64/x64', # noqa\n 'C:/Program Files (x86)/Microsoft Visual Studio/*/BuildTools/VC/Tools/MSVC/*/bin/Hostx64/x64', # noqa\n 'C:/Program Files (x86)/Microsoft Visual Studio/*/Community/VC/Tools/MSVC/*/bin/Hostx64/x64', # noqa\n 'C:/Program Files (x86)/Microsoft Visual Studio */vc/bin',\n ]\n for pattern in patterns:\n matches = sorted(glob.glob(pattern))\n if len(matches):\n return matches[-1]\n return None\n\n\ndef _get_mangled_gpu_name():\n name = torch.cuda.get_device_name().lower()\n out = []\n for c in name:\n if re.match('[a-z0-9_-]+', c):\n out.append(c)\n else:\n out.append('-')\n return ''.join(out)\n\n\n# Main entry point for compiling and loading C++/CUDA plugins.\n\n_cached_plugins = dict()\n\n\ndef get_plugin(module_name,\n sources,\n headers=None,\n source_dir=None,\n **build_kwargs):\n assert verbosity in ['none', 'brief', 'full']\n if headers is None:\n headers = []\n if source_dir is not None:\n sources = [os.path.join(source_dir, fname) for fname in sources]\n headers = [os.path.join(source_dir, fname) for fname in headers]\n\n # Already cached?\n if module_name in _cached_plugins:\n return _cached_plugins[module_name]\n\n # Print status.\n if verbosity == 'full':\n print(f'Setting up PyTorch plugin \"{module_name}\"...')\n elif verbosity == 'brief':\n print(\n f'Setting up PyTorch plugin \"{module_name}\"... ',\n end='',\n flush=True)\n verbose_build = (verbosity == 'full')\n\n # Compile and load.\n try: # pylint: disable=too-many-nested-blocks\n # Make sure we can find the necessary compiler binaries.\n if os.name == 'nt' and os.system('where cl.exe >nul 2>nul') != 0:\n compiler_bindir = _find_compiler_bindir()\n if compiler_bindir is None:\n raise RuntimeError(\n 'Could not find MSVC/GCC/CLANG installation on this '\n f'computer. Check _find_compiler_bindir() in \"{__file__}\".'\n )\n os.environ['PATH'] += ';' + compiler_bindir\n\n # Some containers set TORCH_CUDA_ARCH_LIST to a list that can either\n # break the build or unnecessarily restrict what's available to nvcc.\n # Unset it to let nvcc decide based on what's available on the\n # machine.\n os.environ['TORCH_CUDA_ARCH_LIST'] = ''\n\n # Incremental build md5sum trickery. Copies all the input source files\n # into a cached build directory under a combined md5 digest of the\n # input source files. 
Copying is done only if the combined digest has\n # changed.\n # This keeps input file timestamps and filenames the same as in\n # previous extension builds, allowing for fast incremental rebuilds.\n #\n # This optimization is done only in case all the source files reside in\n # a single directory (just for simplicity) and if the\n # TORCH_EXTENSIONS_DIR environment variable is set (we take this as a\n # signal that the user\n # actually cares about this.)\n #\n # EDIT: We now do it regardless of TORCH_EXTENSIOS_DIR, in order to\n # work around the *.cu dependency bug in ninja config.\n\n all_source_files = sorted(sources + headers)\n all_source_dirs = set(\n os.path.dirname(fname) for fname in all_source_files)\n if len(all_source_dirs\n ) == 1: # and ('TORCH_EXTENSIONS_DIR' in os.environ):\n\n # Compute combined hash digest for all source files.\n hash_md5 = hashlib.md5()\n for src in all_source_files:\n with open(src, 'rb') as f:\n hash_md5.update(f.read())\n\n # Select cached build directory name.\n source_digest = hash_md5.hexdigest()\n build_top_dir = torch.utils.cpp_extension._get_build_directory(\n module_name, verbose=verbose_build)\n cached_build_dir = os.path.join(\n build_top_dir, f'{source_digest}-{_get_mangled_gpu_name()}')\n\n if not os.path.isdir(cached_build_dir):\n tmpdir = f'{build_top_dir}/srctmp-{uuid.uuid4().hex}'\n os.makedirs(tmpdir)\n for src in all_source_files:\n shutil.copyfile(\n src, os.path.join(tmpdir, os.path.basename(src)))\n try:\n os.replace(tmpdir, cached_build_dir) # atomic\n except OSError:\n # source directory already exists\n # delete tmpdir and its contents.\n shutil.rmtree(tmpdir)\n if not os.path.isdir(cached_build_dir):\n raise\n\n # Compile.\n cached_sources = [\n os.path.join(cached_build_dir, os.path.basename(fname))\n for fname in sources\n ]\n torch.utils.cpp_extension.load(\n name=module_name,\n build_directory=cached_build_dir,\n verbose=verbose_build,\n sources=cached_sources,\n **build_kwargs)\n else:\n torch.utils.cpp_extension.load(\n name=module_name,\n verbose=verbose_build,\n sources=sources,\n **build_kwargs)\n\n # Load.\n module = importlib.import_module(module_name)\n\n except Exception as err:\n if verbosity == 'brief':\n print('Failed!')\n raise err\n\n # Print status and add to cache dict.\n if verbosity == 'full':\n print(f'Done setting up PyTorch plugin \"{module_name}\".')\n elif verbosity == 'brief':\n print('Done.')\n _cached_plugins[module_name] = module\n return module\n",
"# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\n\ndef _get_noise_batch(noise,\n image_shape,\n num_timesteps=0,\n num_batches=0,\n timesteps_noise=False):\n \"\"\"Get noise batch. Support get sequeue of noise along timesteps.\n\n We support the following use cases ('bz' denotes ```num_batches`` and 'n'\n denotes ``num_timesteps``):\n\n If timesteps_noise is True, we output noise which dimension is 5.\n - Input is [bz, c, h, w]: Expand to [n, bz, c, h, w]\n - Input is [n, c, h, w]: Expand to [n, bz, c, h, w]\n - Input is [n*bz, c, h, w]: View to [n, bz, c, h, w]\n - Dim of the input is 5: Return the input, ignore ``num_batches`` and\n ``num_timesteps``\n - Callable or None: Generate noise shape as [n, bz, c, h, w]\n - Otherwise: Raise error\n\n If timestep_noise is False, we output noise which dimension is 4 and\n ignore ``num_timesteps``.\n - Dim of the input is 3: Unsqueeze to [1, c, h, w], ignore ``num_batches``\n - Dim of the input is 4: Return input, ignore ``num_batches``\n - Callable or None: Generate noise shape as [bz, c, h, w]\n - Otherwise: Raise error\n\n It's to be noted that, we do not move the generated label to target device\n in this function because we can not get which device the noise should move\n to.\n\n Args:\n noise (torch.Tensor | callable | None): You can directly give a\n batch of noise through a ``torch.Tensor`` or offer a callable\n function to sample a batch of noise data. Otherwise, the\n ``None`` indicates to use the default noise sampler.\n image_shape (torch.Size): Size of images in the diffusion process.\n num_timesteps (int, optional): Total timestpes of the diffusion and\n denoising process. Defaults to 0.\n num_batches (int, optional): The number of batch size. To be noted that\n this argument only work when the input ``noise`` is callable or\n ``None``. Defaults to 0.\n timesteps_noise (bool, optional): If True, returned noise will shape\n as [n, bz, c, h, w], otherwise shape as [bz, c, h, w].\n Defaults to False.\n device (str, optional): If not ``None``, move the generated noise to\n corresponding device.\n Returns:\n torch.Tensor: Generated noise with desired shape.\n \"\"\"\n if isinstance(noise, torch.Tensor):\n # conduct sanity check for the last three dimension\n assert noise.shape[-3:] == image_shape\n if timesteps_noise:\n if noise.ndim == 4:\n assert num_batches > 0 and num_timesteps > 0\n # noise shape as [n, c, h, w], expand to [n, bz, c, h, w]\n if noise.shape[0] == num_timesteps:\n noise_batch = noise.view(num_timesteps, 1, *image_shape)\n noise_batch = noise_batch.expand(-1, num_batches, -1, -1,\n -1)\n # noise shape as [bz, c, h, w], expand to [n, bz, c, h, w]\n elif noise.shape[0] == num_batches:\n noise_batch = noise.view(1, num_batches, *image_shape)\n noise_batch = noise_batch.expand(num_timesteps, -1, -1, -1,\n -1)\n # noise shape as [n*bz, c, h, w], reshape to [b, bz, c, h, w]\n elif noise.shape[0] == num_timesteps * num_batches:\n noise_batch = noise.view(num_timesteps, -1, *image_shape)\n else:\n raise ValueError(\n 'The timesteps noise should be in shape of '\n '(n, c, h, w), (bz, c, h, w), (n*bz, c, h, w) or '\n f'(n, bz, c, h, w). But receive {noise.shape}.')\n\n elif noise.ndim == 5:\n # direct return noise\n noise_batch = noise\n else:\n raise ValueError(\n 'The timesteps noise should be in shape of '\n '(n, c, h, w), (bz, c, h, w), (n*bz, c, h, w) or '\n f'(n, bz, c, h, w). 
But receive {noise.shape}.')\n else:\n if noise.ndim == 3:\n # reshape noise to [1, c, h, w]\n noise_batch = noise[None, ...]\n elif noise.ndim == 4:\n # do nothing\n noise_batch = noise\n else:\n raise ValueError(\n 'The noise should be in shape of (n, c, h, w) or'\n f'(c, h, w), but got {noise.shape}')\n # receive a noise generator and sample noise.\n elif callable(noise):\n assert num_batches > 0\n noise_generator = noise\n if timesteps_noise:\n assert num_timesteps > 0\n # generate noise shape as [n, bz, c, h, w]\n noise_batch = noise_generator(\n (num_timesteps, num_batches, *image_shape))\n else:\n # generate noise shape as [bz, c, h, w]\n noise_batch = noise_generator((num_batches, *image_shape))\n # otherwise, we will adopt default noise sampler.\n else:\n assert num_batches > 0\n if timesteps_noise:\n assert num_timesteps > 0\n # generate noise shape as [n, bz, c, h, w]\n noise_batch = torch.randn(\n (num_timesteps, num_batches, *image_shape))\n else:\n # generate noise shape as [bz, c, h, w]\n noise_batch = torch.randn((num_batches, *image_shape))\n\n return noise_batch\n\n\ndef _get_label_batch(label,\n num_timesteps=0,\n num_classes=0,\n num_batches=0,\n timesteps_noise=False):\n \"\"\"Get label batch. Support get sequeue of label along timesteps.\n\n We support the following use cases ('bz' denotes ```num_batches`` and 'n'\n denotes ``num_timesteps``):\n\n If num_classes <= 0, return None.\n\n If timesteps_noise is True, we output label which dimension is 2.\n - Input is [bz, ]: Expand to [n, bz]\n - Input is [n, ]: Expand to [n, bz]\n - Input is [n*bz, ]: View to [n, bz]\n - Dim of the input is 2: Return the input, ignore ``num_batches`` and\n ``num_timesteps``\n - Callable or None: Generate label shape as [n, bz]\n - Otherwise: Raise error\n\n If timesteps_noise is False, we output label which dimension is 1 and\n ignore ``num_timesteps``.\n - Dim of the input is 1: Unsqueeze to [1, ], ignore ``num_batches``\n - Dim of the input is 2: Return the input. ignore ``num_batches``\n - Callable or None: Generate label shape as [bz, ]\n - Otherwise: Raise error\n\n It's to be noted that, we do not move the generated label to target device\n in this function because we can not get which device the noise should move\n to.\n\n Args:\n label (torch.Tensor | callable | None): You can directly give a\n batch of noise through a ``torch.Tensor`` or offer a callable\n function to sample a batch of noise data. Otherwise, the\n ``None`` indicates to use the default noise sampler.\n num_timesteps (int, optional): Total timestpes of the diffusion and\n denoising process. Defaults to 0.\n num_batches (int, optional): The number of batch size. To be noted that\n this argument only work when the input ``noise`` is callable or\n ``None``. 
Defaults to 0.\n timesteps_noise (bool, optional): If True, returned noise will shape\n as [n, bz, c, h, w], otherwise shape as [bz, c, h, w].\n Defaults to False.\n Returns:\n torch.Tensor: Generated label with desired shape.\n \"\"\"\n # no labels output if num_classes is 0\n if num_classes == 0:\n assert label is None, ('\\'label\\' should be None '\n 'if \\'num_classes == 0\\'.')\n return None\n\n # receive label and conduct sanity check.\n if isinstance(label, torch.Tensor):\n if timesteps_noise:\n if label.ndim == 1:\n assert num_batches > 0 and num_timesteps > 0\n # [n, ] to [n, bz]\n if label.shape[0] == num_timesteps:\n label_batch = label.view(num_timesteps, 1)\n label_batch = label_batch.expand(-1, num_batches)\n # [bz, ] to [n, bz]\n elif label.shape[0] == num_batches:\n label_batch = label.view(1, num_batches)\n label_batch = label_batch.expand(num_timesteps, -1)\n # [n*bz, ] to [n, bz]\n elif label.shape[0] == num_timesteps * num_batches:\n label_batch = label.view(num_timesteps, -1)\n else:\n raise ValueError(\n 'The timesteps label should be in shape of '\n '(n, ), (bz,), (n*bz, ) or (n, bz, ). But receive '\n f'{label.shape}.')\n\n elif label.ndim == 2:\n # dimension is 2, direct return\n label_batch = label\n else:\n raise ValueError(\n 'The timesteps label should be in shape of '\n '(n, ), (bz,), (n*bz, ) or (n, bz, ). But receive '\n f'{label.shape}.')\n else:\n # dimension is 0, expand to [1, ]\n if label.ndim == 0:\n label_batch = label[None, ...]\n # dimension is 1, do nothing\n elif label.ndim == 1:\n label_batch = label\n else:\n raise ValueError(\n 'The label should be in shape of (bz, ) or'\n f'zero-dimension tensor, but got {label.shape}')\n # receive a noise generator and sample noise.\n elif callable(label):\n assert num_batches > 0\n label_generator = label\n if timesteps_noise:\n assert num_timesteps > 0\n # generate label shape as [n, bz]\n label_batch = label_generator((num_timesteps, num_batches))\n else:\n # generate label shape as [bz, ]\n label_batch = label_generator((num_batches, ))\n # otherwise, we will adopt default label sampler.\n else:\n assert num_batches > 0\n if timesteps_noise:\n assert num_timesteps > 0\n # generate label shape as [n, bz]\n label_batch = torch.randint(0, num_classes,\n (num_timesteps, num_batches))\n else:\n # generate label shape as [bz, ]\n label_batch = torch.randint(0, num_classes, (num_batches, ))\n\n return label_batch\n\n\ndef var_to_tensor(var, index, target_shape=None, device=None):\n \"\"\"Function used to extract variables by given index, and convert into\n tensor as given shape.\n Args:\n var (np.array): Variables to be extracted.\n index (torch.Tensor): Target index to extract.\n target_shape (torch.Size, optional): If given, the indexed variable\n will expand to the given shape. Defaults to None.\n device (str): If given, the indexed variable will move to the target\n device. Otherwise, indexed variable will on cpu. Defaults to None.\n\n Returns:\n torch.Tensor: Converted variable.\n \"\"\"\n # we must move var to cuda for it's ndarray in current design\n var_indexed = torch.from_numpy(var)[index].float()\n\n if device is not None:\n var_indexed = var_indexed.to(device)\n\n while len(var_indexed.shape) < len(target_shape):\n var_indexed = var_indexed[..., None]\n return var_indexed\n"
] |
[
[
"torch.utils.cpp_extension.load",
"torch.utils.cpp_extension._get_build_directory",
"torch.cuda.get_device_name"
],
[
"torch.randn",
"torch.randint",
"torch.from_numpy"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hfwittmann/kedro
|
[
"b0d4fcd8f19b49a7916d78fd09daeb6209a7b6c6"
] |
[
"tests/extras/datasets/pandas/test_excel_dataset.py"
] |
[
"from pathlib import Path, PurePosixPath\n\nimport pandas as pd\nimport pytest\nfrom fsspec.implementations.http import HTTPFileSystem\nfrom fsspec.implementations.local import LocalFileSystem\nfrom gcsfs import GCSFileSystem\nfrom pandas.testing import assert_frame_equal\nfrom s3fs.core import S3FileSystem\n\nfrom kedro.extras.datasets.pandas import ExcelDataSet\nfrom kedro.io import DataSetError\nfrom kedro.io.core import PROTOCOL_DELIMITER, Version\n\n\[email protected]\ndef filepath_excel(tmp_path):\n return (tmp_path / \"test.xlsx\").as_posix()\n\n\[email protected]\ndef excel_data_set(filepath_excel, load_args, save_args, fs_args):\n return ExcelDataSet(\n filepath=filepath_excel,\n load_args=load_args,\n save_args=save_args,\n fs_args=fs_args,\n )\n\n\[email protected]\ndef excel_multisheet_data_set(filepath_excel, save_args, fs_args):\n load_args = {\"sheet_name\": None}\n return ExcelDataSet(\n filepath=filepath_excel,\n load_args=load_args,\n save_args=save_args,\n fs_args=fs_args,\n )\n\n\[email protected]\ndef versioned_excel_data_set(filepath_excel, load_version, save_version):\n return ExcelDataSet(\n filepath=filepath_excel, version=Version(load_version, save_version)\n )\n\n\[email protected]\ndef dummy_dataframe():\n return pd.DataFrame({\"col1\": [1, 2], \"col2\": [4, 5], \"col3\": [5, 6]})\n\n\[email protected]\ndef another_dummy_dataframe():\n return pd.DataFrame({\"x\": [10, 20], \"y\": [\"hello\", \"world\"]})\n\n\nclass TestExcelDataSet:\n def test_save_and_load(self, excel_data_set, dummy_dataframe):\n \"\"\"Test saving and reloading the data set.\"\"\"\n excel_data_set.save(dummy_dataframe)\n reloaded = excel_data_set.load()\n assert_frame_equal(dummy_dataframe, reloaded)\n assert excel_data_set._fs_open_args_load == {}\n assert excel_data_set._fs_open_args_save == {\"mode\": \"wb\"}\n\n def test_save_and_load_multiple_sheets(\n self, excel_multisheet_data_set, dummy_dataframe, another_dummy_dataframe\n ):\n \"\"\"Test saving and reloading the data set with multiple sheets.\"\"\"\n dummy_multisheet = {\n \"sheet 1\": dummy_dataframe,\n \"sheet 2\": another_dummy_dataframe,\n }\n excel_multisheet_data_set.save(dummy_multisheet)\n reloaded = excel_multisheet_data_set.load()\n assert_frame_equal(dummy_multisheet[\"sheet 1\"], reloaded[\"sheet 1\"])\n assert_frame_equal(dummy_multisheet[\"sheet 2\"], reloaded[\"sheet 2\"])\n\n def test_exists(self, excel_data_set, dummy_dataframe):\n \"\"\"Test `exists` method invocation for both existing and\n nonexistent data set.\"\"\"\n assert not excel_data_set.exists()\n excel_data_set.save(dummy_dataframe)\n assert excel_data_set.exists()\n\n @pytest.mark.parametrize(\n \"load_args\", [{\"k1\": \"v1\", \"index\": \"value\"}], indirect=True\n )\n def test_load_extra_params(self, excel_data_set, load_args):\n \"\"\"Test overriding the default load arguments.\"\"\"\n for key, value in load_args.items():\n assert excel_data_set._load_args[key] == value\n\n @pytest.mark.parametrize(\n \"save_args\", [{\"k1\": \"v1\", \"index\": \"value\"}], indirect=True\n )\n def test_save_extra_params(self, excel_data_set, save_args):\n \"\"\"Test overriding the default save arguments.\"\"\"\n for key, value in save_args.items():\n assert excel_data_set._save_args[key] == value\n\n @pytest.mark.parametrize(\n \"fs_args\",\n [{\"open_args_load\": {\"mode\": \"rb\", \"compression\": \"gzip\"}}],\n indirect=True,\n )\n def test_open_extra_args(self, excel_data_set, fs_args):\n assert excel_data_set._fs_open_args_load == fs_args[\"open_args_load\"]\n 
assert excel_data_set._fs_open_args_save == {\"mode\": \"wb\"} # default unchanged\n\n def test_load_missing_file(self, excel_data_set):\n \"\"\"Check the error when trying to load missing file.\"\"\"\n pattern = r\"Failed while loading data from data set ExcelDataSet\\(.*\\)\"\n with pytest.raises(DataSetError, match=pattern):\n excel_data_set.load()\n\n @pytest.mark.parametrize(\n \"filepath,instance_type\",\n [\n (\"s3://bucket/file.xlsx\", S3FileSystem),\n (\"file:///tmp/test.xlsx\", LocalFileSystem),\n (\"/tmp/test.xlsx\", LocalFileSystem),\n (\"gcs://bucket/file.xlsx\", GCSFileSystem),\n (\"https://example.com/file.xlsx\", HTTPFileSystem),\n ],\n )\n def test_protocol_usage(self, filepath, instance_type):\n data_set = ExcelDataSet(filepath=filepath)\n assert isinstance(data_set._fs, instance_type)\n\n path = filepath.split(PROTOCOL_DELIMITER, 1)[-1]\n\n assert str(data_set._filepath) == path\n assert isinstance(data_set._filepath, PurePosixPath)\n\n def test_catalog_release(self, mocker):\n fs_mock = mocker.patch(\"fsspec.filesystem\").return_value\n filepath = \"test.xlsx\"\n data_set = ExcelDataSet(filepath=filepath)\n data_set.release()\n fs_mock.invalidate_cache.assert_called_once_with(filepath)\n\n\nclass TestExcelDataSetVersioned:\n def test_version_str_repr(self, load_version, save_version):\n \"\"\"Test that version is in string representation of the class instance\n when applicable.\"\"\"\n filepath = \"test.xlsx\"\n ds = ExcelDataSet(filepath=filepath)\n ds_versioned = ExcelDataSet(\n filepath=filepath, version=Version(load_version, save_version)\n )\n assert filepath in str(ds)\n assert \"version\" not in str(ds)\n\n assert filepath in str(ds_versioned)\n ver_str = f\"version=Version(load={load_version}, save='{save_version}')\"\n assert ver_str in str(ds_versioned)\n assert \"ExcelDataSet\" in str(ds_versioned)\n assert \"ExcelDataSet\" in str(ds)\n assert \"protocol\" in str(ds_versioned)\n assert \"protocol\" in str(ds)\n assert \"writer_args\" in str(ds_versioned)\n assert \"writer_args\" in str(ds)\n # Default save_args and load_args\n assert \"save_args={'index': False}\" in str(ds)\n assert \"save_args={'index': False}\" in str(ds_versioned)\n assert \"load_args={'engine': xlrd}\" in str(ds_versioned)\n assert \"load_args={'engine': xlrd}\" in str(ds)\n\n def test_save_and_load(self, versioned_excel_data_set, dummy_dataframe):\n \"\"\"Test that saved and reloaded data matches the original one for\n the versioned data set.\"\"\"\n versioned_excel_data_set.save(dummy_dataframe)\n reloaded_df = versioned_excel_data_set.load()\n assert_frame_equal(dummy_dataframe, reloaded_df)\n\n def test_no_versions(self, versioned_excel_data_set):\n \"\"\"Check the error if no versions are available for load.\"\"\"\n pattern = r\"Did not find any versions for ExcelDataSet\\(.+\\)\"\n with pytest.raises(DataSetError, match=pattern):\n versioned_excel_data_set.load()\n\n def test_exists(self, versioned_excel_data_set, dummy_dataframe):\n \"\"\"Test `exists` method invocation for versioned data set.\"\"\"\n assert not versioned_excel_data_set.exists()\n versioned_excel_data_set.save(dummy_dataframe)\n assert versioned_excel_data_set.exists()\n\n def test_prevent_overwrite(self, versioned_excel_data_set, dummy_dataframe):\n \"\"\"Check the error when attempting to override the data set if the\n corresponding Excel file for a given save version already exists.\"\"\"\n versioned_excel_data_set.save(dummy_dataframe)\n pattern = (\n r\"Save path \\`.+\\` for ExcelDataSet\\(.+\\) must \"\n 
r\"not exist if versioning is enabled\\.\"\n )\n with pytest.raises(DataSetError, match=pattern):\n versioned_excel_data_set.save(dummy_dataframe)\n\n @pytest.mark.parametrize(\n \"load_version\", [\"2019-01-01T23.59.59.999Z\"], indirect=True\n )\n @pytest.mark.parametrize(\n \"save_version\", [\"2019-01-02T00.00.00.000Z\"], indirect=True\n )\n def test_save_version_warning(\n self, versioned_excel_data_set, load_version, save_version, dummy_dataframe\n ):\n \"\"\"Check the warning when saving to the path that differs from\n the subsequent load path.\"\"\"\n pattern = (\n fr\"Save version `{save_version}` did not match load version \"\n fr\"`{load_version}` for ExcelDataSet\\(.+\\)\"\n )\n with pytest.warns(UserWarning, match=pattern):\n versioned_excel_data_set.save(dummy_dataframe)\n\n def test_http_filesystem_no_versioning(self):\n pattern = r\"HTTP\\(s\\) DataSet doesn't support versioning\\.\"\n\n with pytest.raises(DataSetError, match=pattern):\n ExcelDataSet(\n filepath=\"https://example.com/file.xlsx\", version=Version(None, None)\n )\n\n def test_versioning_existing_dataset(\n self, excel_data_set, versioned_excel_data_set, dummy_dataframe\n ):\n \"\"\"Check the error when attempting to save a versioned dataset on top of an\n already existing (non-versioned) dataset.\"\"\"\n excel_data_set.save(dummy_dataframe)\n assert excel_data_set.exists()\n assert excel_data_set._filepath == versioned_excel_data_set._filepath\n pattern = (\n f\"(?=.*file with the same name already exists in the directory)\"\n f\"(?=.*{versioned_excel_data_set._filepath.parent.as_posix()})\"\n )\n with pytest.raises(DataSetError, match=pattern):\n versioned_excel_data_set.save(dummy_dataframe)\n\n # Remove non-versioned dataset and try again\n Path(excel_data_set._filepath.as_posix()).unlink()\n versioned_excel_data_set.save(dummy_dataframe)\n assert versioned_excel_data_set.exists()\n"
] |
[
[
"pandas.testing.assert_frame_equal",
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
dmopalmer/pds-tools
|
[
"a61916e039c4fc5149e4b7e0d797cf890370ac8c"
] |
[
"pdstools/gravity.py"
] |
[
"#!/usr/bin/python\n################################################################################\n# gravity.py\n#\n# Classes and methods to deal with gravity fields of oblate planets.\n#\n# Mark R. Showalter, SETI Institute, March 2010\n# Revised October 2011.\n# Revised December 2, 2011 (BSW) - add unit tests\n# - change solve_a() to handle arrays\n# Revised December 3, 2011 (MRS)\n# - Fixed errors that made poor initial guesses in solve_a(). Reduced default\n# number of iterations to 5.\n# - Added unit tests giving array arguments to solve_a().\n#\n# Revised February 18, 2012 (MRS)\n# - Added gravity fields of more bodies as class constants.\n#\n# Revised July 30, 2013 (MRS)\n# - Added a new constant PLUTO_CHARON_AS_RINGS that properly describes the\n# time-averaged gravity field up to J10.\n# - Added a default value of (1,0,0) for the factors in solve_a().\n#\n# Revised May 3, 2014 (MRS)\n# - Updated Pluto system gravity based on Brozovic et al. 2014.\n# - Redefined PLUTO_CHARON as PLUTO_CHARON_OLD\n# - Redefined PLUTO_CHARON_AS_RINGS as PLUTO_CHARON\n#\n# Revised July 16, 2018 (MRS)\n# - Added second-order dependence on e and sin(i) to the functions for n,\n# kappa and nu. Formulas are adapted from from Renner & Sicardy, Use of the\n# Geometric Elements in Numerical Simulations, Cel. Mech. and Dyn. Astron.\n# 94, 237-248 (2006). See Eqs. 14-16.\n################################################################################\n\nfrom __future__ import print_function\n\nimport numpy as np\nimport unittest\nimport warnings\n\n# Useful unit conversions\nDPR = 180. / np.pi # Converts radians to degrees\nDPD = DPR * 86400. # Converts radians per second to degrees per day \nTWOPI = 2. * np.pi\n\nclass Gravity():\n \"\"\"A class describing the gravity field of a planet.\"\"\"\n\n def __init__(self, gm, jlist=[], radius=1.):\n \"\"\"The constructor for a Gravity object.\n\n Input:\n gm The body's GM in units of km^3/s^2\n jlist optional list of even gravity harmonics: [jJ2, J4, ...].\n radius body radius for associated J-values.\n \"\"\"\n\n self.gm = gm\n self.jn = jlist\n self.rp = radius\n self.r2 = radius * radius\n\n # Evaluate coefficients for frequencies\n n = 0\n pn_zero = 1.\n potential_jn = []\n omega_jn = []\n kappa_jn = []\n nu_jn = []\n domega_jn = []\n dkappa_jn = []\n dnu_jn = []\n for i in range(len(jlist)):\n n += 2 # i == 0 corresponds to J2; i == 1 to J4; etc.\n pn_zero = -(n-1.)/n * pn_zero\n\n potential_jn.append(pn_zero * jlist[i])\n\n omega_jn.append( -(n+1) * pn_zero * jlist[i])\n kappa_jn.append( (n-1)*(n+1) * pn_zero * jlist[i])\n nu_jn.append( -(n+1)*(n+1) * pn_zero * jlist[i])\n\n domega_jn.append(-(n+3) * omega_jn[i])\n dkappa_jn.append(-(n+3) * kappa_jn[i])\n dnu_jn.append( -(n+3) * nu_jn[i])\n\n self.potential_jn = np.array(potential_jn)\n\n self.omega_jn = np.array(omega_jn)\n self.kappa_jn = np.array(kappa_jn)\n self.nu_jn = np.array(nu_jn)\n self.domega_jn = np.array(domega_jn)\n self.dkappa_jn = np.array(dkappa_jn)\n self.dnu_jn = np.array(dnu_jn)\n\n @staticmethod\n def _jseries(coefficients, ratio2):\n \"\"\"Internal method to evaluate a series of the form:\n coefficients[0] * ratio2 + coefficients[1] * ratio2^2 ...\"\"\"\n\n return ratio2 * np.polyval(coefficients[::-1], ratio2)\n\n def potential(self, a):\n \"\"\"Returns the potential energy at radius a, in the equatorial plane.\"\"\"\n\n return -self.gm/a * (1. 
- OblateGravity._jseries(self.potential_jn,\n self.r2/a2))\n\n def omega(self, a, e=0., sin_i=0.):\n \"\"\"Returns the mean motion (radians/s) at semimajor axis a.\n\n Corrections for e and sin(i) are accurate to second order.\n \"\"\"\n\n a2 = a * a\n gm_a3 = self.gm / (a*a2)\n ratio2 = self.r2 / a2\n\n omega2 = gm_a3 * (1. + Gravity._jseries(self.omega_jn, ratio2))\n omega1 = np.sqrt(omega2)\n\n if (e or sin_i) and self.jn:\n omega1 += np.sqrt(gm_a3) * ratio2 * self.jn[0] * \\\n (3. * e**2 - 12. * sin_i**2)\n\n return omega1\n\n def kappa2(self, a):\n \"\"\"Returns the square of the radial oscillation frequency (radians/s) at\n semimajor axis a.\"\"\"\n\n a2 = a * a\n kappa2 = self.gm/(a*a2) * (1. + Gravity._jseries(self.kappa_jn,\n self.r2/a2))\n return kappa2\n\n def kappa(self, a, e=0., sin_i=0.):\n \"\"\"Returns the radial oscillation frequency (radians/s) at semimajor\n axis a.\"\"\"\n\n a2 = a * a\n gm_a3 = self.gm / (a*a2)\n ratio2 = self.r2 / a2\n\n kappa2 = gm_a3 * (1. + Gravity._jseries(self.kappa_jn, ratio2))\n kappa1 = np.sqrt(kappa2)\n\n if (e or sin_i) and self.jn:\n kappa1 += np.sqrt(gm_a3) * ratio2 * self.jn[0] * (-9. * sin_i**2)\n\n return kappa1\n\n def nu(self, a, e=0., sin_i=0.):\n \"\"\"Returns the vertical oscillation frequency (radians/s) at semimajor\n axis a.\"\"\"\n\n a2 = a * a\n gm_a3 = self.gm / (a*a2)\n ratio2 = self.r2 / a2\n\n nu2 = gm_a3 * (1. + Gravity._jseries(self.nu_jn, ratio2))\n nu1 = np.sqrt(nu2)\n\n if (e or sin_i) and self.jn:\n nu1 += np.sqrt(gm_a3) * ratio2 * self.jn[0] * \\\n (6. * e**2 - 12.75 * sin_i**2)\n\n return nu1\n\n def domega_da(self, a, e=0., sin_i=0.):\n \"\"\"Returns the radial derivative of the mean motion (radians/s/km) at\n semimajor axis a.\"\"\"\n\n a2 = a * a\n gm_a4 = self.gm / (a2*a2)\n ratio2 = self.r2 / a2\n\n domega2 = gm_a4 * (-3. + Gravity._jseries(self.domega_jn, ratio2))\n domega1 = domega2 / (2. * self.omega(a))\n\n if (e or sin_i) and self.jn:\n domega1 -= 3.5 * np.sqrt(self.gm/a)/a2 * ratio2 * self.jn[0] * \\\n (3. * e**2 - 12. * sin_i**2)\n\n return domega1\n\n def dkappa_da(self, a, e=0., sin_i=0.):\n \"\"\"Returns the radial derivative of the radial oscillation frequency\n (radians/s/km) at semimajor axis a.\"\"\"\n\n a2 = a * a\n gm_a4 = self.gm / (a2*a2)\n ratio2 = self.r2 / a2\n\n dkappa2 = gm_a4 * (-3. + Gravity._jseries(self.dkappa_jn, ratio2))\n dkappa1 = dkappa2 / (2. * self.kappa(a))\n\n if (e or sin_i) and self.jn:\n dkappa1 -= 3.5 * np.sqrt(self.gm/a)/a2 * ratio2 * self.jn[0] * \\\n (-9. * sin_i**2)\n\n return dkappa1\n\n def dnu_da(self, a, e=0., sin_i=0.):\n \"\"\"Returns the radial derivative of the vertical oscillation frequency\n (radians/s/km) at semimajor axis a.\"\"\"\n\n a2 = a * a\n gm_a4 = self.gm / (a2*a2)\n ratio2 = self.r2 / a2\n\n dnu2 = gm_a4 * (-3. + Gravity._jseries(self.dnu_jn, ratio2))\n dnu1 = dnu2 / (2. * self.nu(a))\n\n if (e or sin_i) and self.jn:\n dnu1 -= 3.5 * np.sqrt(self.gm/a)/a2 * ratio2 * self.jn[0] * \\\n (6. * e**2 - 12.75 * sin_i**2)\n\n return dnu1\n\n def combo(self, a, factors, e=0., sin_i=0.):\n \"\"\"Returns a frequency combination, based on given coefficients for\n omega, kappa and nu. 
Full numeric precision is preserved in the limit\n of first- or second-order cancellation of the coefficients.\"\"\"\n\n # Shortcut for nonzero e or i, to be refined later\n if e or sin_i:\n sum_values = 0.\n if factors[0]:\n sum_values = sum_values + factors[0] * self.omega(a, e, sin_i)\n if factors[1]:\n sum_values = sum_values + factors[1] * self.kappa(a, e, sin_i)\n if factors[2]:\n sum_values = sum_values + factors[2] * self.nu(a, e, sin_i)\n\n return sum_values\n\n a2 = a * a\n ratio2 = self.r2 / a2\n gm_over_a3 = self.gm / (a * a2)\n\n sum_factors = 0\n sum_values = 0.\n\n # omega term\n if factors[0] != 0:\n omega2_jsum = Gravity._jseries(self.omega_jn, ratio2)\n omega2 = gm_over_a3 * (1. + omega2_jsum)\n omega = np.sqrt(omega2)\n\n sum_factors += factors[0]\n sum_values += factors[0] * omega\n\n # kappa term\n if factors[1] != 0:\n kappa2_jsum = Gravity._jseries(self.kappa_jn, ratio2)\n kappa2 = gm_over_a3 * (1. + kappa2_jsum)\n kappa = np.sqrt(kappa2)\n\n sum_factors += factors[1]\n sum_values += factors[1] * kappa\n\n # nu term\n if factors[2] != 0:\n nu2_jsum = Gravity._jseries(self.nu_jn, ratio2)\n nu2 = gm_over_a3 * (1. + nu2_jsum)\n nu = np.sqrt(nu2)\n\n sum_factors += factors[2]\n sum_values += factors[2] * nu\n\n if sum_factors != 0: return sum_values\n\n # In the special cause where sum_factors = 0, we get cancellation to\n # leading order. We employ the following trick to improve accuracy.\n #\n # Because\n # omega^2 - GM/a^3 = GM/a^3 * Jsum\n # we have\n # [omega - sqrt(GM/a^3)] [omega + sqrt(GM/a^3)] = GM/a^3 * Jsum\n # or\n # omega - sqrt(GM/a^3) = GM/a^3 * Jsum / [omega + sqrt(GM/a^3)]\n #\n # Similarly for kappa and nu. Our solution is to sum the quantities\n # (omega - sqrt(GM/a^3)), (kappa - sqrt(GM/a^3)) and (nu - sqrt(GM/a^3))\n # instead.\n\n sqrt_gm_over_a3 = np.sqrt(gm_over_a3)\n sum_values = 0.\n\n if factors[0] != 0:\n omega_diff = gm_over_a3 * omega2_jsum / (omega + sqrt_gm_over_a3)\n sum_values += factors[0] * omega_diff\n\n if factors[1] != 0:\n kappa_diff = gm_over_a3 * kappa2_jsum / (kappa + sqrt_gm_over_a3)\n sum_values += factors[1] * kappa_diff\n\n if factors[2] != 0:\n nu_diff = gm_over_a3 * nu2_jsum / (nu + sqrt_gm_over_a3)\n sum_values += factors[2] * nu_diff\n\n if factors[1] != factors[2]: return sum_values\n\n # In the final special case where\n # factors[1] = factors[2] = -factors[0]/2\n # we get still higher-order cancellation. We employ another trick. The\n # expression becomes\n # -factors[1] (2 omega - kappa - nu)\n # \n # Note that\n # (2 omega - kappa - nu) (omega + kappa)\n # = 2 omega^2 + omega kappa - omega nu - kappa^2 - kappa nu\n # Because\n # 2 omega^2 - kappa^2 = nu^2,\n # we get\n # (2 omega - kappa - nu) (omega + kappa)\n # = nu^2 + omega kappa - omega nu - kappa nu\n # = (nu - omega) (nu - kappa)\n # Thus,\n # 2 omega - kappa - nu = (nu - omega) (nu - kappa) / (omega + kappa)\n\n if factors[1] == 0: return 0\n\n sum_values = -factors[1] * ((nu_diff - omega_diff)\n * (nu_diff - kappa_diff)\n / (omega + kappa))\n\n return sum_values\n\n def dcombo_da(self, a, factors, e=0., sin_i=0.):\n \"\"\"Returns the radial derivative of a frequency combination, based on\n given coefficients for omega, kappa and nu. 
Unlike method combo(), this\n one does not guarantee full precision if the coefficients cancel to\n first or second order.\"\"\"\n\n sum_values = 0.\n\n if factors[0]: sum_values += factors[0] * self.domega_da(a, e, sin_i)\n if factors[1]: sum_values += factors[1] * self.dkappa_da(a, e, sin_i)\n if factors[2]: sum_values += factors[2] * self.dnu_da(a, e, sin_i)\n\n return sum_values\n\n def solve_a(self, freq, factors=(1,0,0), e=0., sin_i=0.):\n \"\"\"Solves for the semimajor axis at which the frequency is equal to the\n given combination of factors on omega, kappa and nu. Solution is via\n Newton's method.\"\"\"\n\n # Find an initial guess\n sum_factors = np.sum(factors)\n\n # No first-order cancellation:\n # freq(a) ~ sum[factors] * sqrt(GM/a^3)\n #\n # a^3 ~ GM * (sum[factors] / freq)^2\n\n if sum_factors != 0:\n a = (self.gm * (sum_factors/freq)**2)**(1./3.)\n\n # No second-order cancellation:\n # freq(a) ~ 1/2 * sum[factor*term] * sqrt(GM/a^3) * Rp^2 / a^2\n #\n # a^7 ~ GM * (sum[factor*term]/2 / freq)^2 Rp^4\n\n elif factors[1] != factors[2]:\n term = (factors[0] * self.omega_jn[0] +\n factors[1] * self.kappa_jn[0] +\n factors[2] * self.nu_jn[0]) / 2.\n a = (self.gm * (term * self.r2 / freq)**2)**(1/7.)\n\n # Second-order cancellation:\n # freq(a) ~ -1/8 * sum[factor*term^2] * sqrt(GM/a^3) * Rp^4 / a^4\n #\n # a^11 ~ GM * (-sum[factor*term^2]/8 / freq)^2 Rp^8\n\n else:\n term = (factors[0] * self.omega_jn[0]**2 +\n factors[1] * self.kappa_jn[0]**2 +\n factors[2] * self.nu_jn[0]**2) / (-8.)\n a = (self.gm * (term * self.r2 * self.r2 / freq)**2)**(1/11.)\n\n # Iterate using Newton's method\n da_prev_max = 1.e99\n for iter in range(20):\n # a step in Newton's method: x(i+1) = x(i) - f(xi) / fp(xi)\n # our f(x) = self.combo() - freq\n # fp(x) = self.dcombo()\n\n da = ((self.combo(a, factors, e, sin_i) - freq) / \\\n self.dcombo_da(a, factors, e, sin_i))\n da_max = np.max(np.abs(da))\n if da_max == 0.: break\n\n a -= da\n\n # If Newton's method stops converging, return what we've got\n if iter > 4 and da_max >= da_prev_max:\n break\n\n da_prev_max = da_max\n\n return a\n\n # Useful alternative names...\n def n(self, a, e=0., sin_i=0.):\n \"\"\"Returns the mean motion at semimajor axis a. Identical to omega(a).\n \"\"\"\n\n return self.omega(a, e, sin_i)\n\n def dmean_dt(self, a, e=0., sin_i=0.):\n \"\"\"Returns the mean motion at semimajor axis a. Identical to omega(a).\n \"\"\"\n\n return self.omega(a, e, sin_i)\n\n def dperi_dt(self, a, e=0., sin_i=0.):\n \"\"\"Returns the pericenter precession rate at semimajor axis a. Identical\n to combo(a, (1,-1,0)).\n \"\"\"\n\n return self.combo(a, (1,-1,0), e, sin_i)\n\n def dnode_dt(self, a, e=0., sin_i=0.):\n \"\"\"Returns the nodal regression rate (negative) at semimajor axis a.\n Identical to combo(a, (1,0,-1)).\n \"\"\"\n\n return self.combo(a, (1,0,-1), e, sin_i)\n\n def d_dmean_dt_da(self, a, e=0., sin_i=0.):\n \"\"\"Returns the radial derivative of the mean motion at semimajor axis a. \n Identical to domega_da(a).\n \"\"\"\n\n return self.domega_da(a, e, sin_i)\n\n def d_dperi_dt_da(self, a, e=0., sin_i=0.):\n \"\"\"Returns the radial derivative of the pericenter precession rate at\n semimajor axis a. Identical to dcombo_da(a, (1,-1,0)).\n \"\"\"\n\n return self.dcombo_da(a, (1,-1,0), e, sin_i)\n\n def d_dnode_dt_da(self, a, e=0., sin_i=0.):\n \"\"\"Returns the radial derivative of the nodal regression rate (negative)\n at semimajor axis a. 
Identical to dcombo_da(a, (1,0,-1)).\n \"\"\"\n\n return self.dcombo_da(a, (1,0,-1), e, sin_i)\n\n def ilr_pattern(self, n, m, p=1):\n \"\"\"Returns the pattern speed of the m:m-p inner Lindblad resonance,\n given the mean motion n of the perturber.\n \"\"\"\n\n a = self.solve_a(n, (1,0,0))\n return (n + self.kappa(a) * p/m)\n\n def olr_pattern(self, n, m, p=1):\n \"\"\"Returns the pattern speed of the m:m+p outer Lindblad resonance,\n given the mean motion n of the perturber.\n \"\"\"\n\n a = self.solve_a(n, (1,0,0))\n return (n - self.kappa(a) * p/(m+p))\n\n################################################################################\n# Orbital elements\n################################################################################\n\n def state_from_osc(self, elements, body_gm=0.):\n \"\"\"Return position and velocity based on osculating orbital elements:\n (a, e, i, mean longitude, longitude of pericenter,\n longitude of ascending node).\n\n Routine adapted from SWIFT's orbel_el2xv.f by Rob French. Only works\n well for e < 0.18.\n \"\"\"\n\n gm = self.gm + body_gm\n\n (a, e, inc, mean_lon, long_peri, long_node) = elements\n a = np.asfarray(a)\n e = np.asfarray(e)\n inc = np.asfarray(inc)\n mean_lon = np.asfarray(mean_lon)\n long_peri = np.asfarray(long_peri)\n long_node = np.asfarray(long_node)\n\n mean_anomaly = mean_lon - long_peri\n\n sp = np.sin(long_peri)\n cp = np.cos(long_peri)\n so = np.sin(long_node)\n co = np.cos(long_node)\n si = np.sin(inc)\n ci = np.cos(inc)\n d11 = cp*co - sp*so*ci\n d12 = cp*so + sp*co*ci\n d13 = sp*si\n d21 = -sp*co - cp*so*ci\n d22 = -sp*so + cp*co*ci\n d23 = cp*si\n\n sm = np.sin(mean_anomaly)\n cm = np.cos(mean_anomaly)\n\n x = mean_anomaly + e*sm*( 1. + e*( cm + e*( 1. - 1.5*sm*sm)))\n\n sx = np.sin(x)\n cx = np.cos(x)\n es = e*sx\n ec = e*cx\n f = x - es - mean_anomaly\n fp = 1. - ec \n fpp = es \n fppp = ec \n dx = -f/fp\n dx = -f/(fp + dx*fpp/2.)\n dx = -f/(fp + dx*fpp/2. + dx*dx*fppp/6.)\n\n cape = x + dx\n\n scap = np.sin(cape)\n ccap = np.cos(cape)\n sqe = np.sqrt(1. -e*e)\n sqgma = np.sqrt(gm*a)\n xfac1 = a*(ccap - e)\n xfac2 = a*sqe*scap\n ri = 1./(a*(1. 
- e*ccap))\n vfac1 = -ri * sqgma * scap\n vfac2 = ri * sqgma * sqe * ccap\n\n x = d11*xfac1 + d21*xfac2\n y = d12*xfac1 + d22*xfac2\n z = d13*xfac1 + d23*xfac2\n vx = d11*vfac1 + d21*vfac2\n vy = d12*vfac1 + d22*vfac2\n vz = d13*vfac1 + d23*vfac2\n\n # Broadcast to a common shape and create vectors\n (x,y,z,vx,vy,vz) = np.broadcast_arrays(x,y,z,vx,vy,vz)\n\n pos = np.stack([x, y, z], axis=-1)\n vel = np.stack([vx, vy, vz], axis=-1)\n\n return (pos,vel)\n\n ############################################################################\n # Orbital elements\n ############################################################################\n\n def osc_from_state(self, pos, vel, body_gm=0.):\n \"\"\"Return osculating orbital elements based on position and velocity.\n\n Routine adapted from SWIFT's orbel_vx2el.f by Rob French.\n \"\"\"\n\n (pos, vel) = np.broadcast_arrays(pos, vel)\n pos = np.asfarray(pos)\n vel = np.asfarray(vel)\n\n x = pos[...,0]\n y = pos[...,1]\n z = pos[...,2]\n\n vx = vel[...,0]\n vy = vel[...,1]\n vz = vel[...,2]\n\n tiny = 1e-300\n\n # Warning: This only works with elliptical orbits!\n gmsum = self.gm + body_gm\n \n # Compute the angular momentum H, and thereby the inclination INC.\n hx = y*vz - z*vy\n hy = z*vx - x*vz\n hz = x*vy - y*vx\n h2 = hx*hx + hy*hy + hz*hz\n h = np.sqrt(h2)\n inc = np.arccos(hz/h)\n\n # Compute longitude of ascending node long_node and the argument of\n # latitude u.\n fac = np.sqrt(hx**2 + hy**2)/h\n\n long_node = np.where(fac < tiny, np.zeros(x.shape),\n Gravity._pos_arctan2(hx,-hy))\n tmp = np.arctan2(y, x)\n tmp = np.where(np.abs(inc - np.pi) < 10.*tiny, -tmp, tmp)\n tmp = tmp % TWOPI\n\n sin_inc = np.sin(inc)\n if np.shape(sin_inc) == (): # Avoid possible divide-by-zero\n if sin_inc == 0.: sin_inc = 1.\n else:\n sin_inc[sin_inc == 0.] = 1.\n\n u = np.where(fac < tiny, tmp, Gravity._pos_arctan2(z/sin_inc, \n x*np.cos(long_node) + \n y*np.sin(long_node)))\n\n # Compute the radius R and velocity squared V2, and the dot\n # product RDOTV, the energy per unit mass ENERGY.\n r = np.sqrt(x*x + y*y + z*z)\n v2 = vx*vx + vy*vy + vz*vz\n v = np.sqrt(v2)\n vdotr = x*vx + y*vy + z*vz\n energy = 0.5*v2 - gmsum/r\n\n a = -0.5*gmsum/energy\n\n fac = 1. - h2/(gmsum*a)\n e = np.where(fac > tiny, np.minimum(np.sqrt(fac), 1.), 0.) # XXX\n face = (a-r)/(a*e)\n face = np.minimum(face, 1.)\n face = np.maximum(face, -1.)\n cape = np.arccos(face)\n cape = np.where(vdotr < 0., 2.*np.pi-cape, cape)\n cape = np.where(fac > tiny, cape, u)\n cw = (np.cos(cape) - e)/(1. - e*np.cos(cape))\n sw = np.sqrt(1. - e*e)*np.sin(cape)/(1. 
- e*np.cos(cape))\n w = np.where(fac > 0., Gravity._pos_arctan2(sw,cw), u)\n\n mean_anomaly = (cape - e*np.sin(cape)) % TWOPI\n long_peri = (u - w) % TWOPI\n\n mean_lon = (mean_anomaly + long_peri) % TWOPI\n\n # Convert any shapeless arrays to scalars\n elements = []\n for element in (a, e, inc, mean_lon, long_peri, long_node):\n if isinstance(element, np.ndarray) and element.shape == ():\n elements.append(element[()])\n else:\n elements.append(element)\n\n return tuple(elements)\n\n # Take the geometric osculating elements and convert to X,Y,Z,VX,VY,VZ\n # Returns x, y, z, vx, vy, vz\n # From Renner & Sicardy (2006) EQ 2-13\n\n def state_from_geom(self, elements, body_gm=0.):\n \"\"\"Return position and velocity based on geometric orbital elements:\n (a, e, i, mean longitude, longitude of pericenter,\n longitude of ascending node).\n\n Adapted from Renner & Sicardy (2006) EQ 2-13 by Rob French.\n \"\"\"\n\n (a, e, inc, mean_lon, long_peri, long_node) = elements\n a = np.asfarray(a)\n e = np.asfarray(e)\n inc = np.asfarray(inc)\n lam = np.asfarray(mean_lon)\n long_peri = np.asfarray(long_peri)\n long_node = np.asfarray(long_node)\n\n mean_anomaly = lam - long_peri\n\n (n, kappa, nu, eta2, chi2,\n alpha1, alpha2, alphasq) = self._geom_to_freq(a, e, inc, body_gm)\n kappa2 = kappa**2\n n2 = n**2\n nu2 = nu**2\n\n # Convert to cylindrical\n r = a*(1. - e*np.cos(lam-long_peri) + \n e**2*(3./2. * eta2/kappa2 - 1. -\n eta2/2./kappa2 * np.cos(2.*(lam-long_peri))) +\n inc**2*(3./4.*chi2/kappa2 - 1. +\n chi2/4./alphasq * np.cos(2.*(lam-long_node))))\n\n L = (lam + 2.*e*n/kappa*np.sin(lam-long_peri) + \n e**2*(3./4. + nu2/2./kappa2)*n/kappa * np.sin(2.*(lam-long_peri)) -\n inc**2*chi2/4./alphasq*n/nu*np.sin(2.*(lam-long_node)))\n\n z = a * inc * (np.sin(lam-long_node) + \n e*chi2/2./kappa/alpha1*np.sin(2.*lam-long_peri-long_node) -\n e*3./2.*chi2/kappa/alpha2*np.sin(long_peri-long_node))\n\n rdot = a * kappa * (e*np.sin(lam-long_peri) + \n e**2*eta2/kappa2*np.sin(2.*(lam-long_peri)) -\n inc**2*chi2/2./alphasq*nu/kappa*\n np.sin(2.*(lam-long_node)))\n\n Ldot = n*(1. + 2.*e*np.cos(lam-long_peri) +\n e**2 * (7./2. - 3.*eta2/kappa2 - kappa2/2./n2 + \n (3./2.+eta2/kappa2)*np.cos(2.*(lam-long_peri))) +\n inc**2 * (2. 
- kappa2/2./n2 - 3./2.*chi2/kappa2 - \n chi2/2./alphasq*np.cos(2.*(lam-long_node))))\n\n vz = a*inc*nu*(np.cos(lam-long_node) + \n e*chi2*(kappa+nu)/2./kappa/alpha1/nu *\n np.cos(2*lam-long_peri-long_node) +\n e*3./2.*chi2*(kappa-nu)/kappa/alpha2/nu*np.cos(long_peri-long_node))\n\n x = r*np.cos(L)\n y = r*np.sin(L)\n vx = rdot*np.cos(L) - r*Ldot*np.sin(L)\n vy = rdot*np.sin(L) + r*Ldot*np.cos(L)\n\n # Broadcast to a common shape and create vectors\n (x,y,z,vx,vy,vz) = np.broadcast_arrays(x,y,z,vx,vy,vz)\n\n pos = np.stack([x, y, z], axis=-1)\n vel = np.stack([vx, vy, vz], axis=-1)\n\n return (pos, vel)\n\n # Given the state vector x,y,z,vx,vy,vz retrieve the geometric elements\n # Returns: a, e, inc, long_peri, long_node, mean_anomaly\n # From Renner and Sicardy (2006) EQ 22-47\n\n def geom_from_state(self, pos, vel, body_gm=0., tol=1.e-6):\n \"\"\"Return geometric orbital elements based on position and velocity.\n\n Routine adapted from SWIFT's orbel_vx2el.f by Rob French.\n \"\"\"\n\n (pos, vel) = np.broadcast_arrays(pos, vel)\n pos = np.asfarray(pos)\n vel = np.asfarray(vel)\n\n x = pos[...,0]\n y = pos[...,1]\n z = pos[...,2]\n\n vx = vel[...,0]\n vy = vel[...,1]\n vz = vel[...,2]\n\n # EQ 22-25\n r = np.sqrt(x**2 + y**2)\n L = Gravity._pos_arctan2(y, x)\n rdot = vx*np.cos(L) + vy*np.sin(L)\n Ldot = (vy*np.cos(L)-vx*np.sin(L))/r\n\n # Initial conditions\n a = r\n e = 0.\n inc = 0.\n rc = 0.\n Lc = 0.\n zc = 0.\n rdotc = 0.\n Ldotc = 0.\n zdotc = 0.\n\n old_diffmax = 1.e38\n old_diff = None\n idx_to_use = np.where(x!=-1e38,True,False) # All True\n announced = False\n while True:\n (n, kappa, nu, eta2, chi2, \n alpha1, alpha2, alphasq) = self._geom_to_freq(a, e, inc, body_gm)\n ret = Gravity._freq_to_geom(r, L, z, rdot, Ldot, vz, rc, Lc, zc, rdotc, \n Ldotc, zdotc, n, kappa, nu, eta2, chi2,\n alpha1, alpha2, alphasq)\n old_a = a\n (a, e, inc, long_peri, long_node, lam, \n rc, Lc, zc, rdotc, Ldotc, zdotc) = ret\n diff = np.abs(a-old_a)\n diffmax = np.max(diff[idx_to_use])\n if diffmax < tol:\n break\n if diffmax > old_diffmax:\n idx_to_use = np.where(diff > old_diff,False,True) & idx_to_use\n if not idx_to_use.any(): break\n if not announced:\n warnings.warn('geom_from_state() started diverging! ' +\n 'Tolerance met = %e' % diffmax)\n announced = True\n\n diff_of_diff = diff - old_diff\n bad_idx = diff_of_diff.argmax()\n warnings.warn('Bad index ' + str(bad_idx) +\n '; X = ' + str(x[bad_idx]) +\n '; Y = ' + str(y[bad_idx]) +\n '; Z =' + str(z[bad_idx]) +\n '; VX = ' + str(vx[bad_idx]) +\n '; VY = ' + str(vy[bad_idx]) +\n '; VZ = ' + str(vz[bad_idx]))\n old_diffmax = diffmax\n old_diff = diff\n\n return (a, e, inc, lam, long_peri, long_node)\n\n ####################################\n # Internal methods\n ####################################\n\n # Take the geometric osculating elements and create frequencies\n # Returns n, kappa, nu, eta2, chi2, alpha1, alpha2, alphasq\n # From Renner & Sicardy (2006) EQ 14-21\n\n def _geom_to_freq(self, a, e, inc, body_gm=0.):\n gmsum = self.gm + body_gm\n j2 = 0.\n j4 = 0.\n if len(self.jn) > 0:\n j2 = self.jn[0] * self.r2/a**2\n if len(self.jn) > 1:\n j4 = self.jn[1] * self.r2**2/a**4\n\n gm_a3 = gmsum / a**3\n sqrt_gm_a3 = np.sqrt(gm_a3)\n\n n = sqrt_gm_a3 * (1. + 3./4.*j2 - 15./16.*j4 -\n 9./32.*j2**2 + 45./64.*j2*j4 +\n 27./128.*j2**3 +\n 3.*j2*e**2 - 12.*j2*inc**2)\n\n kappa = sqrt_gm_a3 * (1. - 3./4.*j2 + 45./16.*j4 -\n 9./32.*j2**2 + 135./64.*j2*j4 -\n 27./128.*j2**3 - 9.*j2*inc**2)\n\n nu = sqrt_gm_a3 * (1. 
+ 9./4.*j2 - 75./16.*j4 -\n 81./32.*j2**2 + 675./64.*j2*j4 +\n 729./128.*j2**3 +\n 6.*j2*e**2 - 51./4.*j2*inc**2)\n\n eta2 = gm_a3 * (1. - 2.*j2 + 75./8.*j4)\n\n chi2 = gm_a3 * (1. + 15./2.*j2 - 175./8.*j4)\n\n alpha1 = 1./3. * (2.*nu + kappa)\n alpha2 = 2.*nu - kappa\n alphasq = alpha1 * alpha2\n\n return (n, kappa, nu, eta2, chi2, alpha1, alpha2, alphasq)\n\n\n # Take the frequencies and convert them to cylindrical coordinates\n # Returns a, e, inc, long_peri, long_node, lam, rc, Lc, zc, rdotc, Ldotc, zdotc\n # From Renner & Sicardy (2006) EQ 36-41\n\n @staticmethod\n def _freq_to_geom(r, L, z, rdot, Ldot, zdot, rc, Lc, zc, rdotc, Ldotc, \n zdotc, n, kappa, nu, eta2, chi2, alpha1, alpha2, alphasq):\n kappa2 = kappa**2\n n2 = n**2\n\n # EQ 42-47\n a = (r-rc) / (1.-(Ldot-Ldotc-n)/(2.*n))\n\n e = np.sqrt(((Ldot-Ldotc-n)/(2.*n))**2 + ((rdot-rdotc)/(a*kappa))**2)\n\n inc = np.sqrt(((z-zc)/a)**2 + ((zdot-zdotc)/(a*nu))**2)\n\n lam = L - Lc - 2.*n/kappa*(rdot-rdotc)/(a*kappa)\n\n long_peri = (lam - Gravity._pos_arctan2(rdot-rdotc,\n a*kappa*(1.-(r-rc)/a))) % TWOPI\n\n long_node = (lam - Gravity._pos_arctan2(nu*(z-zc), zdot-zdotc)) % TWOPI\n\n # EQ 36-41\n rc = (a * e**2 * (3./2.*eta2/kappa2 - 1. - \n eta2/2./kappa2*np.cos(2.*(lam-long_peri))) +\n a * inc**2 * (3./4.*chi2/kappa2 - 1. + \n chi2/4./alphasq*np.cos(2.*(lam-long_node))))\n\n Lc = (e**2*(3./4. + eta2/2./kappa2)*n/kappa*np.sin(2.*(lam-long_peri)) - \n inc**2*chi2/4./alphasq*n/nu*np.sin(2.*(lam-long_node)))\n\n zc = a*inc*e*(chi2/2./kappa/alpha1*np.sin(2*lam-long_peri-long_node) - \n 3./2.*chi2/kappa/alpha2*np.sin(long_peri-long_node))\n\n rdotc = (a*e**2*eta2/kappa*np.sin(2.*(lam-long_peri)) - \n a*inc**2*chi2/2./alphasq*nu*np.sin(2.*(lam-long_node)))\n\n Ldotc = (e**2*n*(7./2. - 3.*eta2/kappa2 - kappa2/2./n2 + \n (3./2. + eta2/kappa2)*np.cos(2.*(lam-long_peri))) +\n inc**2*n*(2. - kappa2/2./n2 - 3./2.*chi2/kappa2 - \n chi2/2./alphasq*np.cos(2.*(lam-long_node))))\n\n zdotc = a*inc*e*(chi2*(kappa+nu)/2./kappa/\n alpha1*np.cos(2*lam-long_peri-long_node) + \n 3./2.*chi2*(kappa-nu)/kappa/alpha2*np.cos(long_peri-long_node))\n\n # EQ 30-35\n # r = a*(1. - e*np.cos(lam-long_peri)) + rc\n # \n # L = lam + 2*e*n/kappa*np.sin(lam-long_peri) + Lc\n # \n # z = a*inc*np.sin(lam-long_node) + zc\n # \n # rdot = a*e*kappa*np.sin(lam-long_peri) + rdotc\n # \n # Ldot = n*(1. 
+ 2.*e*np.cos(lam-long_peri)) + Ldotc\n # \n # zdot = a*inc*nu*np.cos(lam-long_node) + zdotc\n\n return (a, e, inc, long_peri, long_node, lam,\n rc, Lc, zc, rdotc, Ldotc, zdotc)\n\n # A nicer version of arctan2\n @staticmethod\n def _pos_arctan2(y, x):\n return np.arctan2(y, x) % TWOPI \n\n################################################################################\n# Planetary gravity fields defined...\n################################################################################\n\n# From http://ssd.jpl.nasa.gov/?planet_phys_par\nG_MKS = 6.67428e-11 # m^3 kg^-1 s^-2\nG_CGS = 6.67428e-08 # cm^3 g^-1 s^-2\n\nG_PER_KG = G_MKS / 1.e9\nG_PER_G = G_CGS / 1.e15\n\n# From http://ssd.jpl.nasa.gov/?planet_phys_par\nSUN = Gravity(132712440018., [], 695500.)\n\n# From http://ssd.jpl.nasa.gov/?planet_phys_par\nMERCURY = Gravity(0.330104e24 * G_PER_KG, [], 2439.7 )\nVENUS = Gravity( 4.86732e24 * G_PER_KG, [], 6051.8 )\nEARTH = Gravity( 5.97219e24 * G_PER_KG, [], 6378.14)\nMARS = Gravity(0.641693e24 * G_PER_KG, [], 3396.19)\n\n# Earlier values from http://ssd.jpl.nasa.gov/?gravity_fields_op\nJUPITER_V1 = Gravity(126686535., [14696.43e-06, -587.14e-06, 34.25e-06], 71492.)\n#SATURN = Gravity( 37931208., [16290.71e-06, -935.83e-06, 86.14e-06], 60330.)\nSATURN_V1 = Gravity( 37931207.7, [16290.71e-06, -936.83e-06, 86.14e-06, -10.e-06], 60330.)\nURANUS_V1 = Gravity( 5793964., [ 3341.29e-06, -30.44e-06 ], 26200.)\nNEPTUNE_V1 = Gravity( 6835100., [ 3408.43e-06, -33.40e-06 ], 25225.)\n\n# Updated September 15, 2015 from http://ssd.jpl.nasa.gov/?gravity_fields_op\nJUPITER = Gravity(126686536.1, [14695.62e-06, -591.31e-06, 20.78e-06], 71492.)\nSATURN = Gravity( 37931208. , [16290.71e-06, -935.83e-06, 86.14e-06,\n -10.e-06], 60330.)\nURANUS = Gravity( 5793951.3, [ 3510.68e-06, -34.17e-06 ], 25559.)\nNEPTUNE = Gravity( 6835100. 
, [ 3408.43e-06, -33.40e-06 ], 25225.)\n\n# From http://arxiv.org/abs/0712.1261\nPLUTO_ONLY = Gravity(869.6, [], 1151.)\nPLUTO = PLUTO_ONLY\n\n# From http://ssd.jpl.nasa.gov/?sat_phys_par\nMOON = Gravity(4902.801, [], 1737.5)\n\nIO = Gravity(5959.916, [], 1821.6)\nEUROPA = Gravity(3202.739, [], 1560.8)\nGANYMEDE = Gravity(9887.834, [], 2631.2)\nCALLISTO = Gravity(7179.289, [], 2410.3)\n\nMIMAS = Gravity( 2.5026, [], 198.20)\nENCELADUS = Gravity( 7.2027, [], 252.10)\nTETHYS = Gravity( 41.2067, [], 533.00)\nDIONE = Gravity( 73.1146, [], 561.70)\nRHEA = Gravity( 153.9426, [], 764.30)\nTITAN = Gravity(8978.1382, [], 2574.73)\nHYPERION = Gravity( 0.3727, [], 135.00)\nIAPETUS = Gravity( 120.5038, [], 735.60)\nPHOEBE = Gravity( 0.5532, [], 106.50)\n\nMIRANDA = Gravity( 4.4, [], 235.8)\nARIEL = Gravity( 86.4, [], 578.9)\nUMBRIEL = Gravity( 81.5, [], 584.7)\nTITANIA = Gravity( 228.2, [], 788.9)\nOBERON = Gravity( 192.4, [], 761.4)\n\nTRITON = Gravity(1427.6, [], 1353.4)\nNEREID = Gravity( 2.06, [], 170.)\n\nCHARON = Gravity(105.9, [], 603.6)\n\n# Sets with relatively large mass ratios\nSUN_JUPITER = Gravity(SUN.gm + JUPITER.gm, [], SUN.rp)\n\nJUPITER_GALS = Gravity(JUPITER.gm + IO.gm + EUROPA.gm + GANYMEDE.gm +\n CALLISTO.gm, JUPITER.jn, JUPITER.rp)\n\nSATURN_TITAN = Gravity(SATURN.gm + TITAN.gm, SATURN.jn, SATURN.rp)\n\nPLUTO_CHARON_OLD = Gravity(PLUTO_ONLY.gm + CHARON.gm, [], PLUTO_ONLY.rp)\n\n################################################################################\n# Revised Pluto-Charon gravity\n#\n# Outside a ring of radius R, the gravity moments are -P2n(0).\n# J2 = 1/2; J4 = -3/8; J6 = 5/16; J8 = -35/128; J10 = 63/256\n# We can stop there.\n#\n# The gravity potential in the equatorial plane for one body is:\n# phi(r) = -GM/r (1 - SUM[ J2n (R/r)^(2n) P_2n(0) ]\n# = -GM/r + (J2 GM R^2 P_2(0)) / r^3\n# + (J4 GM R^4 P_4(0)) / r^5 + ...\n#\n# For two bodies with GM1, GM2, R1, R2, but the same J2n series...\n#\n# phi(r) = -(GM1 + GM2) / r\n# + (GM1 R1^2 + GM2 R2^2) (J2 P_2(0)) / r^3\n# + (GM1 R1^4 + GM2 R2^4) (J4 P_4(0)) / r^5 ...\n#\n# Scaling everything to GM = GM1 + GM2; R = R2:\n# J2' = J2 (GM1 (R1/R2)^2 + GM2) / (GM1 + GM2)\n# J4' = J4 (GM1 (R1/R2)^4 + GM2) / (GM1 + GM2)\n# etc.\n\nPLUTO_A = 19596. * CHARON.gm / (PLUTO.gm + CHARON.gm)\nCHARON_A = 19596. - PLUTO_A\nratio2 = (PLUTO_A / CHARON_A)**2\ngm1 = PLUTO_ONLY.gm\ngm2 = CHARON.gm\nPLUTO_CHARON_AS_RINGS = Gravity(gm1 + gm2,\n [ 1/2. * (gm1 * ratio2 + gm2) / (gm1 + gm2),\n -3/8. * (gm1 * ratio2**2 + gm2) / (gm1 + gm2),\n 5/16. * (gm1 * ratio2**3 + gm2) / (gm1 + gm2),\n -35/128. * (gm1 * ratio2**4 + gm2) / (gm1 + gm2),\n 63/256. 
* (gm1 * ratio2**5 + gm2) / (gm1 + gm2)], CHARON_A)\nPLUTO_CHARON = PLUTO_CHARON_AS_RINGS\n################################################################################\n\nLOOKUP = {\n \"SUN\": SUN,\n \"MERCURY\": MERCURY,\n \"VENUS\": VENUS,\n \"EARTH\": EARTH,\n \"MARS\": MARS,\n \"JUPITER\": JUPITER,\n \"SATURN\": SATURN,\n \"URANUS\": URANUS,\n \"NEPTUNE\": NEPTUNE,\n \"PLUTO_ONLY\": PLUTO_ONLY,\n \"PLUTO\": PLUTO_ONLY,\n \"MOON\": MOON,\n \"IO\": IO,\n \"EUROPA\": EUROPA,\n \"GANYMEDE\": GANYMEDE,\n \"CALLISTO\": CALLISTO,\n \"MIMAS\": MIMAS,\n \"ENCELADUS\": ENCELADUS,\n \"TETHYS\": TETHYS,\n \"DIONE\": DIONE,\n \"RHEA\": RHEA,\n \"TITAN\": TITAN,\n \"HYPERION\": HYPERION,\n \"IAPETUS\": IAPETUS,\n \"PHOEBE\": PHOEBE,\n \"MIRANDA\": MIRANDA,\n \"ARIEL\": ARIEL,\n \"UMBRIEL\": UMBRIEL,\n \"TITANIA\": TITANIA,\n \"OBERON\": OBERON,\n \"TRITON\": TRITON,\n \"NEREID\": NEREID,\n \"CHARON\": CHARON,\n \"SUN_JUPITER\": SUN_JUPITER,\n \"JUPITER_GALS\": JUPITER_GALS,\n \"SATURN_TITAN\": SATURN_TITAN,\n \"PLUTO_CHARON\": PLUTO_CHARON,\n \"SOLAR SYSTEM BARYCENTER\": SUN_JUPITER,\n \"SSB\": SUN_JUPITER,\n \"JUPITER BARYCENTER\": JUPITER_GALS,\n \"SATURN BARYCENTER\": SATURN_TITAN,\n \"URANUS BARYCENTER\": URANUS,\n \"NEPTUNE BARYCENTER\": NEPTUNE,\n \"PLUTO BARYCENTER\": PLUTO_CHARON\n}\n\n########################################\n# UNIT TESTS\n########################################\n\nimport unittest\n\nERROR_TOLERANCE = 1.e-15\n\nclass Test_Gravity(unittest.TestCase):\n\n def test_uncombo(self):\n\n # Testing scalars in a loop...\n tests = 100\n planets = [JUPITER, SATURN, URANUS, NEPTUNE]\n factors = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]\n\n for test in range(tests):\n for obj in planets:\n for e in (0., 0.1):\n for i in (0., 0.1):\n a = obj.rp * 10. ** (np.random.rand() * 2.)\n for f in factors:\n b = obj.solve_a(obj.combo(a,f,e,i), f, e, i)\n c = abs((b - a) / a)\n self.assertTrue(c < ERROR_TOLERANCE)\n\n # PLUTO_CHARON with factors (1,0,0) and (0,0,1)\n for test in range(tests):\n for obj in [PLUTO_CHARON]:\n for e in (0., 0.1):\n for i in (0., 0.1):\n a = obj.rp * 10. ** (np.random.rand() * 2.)\n for f in [(1,0,0),(0,0,1)]:\n b = obj.solve_a(obj.combo(a,f,e,i), f, e, i)\n c = abs((b - a) / a)\n self.assertTrue(c < ERROR_TOLERANCE)\n\n # PLUTO_CHARON with factors (0,1,0) can have duplicated values...\n for test in range(tests):\n for obj in [PLUTO_CHARON]:\n a = obj.rp * 10. ** (np.random.rand() * 2.)\n if obj.kappa2(a) < 0.: continue # this would raise RuntimeError\n\n for f in [(0,1,0)]:\n combo1 = obj.combo(a,f)\n b = obj.solve_a(combo1, f)\n combo2 = obj.combo(b,f)\n c = abs((combo2 - combo1) / combo1)\n self.assertTrue(c < ERROR_TOLERANCE)\n\n # Testing a 100x100 array\n for obj in planets:\n a = obj.rp * 10. ** (np.random.rand(100,100) * 2.)\n for e in (0., 0.1):\n for i in (0., 0.1):\n for f in factors:\n b = obj.solve_a(obj.combo(a,f,e,i), f, e, i)\n c = abs((b - a) / a)\n self.assertTrue(np.all(c < ERROR_TOLERANCE))\n\n # Testing with first-order cancellation\n factors = [(1, -1, 0), (1, 0, -1), (0, 1, -1)]\n planets = [JUPITER, SATURN, URANUS, NEPTUNE]\n\n for obj in planets:\n a = obj.rp * 10. ** (np.random.rand(100,100) * 2.)\n for f in factors:\n b = obj.solve_a(obj.combo(a, f), f)\n c = abs((b - a) / a)\n self.assertTrue(np.all(c < ERROR_TOLERANCE))\n\n # Testing with second-order cancellation\n factors = [(2, -1, -1)]\n planets = [JUPITER, SATURN, URANUS, NEPTUNE]\n\n for obj in planets:\n a = obj.rp * 10. 
** (np.random.rand(100,100) * 2.)\n for f in factors:\n b = obj.solve_a(obj.combo(a, f), f)\n c = abs((b - a) / a)\n self.assertTrue(np.all(c < ERROR_TOLERANCE))\n\nif __name__ == '__main__':\n unittest.main()\n\n################################################################################\n"
] |
[
[
"numpy.minimum",
"numpy.sqrt",
"numpy.arctan2",
"numpy.max",
"numpy.all",
"numpy.polyval",
"numpy.where",
"numpy.stack",
"numpy.sin",
"numpy.asfarray",
"numpy.zeros",
"numpy.arccos",
"numpy.random.rand",
"numpy.broadcast_arrays",
"numpy.array",
"numpy.sum",
"numpy.maximum",
"numpy.abs",
"numpy.cos",
"numpy.shape"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
leoalfonso/dit
|
[
"e7d5f680b3f170091bb1e488303f4255eeb11ef4",
"e7d5f680b3f170091bb1e488303f4255eeb11ef4",
"e7d5f680b3f170091bb1e488303f4255eeb11ef4",
"e7d5f680b3f170091bb1e488303f4255eeb11ef4",
"e7d5f680b3f170091bb1e488303f4255eeb11ef4"
] |
[
"dit/npdist.py",
"dit/inference/estimators.py",
"dit/pid/imin.py",
"dit/algorithms/maxentropy.py",
"dit/profiles/marginal_utility_of_information.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nModule defining NumPy array-based distribution classes.\n\nOne of the features of joint distributions is that we can marginalize them.\nThis requires that we are able to construct smaller outcomes from larger\noutcomes. For example, an outcome like '10101' might become '010' if the first\nand last random variables are marginalized. Given the alphabet of the joint\ndistribution, we can construct a tuple such as ('0','1','0') using\nitertools.product, but we really want an outcome which is a string. For\ntuples and other similar containers, we just pass the tuple to the outcome\nconstructor. This technique does not work for strings, as str(('0','1','0'))\nyields \"('0', '1', '0')\". So we have to handle strings separately.\n\nNote:\n For dictionaries...\n \"k in d\" being True means d[k] is a valid operation.\n \"k in d\" being False means d[k] is a KeyError.\n d[k] describes the underlying data structure.\n __in__ describes the underlying data structure.\n __iter__ describes the underlying data structure.\n __len__ describes the underlying data structure.\n For default dictionaries...\n \"k in d\" being True means d[k] is a valid operation.\n \"k in d\" being False means d[k] will modify the data structure to\n make the operation valid. So \"k in d\" is True afterwards.\n d[k] describes the underlying data structure.\n __in__ describes the underlying data structure.\n __iter__ describes the underlying data structure.\n __len__ describes the underlying data structure.\n For distributions...\n \"e in d\" being True means d[e] is a valid operation.\n \"e in d\" being False says nothing about d[e].\n d[e] will be valid if e is in the sample space.\n d[e] will raise an InvalidOutcome if e is not in the sample space.\n d[e] does not describe the underlying data structure.\n It provides a view of the dense data structure.\n With defaultdict, if e not in d, then d[e] will add it.\n With distributions, we don't want the pmf changing size\n just because we queried it. The size will change only on\n assignment.\n __in__ describes the underlying data structure.\n __iter__ describes the underlying data structure.\n __len__ describes the underlying data structure.\n\nFor scalar distributions, the sample space is the alphabet and the alphabet\nis a single set. For (joint) distributions, the sample space is provided\nat initialization and the alphabet is a tuple of alphabets for each random\nvariable. The alphabet for each random variable is a tuple.\n\nAs of now, dit does not support mixed-type alphabets within a single r.v.\nSo you can have outcomes like:\n\n (0, '0'), (1, '1')\n\nbut not like:\n\n (0, '0'), (1, 1)\n\nThis has to do with sorting the alphabets. 
Probably this can be relaxed.\n\n\"\"\"\n\nfrom collections import defaultdict\nfrom operator import itemgetter\nimport itertools\n\nimport numpy as np\nfrom six.moves import map, range, zip # pylint: disable=redefined-builtin\n\nfrom .npscalardist import ScalarDistribution\n\nfrom .helpers import (\n construct_alphabets,\n get_outcome_ctor,\n get_product_func,\n parse_rvs,\n reorder,\n RV_MODES,\n)\n\nfrom .samplespace import SampleSpace, CartesianProduct\n\nfrom .exceptions import (\n InvalidDistribution, InvalidOutcome, ditException\n)\nfrom .math import get_ops, LinearOperations\nfrom .params import ditParams\n\n\ndef _make_distribution(outcomes, pmf, base,\n sample_space=None, prng=None, sparse=True):\n \"\"\"\n An unsafe, but faster, initialization for distributions.\n\n If used incorrectly, the data structure will be inconsistent.\n\n This function can be useful when you are creating many distributions\n in a loop and can guarantee that:\n\n 0) all outcomes are of the same type (eg tuple, str) and length.\n 1) the sample space is in the desired order.\n 1) outcomes and pmf are in the same order as the sample space.\n [Thus, `pmf` should not be a dictionary.]\n\n This function will not order the sample space, nor will it reorder outcomes\n or pmf. It will not forcibly make outcomes and pmf to be sparse or dense.\n It will simply declare the distribution to be sparse or dense. The\n distribution is not validated either.\n\n Returns\n -------\n d : Distribution\n The new distribution.\n\n \"\"\"\n d = Distribution.__new__(Distribution)\n\n # Call init function of BaseDistribution, not of Distribution.\n # This sets the prng.\n super(ScalarDistribution, d).__init__(prng)\n\n d._meta['is_joint'] = True\n d._meta['is_numerical'] = True\n d._meta['is_sparse'] = None\n\n if base is None:\n # Assume default base.\n base = ditParams['base']\n d.ops = get_ops(base)\n\n ## Set the outcome class, ctor, and product function.\n ## Assumption: the class of each outcome is the same.\n klass = outcomes[0].__class__\n d._outcome_class = klass\n d._outcome_ctor = get_outcome_ctor(klass)\n d._product = get_product_func(klass)\n\n # Force the distribution to be numerical and a NumPy array.\n d.pmf = np.asarray(pmf, dtype=float)\n\n # Tuple outcomes, and an index.\n d.outcomes = tuple(outcomes)\n d._outcomes_index = dict(zip(outcomes, range(len(outcomes))))\n\n # Alphabet\n d.alphabet = tuple(construct_alphabets(outcomes))\n\n # Sample space.\n if sample_space is None:\n d._sample_space = CartesianProduct(d.alphabet, d._product)\n elif isinstance(sample_space, SampleSpace):\n d._sample_space = sample_space\n else:\n d._sample_space = SampleSpace(outcomes)\n\n # Set the mask\n d._mask = d._new_mask()\n\n d._meta['is_sparse'] = sparse\n d.rvs = [[i] for i in range(d.outcome_length())]\n\n return d\n\n\nclass Distribution(ScalarDistribution):\n \"\"\"\n A numerical distribution for joint random variables.\n\n Meta Properties\n ---------------\n is_joint\n Boolean specifying if the pmf represents a joint distribution.\n\n is_numerical\n Boolean specifying if the pmf represents numerical values or not.\n The values could be symbolic, for example.\n\n is_sparse : bool\n `True` if `outcomes` and `pmf` represent a sparse distribution.\n\n Private Attributes\n ------------------\n _mask : tuple\n A tuple of booleans specifying if the corresponding random variable\n has been masked or not.\n\n _meta : dict\n A dictionary containing the meta information, described above.\n\n _outcome_class : class\n The class of all 
outcomes in the distribution.\n\n _outcome_ctor : callable\n A callable responsible for converting tuples to outcomes.\n\n _outcomes_index : dict\n A dictionary mapping outcomes to their index in self.outcomes.\n\n _product : function\n A specialized product function, similar to itertools.product. The\n primary difference is that instead of yielding tuples, this product\n function will yield objects which are of the same type as the outcomes.\n\n _rvs : dict\n A dictionary mapping random variable names to their index into the\n outcomes of the distribution.\n\n _sample_space : SampleSpace\n The sample space of the distribution.\n\n Public Attributes\n -----------------\n alphabet : tuple\n A tuple representing the alphabet of the joint random variable. The\n elements of the tuple are tuples, each of which represents the ordered\n alphabet for a single random variable.\n\n outcomes : tuple\n The outcomes of the probability distribution.\n\n ops : Operations instance\n A class which manages addition and multiplication operations for log\n and linear probabilities.\n\n pmf : array-like\n The probability mass function for the distribution. The elements of\n this array are in a one-to-one correspondence with those in `outcomes`.\n\n prng : RandomState\n A pseudo-random number generator with a `rand` method which can\n generate random numbers. For now, this is assumed to be something\n with an API compatibile to NumPy's RandomState class. This attribute\n is initialized to equal dit.math.prng.\n\n Public Methods\n --------------\n from_distribution\n Alternative constructor from an existing distribution.\n\n atoms\n Returns the atoms of the probability space.\n\n coalesce\n Returns a new joint distribution after coalescing random variables.\n\n copy\n Returns a deep copy of the distribution.\n\n outcome_length\n Returns the length of the outcomes in the distribution.\n\n sample_space\n Returns an iterator over the outcomes in the sample space.\n\n get_base\n Returns the base of the distribution.\n\n get_rv_names\n Returns the names of the random variables.\n\n has_outcome\n Returns `True` is the distribution has `outcome` in the sample space.\n\n is_dense\n Returns `True` if the distribution is dense.\n\n is_homogeneous\n Returns `True` if the alphabet for each random variable is the same.\n\n is_joint\n Returns `True` if the distribution is a joint distribution.\n\n is_log\n Returns `True` if the distribution values are log probabilities.\n\n is_numerical\n Returns `True` if the distribution values are numerical.\n\n is_sparse\n Returns `True` if the distribution is sparse.\n\n marginal\n Returns a marginal distribution of the specified random variables.\n\n marginalize\n Returns a marginal distribution after marginalizing random variables.\n\n make_dense\n Add all null outcomes to the pmf.\n\n make_sparse\n Remove all null outcomes from the pmf.\n\n normalize\n Normalizes the distribution.\n\n sample\n Returns a sample from the distribution.\n\n set_base\n Changes the base of the distribution, in-place.\n\n set_rv_names\n Sets the names of the random variables.\n\n to_string\n Returns a string representation of the distribution.\n\n validate\n A method to validate that the distribution is valid.\n\n zipped\n Returns an iterator over (outcome, probability) tuples. The probability\n could be a log probability or a linear probability.\n\n Implementation Notes\n --------------------\n The outcomes and pmf of the distribution are stored as a tuple and a NumPy\n array, respectively. 
The sequences can both be sparse or dense. By sparse,\n we do not mean that the representation is a NumPy sparse array. Rather,\n we mean that the sequences need not contain every outcome in the sample\n space. The order of the outcomes and probabilities will always match the\n order of the sample space, even though their length might not equal the\n length of the sample space.\n\n \"\"\"\n ## Unadvertised attributes\n _sample_space = None\n _mask = None\n _meta = None\n _outcome_class = None\n _outcome_ctor = None\n _outcomes_index = None\n _product = None\n _rvs = None\n _rv_mode = 'indices'\n\n ## Advertised attributes.\n alphabet = None\n outcomes = None\n ops = None\n pmf = None\n prng = None\n\n def __init__(self, outcomes, pmf=None, sample_space=None, base=None,\n prng=None, sort=True, sparse=True, trim=True,\n validate=True):\n \"\"\"\n Initialize the distribution.\n\n Parameters\n ----------\n outcomes : sequence, dict\n The outcomes of the distribution. If `outcomes` is a dictionary,\n then the keys are used as `outcomes`, and the values of\n the dictionary are used as `pmf` instead. The values will not be\n used if probabilities are passed in via `pmf`. Outcomes must be\n hashable, orderable, sized, iterable containers. The length of an\n outcome must be the same for all outcomes, and every outcome must\n be of the same type.\n\n pmf : sequence, None\n The outcome probabilities or log probabilities. `pmf` can be None\n only if `outcomes` is a dict.\n\n sample_space : sequence, CartesianProduct\n A sequence representing the sample space, and corresponding to the\n complete set of possible outcomes. The order of the sample space\n is important. If `None`, then the outcomes are used to determine\n a Cartesian product sample space instead.\n\n base : float, str, None\n If `pmf` specifies log probabilities, then `base` should specify\n the base of the logarithm. If 'linear', then `pmf` is assumed to\n represent linear probabilities. If `None`, then the value for\n `base` is taken from ditParams['base'].\n\n prng : RandomState\n A pseudo-random number generator with a `rand` method which can\n generate random numbers. For now, this is assumed to be something\n with an API compatibile to NumPy's RandomState class. This attribute\n is initialized to equal dit.math.prng.\n\n sort : bool\n If `True`, then each random variable's alphabets are sorted before\n they are finalized. Usually, this is desirable, as it normalizes\n the behavior of distributions which have the same sample spaces\n (when considered as a set). Note that addition and multiplication\n of distributions is defined only if the sample spaces are\n compatible.\n\n sparse : bool\n Specifies the form of the pmf. If `True`, then `outcomes` and `pmf`\n will only contain entries for non-null outcomes and probabilities,\n after initialization. The order of these entries will always obey\n the order of `sample_space`, even if their number is not equal to\n the size of the sample space. If `False`, then the pmf will be\n dense and every outcome in the sample space will be represented.\n\n trim : bool\n Specifies if null-outcomes should be removed from pmf when\n `make_sparse()` is called (assuming `sparse` is `True`) during\n initialization.\n\n validate : bool\n If `True`, then validate the distribution. 
If `False`, then assume\n the distribution is valid, and perform no checks.\n\n Raises\n ------\n InvalidDistribution\n If the length of `values` and `outcomes` are unequal.\n If no outcomes can be obtained from `pmf` and `outcomes` is `None`.\n\n See :meth:`validate` for a list of other potential exceptions.\n\n \"\"\"\n # Note, we are not calling ScalarDistribution.__init__\n # Instead, we want to call BaseDistribution.__init__.\n # And BaseDistribution is the parent of ScalarDistribution.\n # We do this because we want to init the prng AND ignore everything\n # that ScalarDistribution does.\n super(ScalarDistribution, self).__init__(prng) # pylint: disable=bad-super-call\n\n # Set *instance* attributes\n self._meta['is_joint'] = True\n self._meta['is_numerical'] = True\n self._meta['is_sparse'] = None\n\n # Do any checks/conversions necessary to get the parameters.\n outcomes, pmf = self._init(outcomes, pmf, base)\n\n if len(outcomes) == 0 and sample_space is None:\n msg = '`outcomes` must be nonempty if no sample space is given'\n raise InvalidDistribution(msg)\n\n if isinstance(sample_space, SampleSpace):\n if not sample_space._meta['is_joint']:\n msg = '`sample_space` must be a joint sample space.'\n raise InvalidDistribution(msg)\n\n if sort:\n sample_space.sort()\n self._outcome_class = sample_space._outcome_class\n self._outcome_ctor = sample_space._outcome_ctor\n self._product = sample_space._product\n self._sample_space = sample_space\n if isinstance(sample_space, CartesianProduct):\n alphabets = sample_space.alphabets\n else:\n alphabets = construct_alphabets(sample_space._samplespace)\n else:\n if sample_space is None:\n ss = outcomes\n else:\n ss = sample_space\n\n alphabets = construct_alphabets(ss)\n if sort:\n alphabets = tuple(map(tuple, map(sorted, alphabets)))\n\n ## Set the outcome class, ctor, and product function.\n ## Assumption: the class of each outcome is the same.\n klass = ss[0].__class__\n self._outcome_class = klass\n self._outcome_ctor = get_outcome_ctor(klass)\n self._product = get_product_func(klass)\n\n if sample_space is None:\n self._sample_space = CartesianProduct(alphabets, self._product)\n else:\n self._sample_space = SampleSpace(ss)\n\n # Sort everything to match the order of the sample space.\n ## Question: Using sort=False seems very strange and supporting it\n ## makes things harder, since we can't assume the outcomes\n ## and sample space are sorted. Is there a valid use case\n ## for an unsorted sample space?\n if sort and len(outcomes) > 0:\n outcomes, pmf, index = reorder(outcomes, pmf, self._sample_space)\n else:\n index = dict(zip(outcomes, range(len(outcomes))))\n\n # Force the distribution to be numerical and a NumPy array.\n self.pmf = np.asarray(pmf, dtype=float)\n\n\n # Tuple outcomes, and an index.\n self.outcomes = tuple(outcomes)\n self._outcomes_index = index\n\n self.alphabet = tuple(alphabets)\n\n self.rvs = [[i] for i in range(self.outcome_length())]\n\n # Mask\n self._mask = self._new_mask()\n\n if sparse:\n self.make_sparse(trim=trim)\n else:\n self.make_dense()\n\n if validate:\n self.validate()\n\n def _init(self, outcomes, pmf, base):\n \"\"\"\n Pre-initialization with various sanity checks.\n\n \"\"\"\n # Note: We've changed the behavior of _init here.\n # In ScalarDistribution it returns a 3-tuple. 
Here, a 2-tuple.\n\n # Attempt to grab outcomes and pmf from a dictionary\n try:\n outcomes_ = tuple(outcomes.keys())\n pmf_ = tuple(outcomes.values())\n except AttributeError:\n pass\n else:\n outcomes = outcomes_\n if pmf is not None:\n msg = '`pmf` must be `None` if `outcomes` is a dict.'\n raise InvalidDistribution(msg)\n pmf = pmf_\n\n if pmf is None:\n msg = '`pmf` was `None` but `outcomes` was not a dict.'\n raise InvalidDistribution(msg)\n\n # Make sure pmf and outcomes are sequences\n try:\n len(outcomes)\n len(pmf)\n except TypeError:\n raise TypeError('`outcomes` and `pmf` must be sequences.')\n\n if len(pmf) != len(outcomes):\n msg = \"Unequal lengths for `pmf` and `outcomes`\"\n raise InvalidDistribution(msg)\n\n # reorder() and other functions require that outcomes be indexable. So\n # we make sure it is. We must check for zero length outcomes since, in\n # principle, you can initialize with a 0-length `pmf` and `outcomes`.\n if len(outcomes):\n try:\n outcomes[0]\n except TypeError:\n raise ditException('`outcomes` must be indexable.')\n\n # Determine if the pmf represents log probabilities or not.\n if base is None:\n # Provide help for obvious case of linear probabilities.\n from .validate import is_pmf\n if is_pmf(np.asarray(pmf, dtype=float), LinearOperations()):\n base = 'linear'\n else:\n base = ditParams['base']\n self.ops = get_ops(base)\n\n return outcomes, pmf\n\n def _new_mask(self, from_mask=None, complement=None):\n \"\"\"\n Creates a new mask for the distribution.\n\n Parameters\n ----------\n from_mask : iter | None\n Create a mask from an existing mask. If ``None``, then a mask\n will be created which is ``False`` for each random variable.\n\n complement : bool\n If ``True``, invert the mask that would have been built.\n This includes inverting the mask when ``from_mask=None``.\n\n Returns\n -------\n mask : tuple\n The newly created mask.\n\n \"\"\"\n if from_mask is None:\n L = self.outcome_length(masked=False)\n mask = [False for _ in range(L)]\n else:\n mask = [bool(b) for b in from_mask]\n\n if complement:\n mask = [not b for b in mask]\n\n mask = tuple(mask)\n\n self._mask = mask\n return mask\n\n @classmethod\n def from_distribution(cls, dist, base=None, prng=None):\n \"\"\"\n Returns a new Distribution from an existing distribution.\n\n Parameters\n ----------\n dist : Distribution, ScalarDistribution\n The existing distribution\n\n base : 'linear', 'e', or float\n Optionally, change the base of the new distribution. If `None`,\n then the new distribution will have the same base as the existing\n distribution.\n\n prng : RandomState\n A pseudo-random number generator with a `rand` method which can\n generate random numbers. For now, this is assumed to be something\n with an API compatible to NumPy's RandomState class. If `None`,\n then we initialize to dit.math.prng. Importantly, we do not\n copy the prng of the existing distribution. 
For that, see copy().\n\n Returns\n -------\n d : Distribution\n The new distribution.\n\n \"\"\"\n if dist.is_joint():\n if not isinstance(dist, ScalarDistribution):\n raise NotImplementedError\n else:\n # Assume it is a Distribution.\n # Easiest way is to just copy it and then override the prng.\n d = dist.copy(base=base)\n else:\n if not isinstance(dist, ScalarDistribution):\n raise NotImplementedError\n else:\n # Assume it is a ScalarDistribution\n from .convert import SDtoD\n d = SDtoD(dist)\n if base is not None:\n d.set_base(base)\n\n if prng is None:\n # Do not use copied prng.\n d.prng = np.random.RandomState()\n else:\n # Use specified prng.\n d.prng = prng\n\n return d\n\n @classmethod\n def from_ndarray(cls, ndarray, base=None, prng=None):\n \"\"\"\n Construct a Distribution from a pmf stored as an ndarray.\n\n Parameters\n ----------\n ndarray : np.ndarray\n pmf in the form of an ndarray, where each axis is a variable and\n the index along that axis is the variable's value.\n base : 'linear', 'e', or float\n Optionally, specify the base of the new distribution. If `None`,\n then the new distribution will be assumed to have a linear\n distribution.\n prng : RandomState\n A pseudo-random number generator with a `rand` method which can\n generate random numbers. For now, this is assumed to be something\n with an API compatible to NumPy's RandomState class. If `None`,\n then we initialize to dit.math.prng.\n\n Returns\n -------\n d : Distribution\n The distribution resulting from interpreting `ndarray` as a pmf.\n\n \"\"\"\n return cls(*zip(*np.ndenumerate(ndarray)), base=base, prng=prng)\n\n @classmethod\n def from_rv_discrete(cls, ssrv, prng=None):\n \"\"\"\n Create a Distribution from a scipy.states.rv_discrete instance.\n\n Parameters\n ----------\n ssrv : scipy.stats.rv_discrete\n The random variable to convert to a dit.Distribution.\n prng : RandomState\n A pseudo-random number generator with a `rand` method which can\n generate random numbers. For now, this is assumed to be something\n with an API compatibile to NumPy's RandomState class. If `None`,\n then we initialize to dit.math.prng.\n\n Returns\n -------\n d : Distribution\n A Distribution representation of `ssrv`.\n \"\"\"\n sd = ScalarDistribution.from_rv_discrete(ssrv=ssrv, prng=prng)\n return cls.from_distribution(sd)\n\n def __setitem__(self, outcome, value):\n \"\"\"\n Sets the probability associated with `outcome`.\n\n Parameters\n ----------\n outcome : outcome\n Any hashable and equality comparable object in the sample space.\n If `outcome` does not exist in the sample space, then an\n InvalidOutcome exception is raised.\n value : float\n The probability or log probability of the outcome.\n\n Returns\n -------\n p : float\n The probability (or log probability) of the outcome.\n\n Raises\n ------\n InvalidOutcome\n If `outcome` does not exist in the sample space.\n\n Notes\n -----\n Setting the value of the outcome never deletes the outcome, even if the\n value is equal to the null probabilty. After a setting operation,\n the outcome will always exist in `outcomes` and `pmf`.\n\n See Also\n --------\n __delitem__\n\n \"\"\"\n if not self.has_outcome(outcome, null=True):\n # Then, the outcome is not in the sample space.\n raise InvalidOutcome(outcome)\n\n idx = self._outcomes_index.get(outcome, None)\n new_outcome = idx is None\n\n if not new_outcome:\n # If the distribution is dense, we will always be here.\n # If the distribution is sparse, then we are here for an existing\n # outcome. 
In the sparse case, we *could* delete the outcome\n # if the value was zero, but we have choosen to let setting always\n # \"set\" and deleting always \"delete\".\n self.pmf[idx] = value\n else:\n # Thus, the outcome is new in a sparse distribution. Even if the\n # value is zero, we still set the value and add it to pmf.\n\n # 1. Add the new outcome and probability\n self.outcomes = self.outcomes + (outcome,)\n self._outcomes_index[outcome] = len(self.outcomes) - 1\n pmf = [p for p in self.pmf] + [value]\n\n # 2. Reorder ### This call is different from Distribution\n outcomes, pmf, index = reorder(self.outcomes, pmf,\n self._sample_space)\n\n # 3. Store\n self.outcomes = tuple(outcomes)\n self._outcomes_index = index\n self.pmf = np.array(pmf, dtype=float)\n\n def _validate_outcomes(self):\n \"\"\"\n Returns `True` if the outcomes are valid.\n\n Valid means each outcome is in the sample space (and thus of the\n proper class and proper length) and also that the outcome class\n supports the Sequence idiom.\n\n Returns\n -------\n v : bool\n `True` if the outcomes are valid.\n\n Raises\n ------\n InvalidOutcome\n When an outcome is not in the sample space.\n\n \"\"\"\n from .validate import validate_sequence\n\n v = super(Distribution, self)._validate_outcomes()\n # If we survived, then all outcomes have the same class.\n # Now, we just need to make sure that class is a sequence.\n v &= validate_sequence(self.outcomes[0])\n return v\n\n def coalesce(self, rvs, rv_mode=None, extract=False):\n \"\"\"\n Returns a new joint distribution after coalescing random variables.\n\n Given n lists of random variables in the original joint distribution,\n the coalesced distribution is a joint distribution over n random\n variables. Each random variable is a coalescing of random variables\n in the original joint distribution.\n\n Parameters\n ----------\n rvs : sequence\n A sequence whose elements are also sequences. Each inner sequence\n defines a random variable in the new distribution as a combination\n of random variables in the original distribution. The length of\n `rvs` must be at least one. The inner sequences need not be\n pairwise mutually exclusive with one another, and each can contain\n repeated random variables.\n rv_mode : str, None\n Specifies how to interpret the elements of `rvs`. Valid options\n are: {'indices', 'names'}. If equal to 'indices', then the elements\n of `rvs` are interpreted as random variable indices. If equal to\n 'names', the the elements are interpreted as random variable names.\n If `None`, then the value of `dist._rv_mode` is consulted.\n extract : bool\n If the length of `rvs` is 1 and `extract` is `True`, then instead\n of the new outcomes being 1-tuples, we extract the sole element to\n create a joint distribution over the random variables in `rvs[0]`.\n\n Returns\n -------\n d : distribution\n The coalesced distribution.\n\n Examples\n --------\n If we have a joint distribution ``d`` over 3 random variables such as:\n A = (X,Y,Z)\n and would like a new joint distribution over 6 random variables:\n B = (X,Y,Z,X,Y,Z)\n then this is achieved as:\n >>> B = d.coalesce([[0,1,2,0,1,2]], extract=True)\n\n If you want:\n B = ((X,Y), (Y,Z))\n Then you do:\n >>> B = d.coalesce([[0,1],[1,2]])\n\n Notes\n -----\n Generally, the outcomes of the new distribution will be tuples instead\n of matching the outcome class of the original distribution. This is\n because some outcome classes are not recursive containers. 
For example,\n one cannot have a string of strings where each string consists of more\n than one character. Note however, that it is perfectly valid to have\n a tuple of tuples. Either way, the elements within each tuple of the\n new distribution will still match the outcome class of the original\n distribution.\n\n See Also\n --------\n marginal, marginalize\n\n \"\"\"\n from array import array\n\n # We allow repeats and want to keep the order. We don't need the names.\n parse = lambda rv: parse_rvs(self, rv, rv_mode=rv_mode,\n unique=False, sort=False)[1]\n indexes = [parse(rv) for rv in rvs]\n\n # Determine how new outcomes are constructed.\n if len(rvs) == 1 and extract:\n ctor_o = lambda x: x[0]\n else:\n ctor_o = tuple\n if extract:\n raise Exception('Cannot extract with more than one rv.')\n\n # Determine how elements of new outcomes are constructed.\n ctor_i = self._outcome_ctor\n\n # Build the distribution.\n factory = lambda: array('d')\n d = defaultdict(factory)\n for outcome, p in self.zipped():\n # Build a list of inner outcomes. \"c\" stands for \"constructed\".\n c_outcome = [ctor_i([outcome[i] for i in rv]) for rv in indexes]\n # Build the outer outcome from the inner outcomes.\n c_outcome = ctor_o(c_outcome)\n d[c_outcome].append(p)\n\n outcomes = tuple(d.keys())\n pmf = map(np.frombuffer, d.values())\n pmf = map(self.ops.add_reduce, pmf)\n pmf = tuple(pmf)\n\n # Preserve the sample space during coalescing.\n sample_spaces = [self._sample_space.coalesce([idxes], extract=True)\n for idxes in indexes]\n if isinstance(self._sample_space, CartesianProduct):\n sample_space = CartesianProduct(sample_spaces,\n product=itertools.product)\n if extract:\n sample_space = sample_space.alphabets[0]\n else:\n if extract:\n # There is only one sample space: len(indexes) = 1\n sample_space = sample_spaces[0]\n else:\n sample_space = list(zip(*sample_spaces))\n\n d = Distribution(outcomes, pmf,\n base=self.get_base(),\n sort=True,\n sample_space=sample_space,\n sparse=self.is_sparse(),\n validate=False)\n\n # We do not set the rv names, since these are new random variables.\n\n # Set the mask\n L = len(indexes)\n d._mask = tuple(False for _ in range(L))\n\n return d\n\n def condition_on(self, crvs, rvs=None, rv_mode=None, extract=False):\n \"\"\"\n Returns distributions conditioned on random variables ``crvs``.\n\n Optionally, ``rvs`` specifies which random variables should remain.\n\n NOTE: Eventually this will return a conditional distribution.\n\n Parameters\n ----------\n crvs : list\n The random variables to condition on.\n rvs : list, None\n The random variables for the resulting conditional distributions.\n Any random variable not represented in the union of ``crvs`` and\n ``rvs`` will be marginalized. If ``None``, then every random\n variable not appearing in ``crvs`` is used.\n rv_mode : str, None\n Specifies how to interpret ``crvs`` and ``rvs``. Valid options are:\n {'indices', 'names'}. If equal to 'indices', then the elements\n of ``crvs`` and ``rvs`` are interpreted as random variable indices.\n If equal to 'names', the the elements are interpreted as random\n varible names. 
If ``None``, then the value of ``self._rv_mode`` is\n consulted, which defaults to 'indices'.\n extract : bool\n If the length of either ``crvs`` or ``rvs`` is 1 and ``extract`` is\n ``True``, then instead of the new outcomes being 1-tuples, we\n extract the sole element to create scalar distributions.\n\n Returns\n -------\n cdist : dist\n The distribution of the conditioned random variables.\n dists : list of distributions\n The conditional distributions for each outcome in ``cdist``.\n\n Examples\n --------\n First we build a distribution P(X,Y,Z) representing the XOR logic gate.\n\n >>> pXYZ = dit.example_dists.Xor()\n >>> pXYZ.set_rv_names('XYZ')\n\n We can obtain the conditional distributions P(X,Z|Y) and the marginal\n of the conditioned variable P(Y) as follows::\n\n >>> pY, pXZgY = pXYZ.condition_on('Y')\n\n If we specify ``rvs='Z'``, then only 'Z' is kept and thus, 'X' is\n marginalized out::\n\n >>> pY, pZgY = pXYZ.condition_on('Y', rvs='Z')\n\n We can condition on two random variables::\n\n >>> pXY, pZgXY = pXYZ.condition_on('XY')\n\n The equivalent call using indexes is:\n\n >>> pXY, pZgXY = pXYZ.condition_on([0, 1], rv_mode='indexes')\n\n \"\"\"\n crvs, cindexes = parse_rvs(self, crvs, rv_mode, unique=True, sort=True)\n if rvs is None:\n indexes = set(range(self.outcome_length())) - set(cindexes)\n else:\n rvs, indexes = parse_rvs(self, rvs, rv_mode, unique=True, sort=True)\n\n union = set(cindexes).union(indexes)\n if len(union) != len(cindexes) + len(indexes):\n raise ditException('`crvs` and `rvs` must have no intersection.')\n\n # Marginalize the random variables not in crvs or rvs\n if len(union) < self.outcome_length():\n mapping = dict(zip(sorted(union), range(len(union))))\n d = self.marginal(union, rv_mode=RV_MODES.INDICES)\n # Now we need to shift the indices to their new index values.\n cindexes = [mapping[idx] for idx in cindexes]\n indexes = [mapping[idx] for idx in indexes]\n else:\n # Make a copy so we don't have to worry about changing the input\n # distribution when we make it sparse.\n d = self.copy()\n\n # It's just easier to not worry about conditioning on zero probs.\n sparse = d.is_sparse()\n d.make_sparse()\n\n # Note that any previous mask of d from the marginalization will be\n # ignored when we take new marginals. This is desirable here.\n\n cdist = d.marginal(cindexes, rv_mode=RV_MODES.INDICES)\n dist = d.marginal(indexes, rv_mode=RV_MODES.INDICES)\n sample_space = dist._sample_space\n rv_names = dist.get_rv_names()\n\n ops = d.ops\n base = ops.get_base()\n ctor = d._outcome_ctor\n\n # A list of indexes of conditioned outcomes for each joint outcome.\n # These are the indexes of w in the pmf of P(w) for each ws in P(ws).\n cidx = cdist._outcomes_index\n coutcomes = [cidx[ctor([o[i] for i in cindexes])] for o in d.outcomes]\n\n # A list of indexes of outcomes for each joint outcome.\n # These are the indexes of s in the pmf of P(s) for each ws in P(ws).\n idx = dist._outcomes_index\n outcomes = [idx[ctor([o[i] for i in indexes])] for o in d.outcomes]\n\n cprobs = np.array([ops.invert(cdist.pmf[i]) for i in coutcomes])\n probs = ops.mult(d.pmf, cprobs)\n\n # Now build the distributions\n pmfs = np.empty((len(cdist), len(dist)), dtype=float)\n pmfs.fill(ops.zero)\n for i, (coutcome, outcome) in enumerate(zip(coutcomes, outcomes)):\n pmfs[coutcome, outcome] = probs[i]\n dists = [Distribution(dist.outcomes, pmfs[i], sparse=sparse,\n base=base, sample_space=sample_space, validate=False)\n for i in range(pmfs.shape[0])]\n\n # Set the masks and r.v. 
names for each conditional distribution.\n for dd in dists:\n dd._new_mask(from_mask=dist._mask)\n dd.set_rv_names(rv_names)\n\n if extract:\n if len(cindexes) == 1:\n cdist = ScalarDistribution.from_distribution(cdist)\n if len(indexes) == 1:\n dists = [ScalarDistribution.from_distribution(d) for d in dists]\n\n return cdist, dists\n\n def copy(self, base=None):\n \"\"\"\n Returns a (deep) copy of the distribution.\n\n Parameters\n ----------\n base : 'linear', 'e', or float\n Optionally, copy and change the base of the copied distribution.\n If `None`, then the copy will keep the same base.\n\n \"\"\"\n from copy import deepcopy\n\n # Make an exact copy of the PRNG.\n prng = np.random.RandomState()\n prng.set_state(self.prng.get_state())\n\n d = _make_distribution(outcomes=deepcopy(self.outcomes),\n pmf=np.array(self.pmf, copy=True),\n base=self.ops.base,\n sample_space=deepcopy(self._sample_space),\n prng=prng,\n sparse=self._meta['is_sparse'])\n\n if base is not None:\n d.set_base(base)\n\n # The following are not initialize-able from the constructor.\n d.set_rv_names(self.get_rv_names())\n d._mask = tuple(self._mask)\n\n return d\n\n def outcome_length(self, masked=False):\n \"\"\"\n Returns the length of outcomes in the joint distribution.\n\n This is also equal to the number of random variables in the joint\n distribution. This value is fixed once the distribution is initialized.\n\n Parameters\n ----------\n masked : bool\n If `True`, then the outcome length additionally includes masked\n random variables. If `False`, then the outcome length does not\n include masked random variables. Including the masked random\n variables is not usually helpful since that represents the outcome\n length of a different, unmarginalized distribution.\n\n \"\"\"\n if masked:\n return len(self._mask)\n else:\n # Equivalently: sum(self._mask)\n # Equivalently: len(self.outcomes[0])\n # Recall, self.alphabet contains only the unmasked/valid rvs.\n return len(self.alphabet)\n\n def get_rv_names(self):\n \"\"\"\n Returns the names of the random variables.\n\n Returns\n -------\n rv_names : tuple or None\n A tuple with length equal to the outcome length, containing the\n names of the random variables in the distribution. If no random\n variable names have been set, then None is returned.\n\n \"\"\"\n if self._rvs is None:\n rv_names = None\n else:\n # _rvs is a dict mapping random variable names to indexes.\n rv_names = [x for x in self._rvs.items()]\n # Sort by index.\n rv_names.sort(key=itemgetter(1))\n # Keep only the sorted names.\n rv_names = tuple(map(itemgetter(0), rv_names))\n return rv_names\n\n def has_outcome(self, outcome, null=True):\n \"\"\"\n Returns `True` if `outcome` exists in the sample space.\n\n Whether or not an outcome is in the sample space is a separate question\n from whether or not an outcome currently appears in the pmf.\n See __contains__ for this latter question.\n\n Parameters\n ----------\n outcome : outcome\n The outcome to be tested.\n null : bool\n Specifies if null outcomes are acceptable. If `True`, then null\n outcomes are acceptable. Thus, the only requirement on `outcome`\n is that it exist in the distribution's sample space. If `False`,\n then null outcomes are not acceptable. 
Thus, `outcome` must exist\n in the distribution's sample space and be a nonnull outcome.\n\n Notes\n -----\n This is an O( len(outcome) ) operation.\n\n \"\"\"\n # Make sure the outcome exists in the sample space.\n is_atom = outcome in self._sample_space\n if not is_atom:\n # Outcome does not exist in the sample space.\n return False\n elif null:\n # Outcome exists in the sample space and we don't care about\n # whether it represents a null probability.\n return True\n else:\n idx = self._outcomes_index.get(outcome, None)\n if idx is None:\n # Outcome is not represented in pmf and thus, represents\n # a null probability.\n return False\n else:\n # Outcome is in pmf. We still need to test if it represents\n # a null probability.\n return self.pmf[idx] > self.ops.zero\n\n def is_homogeneous(self):\n \"\"\"\n Returns `True` if the alphabet for each random variable is the same.\n\n \"\"\"\n if len(self.alphabet) == 0:\n # Degenerate case: No random variables, no alphabet.\n return True\n\n a1 = self.alphabet[0]\n try:\n h = all(a2 == a1 for a2 in self.alphabet[1:])\n except ValueError:\n try:\n h = all(np.equal(a1, a2).all() for a2 in self.alphabet[1:])\n h &= all(len(a1) == len(a2) for a2 in self.alphabet[1:])\n except ValueError:\n return False\n\n return h\n\n def marginal(self, rvs, rv_mode=None):\n \"\"\"\n Returns a marginal distribution.\n\n Parameters\n ----------\n rvs : list\n The random variables to keep. All others are marginalized.\n rv_mode : str, None\n Specifies how to interpret the elements of `rvs`. Valid options\n are: {'indices', 'names'}. If equal to 'indices', then the elements\n of `rvs` are interpreted as random variable indices. If equal to\n 'names', the the elements are interpreted as random variable names.\n If `None`, then the value of `self._rv_mode` is consulted.\n\n Returns\n -------\n d : joint distribution\n A new joint distribution with the random variables in `rvs`\n kept and all others marginalized.\n\n \"\"\"\n # For marginals, we must have unique indexes. Additionally, we do\n # not allow the order of the random variables to change. So we sort.\n # We parse the rv_mode now, so that we can reassign their names\n # after coalesce has finished.\n rvs, indexes = parse_rvs(self, rvs, rv_mode, unique=True, sort=True)\n\n ## Eventually, add in a method specialized for dense distributions.\n ## This one would work only with the pmf, and not the outcomes.\n\n # Marginalization is a special case of coalescing where there is only\n # one new random variable and it is composed of a strict subset of\n # the original random variables, with no duplicates, that maintains\n # the order of the original random variables.\n d = self.coalesce([indexes], rv_mode=RV_MODES.INDICES, extract=True)\n\n # Handle parts of d that are not settable through initialization.\n\n # Set the random variable names\n if self._rvs is None:\n # There are no names...\n names = None\n else:\n # We only have the indexes...so reverse lookup to get the names.\n names_, indexes_ = self._rvs.keys(), self._rvs.values()\n rev = dict(zip(indexes_, names_))\n names = [rev[i] for i in indexes]\n d.set_rv_names(names)\n\n # Set the mask\n L = self.outcome_length()\n d._mask = tuple(False if i in indexes else True for i in range(L))\n return d\n\n def marginalize(self, rvs, rv_mode=None):\n \"\"\"\n Returns a new distribution after marginalizing random variables.\n\n Parameters\n ----------\n rvs : list\n The random variables to marginalize. 
All others are kept.\n rv_mode : str, None\n Specifies how to interpret the elements of `rvs`. Valid options\n are: {'indices', 'names'}. If equal to 'indices', then the elements\n of `rvs` are interpreted as random variable indices. If equal to\n 'names', the the elements are interpreted as random variable names.\n If `None`, then the value of `self._rv_mode` is consulted.\n\n\n Returns\n -------\n d : joint distribution\n A new joint distribution with the random variables in `rvs`\n marginalized and all others kept.\n\n \"\"\"\n rvs, indexes = parse_rvs(self, rvs, rv_mode)\n indexes = set(indexes)\n all_indexes = range(self.outcome_length())\n marginal_indexes = [i for i in all_indexes if i not in indexes]\n d = self.marginal(marginal_indexes, rv_mode=RV_MODES.INDICES)\n return d\n\n def set_rv_names(self, rv_names):\n \"\"\"\n Sets the names of the random variables.\n\n Returns\n -------\n rv_names : tuple\n A tuple with length equal to the outcome length, containing the\n names of the random variables in the distribution.\n\n \"\"\"\n if rv_names is None:\n # This is an explicit clearing of the rv names.\n rvs = None\n else:\n L = self.outcome_length()\n if len(set(rv_names)) < L:\n raise ditException('Too few unique random variable names.')\n elif len(set(rv_names)) > L:\n raise ditException('Too many unique random variable names.')\n if L > 0:\n rvs = dict(zip(rv_names, range(L)))\n else:\n # This is a corner case of a distribution with 0 rvs.\n # We keep rvs equal to None, instead of an empty dict.\n rvs = None\n\n self._rvs = rvs\n\n if self._rvs is not None:\n # Unsure if we should change this automatically.\n self._rv_mode = 'names'\n\n def to_html(self, digits=None, exact=None, tol=1e-9): # pragma: no cover\n \"\"\"\n Construct an HTML representation of the distribution.\n\n Returns\n -------\n output : str\n An HTML version of this distribution.\n \"\"\"\n from .distribution import prepare_string\n\n if exact is None:\n exact = ditParams['print.exact']\n\n x = prepare_string(self, digits, exact, tol)\n pmf, outcomes, base, colsep, max_length, pstr = x\n\n # Alphabet\n if len(self.alphabet) == 0:\n alpha = \"()\"\n elif self.is_homogeneous():\n alpha = str(self.alphabet[0]) + \" for all rvs\"\n else:\n alpha = str(self.alphabet)\n\n # Outcome class\n outcome_class = self._outcome_class\n if outcome_class is not None:\n outcome_class = outcome_class.__name__\n\n info = [\n (\"Class\", self.__class__.__name__),\n (\"Alphabet\", alpha),\n (\"Base\", base),\n (\"Outcome Class\", outcome_class),\n (\"Outcome Lenght\", self.outcome_length()),\n ]\n infos = ''.join(\"<tr><th>{}:</th><td>{}</td></tr>\".format(a, b) for a, b in info)\n header = '<table border=\"1\">{}</table>'.format(infos)\n\n rv_names = self.get_rv_names()\n if rv_names is None:\n rv_names = [\"x[{}]\".format(i) for i in range(self.outcome_length())]\n\n table_header = '<tr>' + ''.join(\"<th>{}</th>\".format(a) for a in rv_names) + \"<th>{}</th></tr>\".format(pstr)\n table_rows = ''.join(\n '<tr>' + ''.join('<td>{}</td>'.format(str(_)) for _ in o) + '<td>{}</td></tr>'.format(p) for o, p in zip(self.outcomes, pmf))\n table = '<table>{}{}</table>'.format(table_header, table_rows)\n\n output = '<div><div style=\"float: left\">{}</div><div style=\"float: left\">{}</div></div>'.format(header, table)\n\n return output\n\n def to_string(self, digits=None, exact=None, tol=1e-9, show_mask=False,\n str_outcomes=False):\n \"\"\"\n Returns a string representation of the distribution.\n\n Parameters\n ----------\n digits : int or 
None\n The probabilities will be rounded to the specified number of\n digits, using NumPy's around function. If `None`, then no rounding\n is performed. Note, if the number of digits is greater than the\n precision of the floats, then the resultant number of digits will\n match that smaller precision.\n exact : bool\n If `True`, then linear probabilities will be displayed, even if\n the underlying pmf contains log probabilities. The closest\n rational fraction within a tolerance specified by `tol` is used\n as the display value.\n tol : float\n If `exact` is `True`, then the probabilities will be displayed\n as the closest rational fraction within `tol`.\n show_mask : bool\n If `True`, show the outcomes in the proper context. Thus, masked\n and unmasked random variables are shown. If `show_mask` is anything\n other than `True` or `False`, it is used as the wildcard symbol.\n str_outcomes\n If `True`, then attempt to convert outcomes which are tuples to\n just strings. This is just a dislplay technique.\n\n Returns\n -------\n s : str\n A string representation of the distribution.\n\n \"\"\"\n from .distribution import prepare_string\n\n from six import StringIO\n\n if exact is None:\n exact = ditParams['print.exact']\n\n s = StringIO()\n\n x = prepare_string(self, digits, exact, tol, show_mask, str_outcomes)\n pmf, outcomes, base, colsep, max_length, pstr = x\n\n headers = [\n \"Class\",\n \"Alphabet\",\n \"Base\",\n \"Outcome Class\",\n \"Outcome Length\",\n \"RV Names\"\n ]\n\n vals = []\n\n # Class\n vals.append(self.__class__.__name__)\n\n # Alphabet\n if len(self.alphabet) == 0:\n alpha = \"()\"\n elif self.is_homogeneous():\n alpha = str(self.alphabet[0]) + \" for all rvs\"\n else:\n alpha = str(self.alphabet)\n vals.append(alpha)\n\n # Base\n vals.append(base)\n\n # Outcome class\n outcome_class = self._outcome_class\n if outcome_class is not None:\n outcome_class = outcome_class.__name__\n vals.append(outcome_class)\n\n # Outcome length\n if show_mask:\n outcome_length = \"{0} (mask: {1})\"\n outcome_length = outcome_length.format(self.outcome_length(),\n len(self._mask))\n else:\n outcome_length = str(self.outcome_length())\n vals.append(outcome_length)\n\n # Random variable names\n rv_names = self.get_rv_names()\n vals.append(rv_names)\n\n # Info\n L = max(map(len, headers))\n for head, val in zip(headers, vals):\n s.write(\"{0}{1}\\n\".format(\"{0}: \".format(head).ljust(L+2), val))\n s.write(\"\\n\")\n\n # Distribution\n s.write(''.join(['x'.ljust(max_length), colsep, pstr, \"\\n\"]))\n # Adjust for empty outcomes. Min length should be: len('x') == 1\n max_length = max(1, max_length)\n for o, p in zip(outcomes, pmf):\n s.write(''.join([o.ljust(max_length), colsep, str(p), \"\\n\"]))\n s.seek(0)\n\n s = s.read()\n # Remove the last \\n\n s = s[:-1]\n\n return s\n",
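A minimal end-to-end usage sketch of the class above, assuming it is exported as dit.Distribution (as in the public dit package). The XOR outcomes and probabilities are written out by hand and mirror the condition_on docstring example; everything else uses only methods documented above.

import dit

# Joint distribution of the XOR gate: Z = X xor Y, four equiprobable outcomes.
outcomes = ['000', '011', '101', '110']
pmf = [0.25, 0.25, 0.25, 0.25]
d = dit.Distribution(outcomes, pmf)
d.set_rv_names('XYZ')

# Marginal over X and Y; marginalize() is the complementary operation.
pXY = d.marginal('XY')
pXY_alt = d.marginalize('Z')     # the same marginal, specified the other way around

# Coalesce the original variables into aggregates: B = ((X, Y), (Y, Z)).
B = d.coalesce([[0, 1], [1, 2]], rv_mode='indices')

# Condition on Y: returns P(Y) and the conditional distributions P(X, Z | Y=y).
pY, pXZgY = d.condition_on('Y')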
"\"\"\"\n\"\"\"\nfrom __future__ import division\n\nimport numpy as np\nfrom scipy.special import digamma\n\nfrom .counts import get_counts\n\n\ndef entropy_0(data, length=1):\n \"\"\"\n Estimate the entropy of length `length` subsequences in `data`.\n\n Parameters\n ----------\n data : iterable\n An iterable of samples.\n length : int\n The length to group samples into.\n\n Returns\n -------\n h0 : float\n An estimate of the entropy.\n\n Notes\n -----\n This returns the naive estimate of the entropy.\n \"\"\"\n counts = get_counts(data, length)\n probs = counts/counts.sum()\n h0 = -np.nansum(probs * np.log2(probs))\n return h0\n\n\ndef entropy_1(data, length=1):\n \"\"\"\n Estimate the entropy of length `length` subsequences in `data`.\n\n Parameters\n ----------\n data : iterable\n An iterable of samples.\n length : int\n The length to group samples into.\n\n Returns\n -------\n h1 : float\n An estimate of the entropy.\n\n Notes\n -----\n If M is the alphabet size and N is the number of samples, then the bias of this estimator is:\n B ~ M/N\n \"\"\"\n counts = get_counts(data, length)\n total = counts.sum()\n digamma_N = digamma(total)\n\n h1 = np.log2(np.e)*(counts/total*(digamma_N - digamma(counts))).sum()\n\n return h1\n\n\ndef entropy_2(data, length=1):\n \"\"\"\n Estimate the entropy of length `length` subsequences in `data`.\n\n Parameters\n ----------\n data : iterable\n An iterable of samples.\n length : int\n The length to group samples into.\n\n Returns\n -------\n h2 : float\n An estimate of the entropy.\n\n Notes\n -----\n If M is the alphabet size and N is the number of samples, then the bias of this estimator is:\n B ~ (M+1)/(2N)\n \"\"\"\n counts = get_counts(data, length)\n total = counts.sum()\n digamma_N = digamma(total)\n log2 = np.log(2)\n jss = [np.arange(1, count) for count in counts]\n\n alt_terms = np.array([(((-1)**js)/js).sum() for js in jss])\n\n h2 = np.log2(np.e)*(counts/total*(digamma_N - digamma(counts) + log2 + alt_terms)).sum()\n\n return h2\n",
"\"\"\"\nThe I_min measure as proposed by Williams & Beer.\n\"\"\"\n\nfrom __future__ import division\n\nimport numpy as np\n\nfrom .pid import BasePID\n\n\ndef s_i(d, input_, output, output_value):\n \"\"\"\n Compute the specific mutual information I(input_ : output=output_value)\n\n Parameters\n ----------\n d : Distribution\n The distribution from which this quantity is to be calculated.\n input_ : iterable\n The input aggregate variable.\n output : iterable\n The output aggregate variable.\n output_value : iterable\n The value of the output.\n\n Returns\n -------\n s : float\n The specific information\n \"\"\"\n pp_s, pp_a_s = d.condition_on(output, rvs=input_)\n p_s = pp_s[output_value]\n p_a_s = pp_a_s[pp_s.outcomes.index(output_value)]\n pp_a, pp_s_a = d.condition_on(input_, rvs=output)\n p_s_a = {a: pp[output_value] for a, pp in zip(pp_a.outcomes, pp_s_a)}\n\n return np.nansum([p_a_s[a] * np.log2(psa / p_s) for a, psa in p_s_a.items()])\n\n\nclass PID_WB(BasePID):\n \"\"\"\n The Williams & Beer partial information decomposition.\n \"\"\"\n _name = \"I_min\"\n\n @staticmethod\n def _measure(d, inputs, output):\n \"\"\"\n Compute I_min(inputs : output) =\n \\sum_{s \\in output} p(s) min_{input_ \\in inputs} I(input_ : output=s)\n\n Parameters\n ----------\n d : Distribution\n The distribution to compute i_min for.\n inputs : iterable of iterables\n The input variables.\n output : iterable\n The output variable.\n\n Returns\n -------\n imin : float\n The value of I_min.\n \"\"\"\n p_s = d.marginal(output)\n return sum(p_s[s] * min(s_i(d, input_, output, s) for input_ in inputs) for s in p_s.outcomes)\n",
"\"\"\"\nMaximum entropy with marginal distribution constraints.\n\nNote: We are actually doing the maximum entropy optimization. So we have not\nbuilt in the fact that the solution is an exponential family.\n\nAlso, this doesn't seem to work that well in practice. The optimization\nsimply fails to converge for many distributions. Xor() works great, but And()\nfails to converge for 2-way marginals. Random distributions seem to work.\nJittering the distributions sometimes helps.\n\nWe might need to assume the exponential form and then fit the params to match\nthe marginals. Perhaps exact gradient and Hessians might help, or maybe even\nsome rescaling of the linear constraints.\n\n\nTODO:\n\nThis code for moment-based maximum entropy needs to be updated so that it can\nhandle any Cartesian product sample space, rather than just homogeneous ones.\n\n\"\"\"\n\nfrom __future__ import division, print_function\n\nfrom debtcollector import removals\n\nimport itertools\n\nimport numpy as np\n\nimport dit\n\nfrom dit.abstractdist import AbstractDenseDistribution, get_abstract_dist\n\nfrom ..helpers import RV_MODES, parse_rvs\nfrom .optutil import as_full_rank, CVXOPT_Template, prepare_dist, Bunch\nfrom ..utils import flatten\n# from ..utils import powerset\n\n__all__ = [\n # 'MarginalMaximumEntropy',\n 'MomentMaximumEntropy',\n # Use version provided by maxentropyfw.py\n #'marginal_maxent_dists',\n 'moment_maxent_dists',\n]\n\n\[email protected](message=\"Please see methods in dit.algorithms.distribution_optimizers.py.\",\n version='1.0.1')\ndef isolate_zeros_generic(dist, rvs):\n \"\"\"\n Determines if there are any elements of the optimization vector that must\n be zero.\n\n If p(marginal) = 0, then every component of the joint that contributes to\n that marginal probability must be exactly zero for all feasible solutions.\n\n \"\"\"\n assert dist.is_dense()\n assert dist.get_base() == 'linear'\n\n rvs_, indexes = parse_rvs(dist, set(flatten(rvs)), unique=True, sort=True)\n rvs = [[indexes[rvs_.index(rv)] for rv in subrv] for subrv in rvs]\n\n d = get_abstract_dist(dist)\n n_variables = d.n_variables\n n_elements = d.n_elements\n\n zero_elements = np.zeros(n_elements, dtype=int)\n cache = {}\n pmf = dist.pmf\n\n for subrvs in rvs:\n marray = d.parameter_array(subrvs, cache=cache)\n for idx in marray:\n # Convert the sparse nonzero elements to a dense boolean array\n bvec = np.zeros(n_elements, dtype=int)\n bvec[idx] = 1\n p = pmf[idx].sum()\n if np.isclose(p, 0):\n zero_elements += bvec\n\n zero = np.nonzero(zero_elements)[0]\n zeroset = set(zero)\n nonzero = [i for i in range(n_elements) if i not in zeroset]\n variables = Bunch(nonzero=nonzero, zero=zero)\n\n return variables\n\n\[email protected](message=\"Please see methods in dit.algorithms.distribution_optimizers.py.\",\n version='1.0.1')\ndef isolate_zeros(dist, k):\n \"\"\"\n Determines if there are any elements of the optimization vector that must\n be zero.\n\n If p(marginal) = 0, then every component of the joint that contributes to\n that marginal probability must be exactly zero for all feasible solutions.\n\n \"\"\"\n assert dist.is_dense()\n assert dist.get_base() == 'linear'\n\n d = get_abstract_dist(dist)\n n_variables = d.n_variables\n n_elements = d.n_elements\n\n rvs = range(n_variables)\n zero_elements = np.zeros(n_elements, dtype=int)\n cache = {}\n pmf = dist.pmf\n if k > 0:\n for subrvs in itertools.combinations(rvs, k):\n marray = d.parameter_array(subrvs, cache=cache)\n for idx in marray:\n # Convert the sparse nonzero 
elements to a dense boolean array\n bvec = np.zeros(n_elements, dtype=int)\n bvec[idx] = 1\n p = pmf[idx].sum()\n if np.isclose(p, 0):\n zero_elements += bvec\n\n zero = np.nonzero(zero_elements)[0]\n zeroset = set(zero)\n nonzero = [i for i in range(n_elements) if i not in zeroset]\n variables = Bunch(nonzero=nonzero, zero=zero)\n\n return variables\n\ndef marginal_constraints_generic(dist, rvs, rv_mode=None,\n with_normalization=True):\n \"\"\"\n Returns `A` and `b` in `A x = b`, for a system of marginal constraints.\n\n In general, the resulting matrix `A` will not have full rank.\n\n Parameters\n ----------\n dist : distribution\n The distribution used to calculate the marginal constraints.\n\n rvs : sequence\n A sequence whose elements are also sequences. Each inner sequence\n specifies a marginal distribution as a set of random variable from\n `dist`. The inner sequences need not be pairwise mutually exclusive\n with one another. A random variable can only appear once within\n each inner sequence, but it can occur in multiple inner sequences.\n\n rv_mode : str, None\n Specifies how to interpret the elements of `rvs`. Valid options\n are: {'indices', 'names'}. If equal to 'indices', then the elements\n of `rvs` are interpreted as random variable indices. If equal to\n 'names', the the elements are interpreted as random variable names.\n If `None`, then the value of `dist._rv_mode` is consulted.\n\n \"\"\"\n assert dist.is_dense()\n assert dist.get_base() == 'linear'\n\n parse = lambda rv: parse_rvs(dist, rv, rv_mode=rv_mode,\n unique=True, sort=True)[1]\n\n # potential inclusion: include implied constraints\n # rvs = set().union(*[set(r for r in powerset(rv) if r) for rv in rvs])\n indexes = [parse(rv) for rv in rvs]\n\n pmf = dist.pmf\n\n d = get_abstract_dist(dist)\n\n A = []\n b = []\n\n # Begin with the normalization constraint.\n if with_normalization:\n A.append(np.ones(d.n_elements))\n b.append(1)\n\n # Now add all the marginal constraints.\n cache = {}\n for rvec in indexes:\n for idx in d.parameter_array(rvec, cache=cache):\n bvec = np.zeros(d.n_elements)\n bvec[idx] = 1\n A.append(bvec)\n b.append(pmf[idx].sum())\n\n A = np.asarray(A, dtype=float)\n b = np.asarray(b, dtype=float)\n\n return A, b\n\n\ndef marginal_constraints(dist, m, with_normalization=True):\n \"\"\"\n Returns `A` and `b` in `A x = b`, for a system of marginal constraints.\n\n The resulting matrix `A` is not guaranteed to have full rank.\n\n Parameters\n ----------\n dist : distribution\n The distribution from which the marginal constraints are constructed.\n\n m : int\n The size of the marginals to constrain. When `m=2`, pairwise marginals\n are constrained to equal the pairwise marginals in `pmf`. When `m=3`,\n three-way marginals are constrained to equal those in `pmf.\n\n with_normalization : bool\n If true, include a constraint for normalization.\n\n Returns\n -------\n A : array-like, shape (p, q)\n The matrix defining the marginal equality constraints and also the\n normalization constraint. The number of rows is:\n p = C(n_variables, m) * n_symbols ** m + 1\n where C() is the choose formula. 
The number of columns is:\n q = n_symbols ** n_variables\n\n b : array-like, (p,)\n The RHS of the linear equality constraints.\n\n \"\"\"\n n_variables = dist.outcome_length()\n\n if m > n_variables:\n msg = \"Cannot constrain {0}-way marginals\"\n msg += \" with only {1} random variables.\"\n msg = msg.format(m, n_variables)\n raise ValueError(msg)\n\n rv_mode = dist._rv_mode\n\n if rv_mode in [RV_MODES.NAMES, 'names']:\n vars = dist.get_rv_names()\n rvs = list(itertools.combinations(vars, m))\n else:\n rvs = list(itertools.combinations(range(n_variables), m))\n\n A, b = marginal_constraints_generic(dist, rvs, rv_mode,\n with_normalization=with_normalization)\n return A, b\n\n\ndef marginal_constraint_rank(dist, m):\n \"\"\"\n Returns the rank of the marginal constraint matrix.\n\n \"\"\"\n dist = prepare_dist(dist)\n n_variables = dist.outcome_length()\n n_symbols = len(dist.alphabet[0])\n pmf = dist.pmf\n\n A, b = marginal_constraints(dist, m)\n _, _, rank = as_full_rank(A, b)\n return rank\n\n\ndef moment(f, pmf, center=0, n=1):\n \"\"\"\n Return the nth moment of `f` about `center`, distributed by `pmf`.\n\n Explicitly: \\sum_i (f(i) - center)**n p(i)\n\n Note, `pmf` is the joint distribution. So n=1 can be used even when\n calculating covariances such as <xx> and <xy>. The first would actually\n be a 2nd moment, while the second would be a mixed 1st moment.\n\n Parameters\n ----------\n f : array-like\n The numerical values assigned to each outcome of `p`.\n pmf : array-like\n The pmf for a distribution, linear-distributed values.\n center : float\n Calculate a centered moment.\n n : int\n The moment to calculate.\n\n \"\"\"\n return ((f - center)**n * pmf).sum()\n\n\ndef moment_constraints(pmf, n_variables, m, symbol_map, with_replacement=True):\n \"\"\"\n Returns `A` and `b` in `A x = b`, for an Ising-like system.\n\n If without replacement, we include only m-way first-moment constraints\n where each element is distinct. So <xx> and <yy> would not be included if\n n_variables=2 and m=2.\n\n The function we take means of is: f(x) = \\prod_i x_i\n\n The resulting matrix `A` is not guaranteed to have full rank.\n\n Parameters\n ----------\n pmf : array-like, shape ( n_symbols ** n_variables, )\n The probability mass function of the distribution. The pmf must have\n a Cartesian product sample space with the same sample space used for\n each random variable.\n n_variables : int\n The number of random variables.\n m : int | list\n The size of the moments to constrain. When `m=2`, pairwise means\n are constrained to equal the pairwise means in `pmf`. When `m=3`,\n three-way means are constrained to equal those in `pmf.\n If m is a list, then include all m-way moments in the list.\n symbol_map : array-like\n A mapping from the ith symbol to a real number that is to be used in\n the calculation of moments. For example, symbol_map=[-1, 1] corresponds\n to the typical Ising model.\n with_replacement : bool\n If `True`, variables are selected with replacement. The standard Ising\n does not select with replacement, and so terms like <xx>, <yy> do not\n appear for m=2. When `True`, we are constraining the entire moment\n matrix.\n\n Returns\n -------\n A : array-like, shape (p, q)\n The matrix defining the marginal equality constraints and also the\n normalization constraint. The number of rows is:\n p = C(n_variables, m) * n_symbols ** m + 1\n where C() is the choose formula. 
The number of columns is:\n q = n_symbols ** n_variables\n\n b : array-like, (p,)\n The RHS of the linear equality constraints.\n\n \"\"\"\n n_symbols = len(symbol_map)\n d = AbstractDenseDistribution(n_variables, n_symbols)\n\n if len(pmf) != d.n_elements:\n msg = 'Length of `pmf` != n_symbols ** n_variables. Symbol map: {0!r}'\n raise ValueError(msg.format(symbol_map))\n\n # Begin with the normalization constraint.\n A = [np.ones(d.n_elements)]\n b = [1]\n\n\n try:\n m[0]\n except TypeError:\n mvals = [m]\n except IndexError:\n # m is empty list\n pass\n else:\n mvals = m\n\n if with_replacement:\n combinations = itertools.combinations_with_replacement\n else:\n combinations = itertools.combinations\n\n # Now add all the moment constraints.\n for m in mvals:\n if m < 1:\n continue\n\n outcomes = list(itertools.product(symbol_map, repeat=n_variables))\n outcomes = np.asarray(outcomes)\n for rvs in combinations(range(n_variables), m):\n # Make it a list for NumPy indexing\n rvs = list(rvs)\n f = np.array([outcome[rvs].prod() for outcome in outcomes])\n mean = moment(f, pmf, n=1)\n A.append(f)\n b.append(mean)\n\n A = np.asarray(A, dtype=float)\n b = np.asarray(b, dtype=float)\n\n return A, b\n\n\ndef moment_constraint_rank(dist, m, symbol_map=None,\n cumulative=True, with_replacement=True):\n \"\"\"\n Returns the rank of the moment constraint matrix.\n\n \"\"\"\n if cumulative:\n mvals = range(m + 1)\n else:\n mvals = [m]\n\n\n dist = prepare_dist(dist)\n n_variables = dist.outcome_length()\n n_symbols = len(dist.alphabet[0])\n pmf = dist.pmf\n\n # Symbol map\n if symbol_map is None:\n symbol_map = range(n_symbols)\n\n A, b = moment_constraints(pmf, n_variables, mvals, symbol_map,\n with_replacement=with_replacement)\n _, _, rank = as_full_rank(A, b)\n\n return rank\n\n\ndef ising_constraint_rank(dist, m, symbol_map=None, cumulative=True):\n \"\"\"\n Returns the rank of the Ising constraint matrix.\n\n \"\"\"\n return moment_constraint_rank(dist, m, symbol_map, cumulative,\n with_replacement=False)\n\n\ndef negentropy(p):\n \"\"\"\n Entropy which operates on vectors of length N.\n\n \"\"\"\n negH = np.nansum(p * np.log2(p))\n return negH\n\n\[email protected]_class('MaximumEntropy',\n replacement=\"dit.algorithms.scipy_optimizers.MaxEntOptimizer\",\n message=\"Please see methods in dit.algorithms.distribution_optimizers.py.\",\n version='1.0.1')\nclass MaximumEntropy(CVXOPT_Template):\n \"\"\"\n Find maximum entropy distribution.\n\n \"\"\"\n def build_function(self):\n self.func = negentropy\n\n\[email protected]_class('MarginalMaximumEntropy',\n replacement=\"dit.algorithms.scipy_optimizers.MaxEntOptimizer\",\n message=\"Please see methods in dit.algorithms.distribution_optimizers.py.\",\n version='1.0.1')\nclass MarginalMaximumEntropy(MaximumEntropy):\n \"\"\"\n Find maximum entropy distribution subject to k-way marginal constraints.\n\n k=0 should reproduce the behavior of MaximumEntropy.\n\n \"\"\"\n def __init__(self, dist, k, tol=None, prng=None):\n \"\"\"\n Initialize optimizer.\n\n Parameters\n ----------\n dist : distribution\n The distribution used to specify the marginal constraints.\n k : int\n The number of variables in the constrained marginals.\n\n \"\"\"\n self.k = k\n super(MarginalMaximumEntropy, self).__init__(dist, tol=tol, prng=prng)\n\n def prep(self):\n\n # We are only removing elements which should be fixed at zero.\n # This means they don't contribute to the entropy, so there is no\n # need to adjust the function. 
Also, we are using numdifftools.\n self.variables = isolate_zeros(self.dist, self.k)\n\n # Make self.n reflect only the size of the nonzero elements. This\n # automatically adjusts the size of G for the inequality constraint.\n self.n = len(self.variables.nonzero) # pylint: disable=no-member\n\n def build_linear_equality_constraints(self):\n from cvxopt import matrix\n\n A, b = marginal_constraints(self.dist, self.k)\n\n # Reduce the size of the constraint matrix\n # Since we are only removing elements which are exactly zero, then\n # the constraint equations are unchanged. E.g. the normalization is\n # still that the nonzero values should add to 1.\n\n Asmall = A[:, self.variables.nonzero] # pylint: disable=no-member\n Asmall, b, rank = as_full_rank(Asmall, b)\n if rank > Asmall.shape[1]:\n raise ValueError('More independent constraints than free parameters.')\n\n Asmall = matrix(Asmall)\n b = matrix(b) # now a column vector\n\n self.A = Asmall\n self.b = b\n\n def initial_dist(self):\n from .maxentropyfw import initial_point\n initial_x, _ = initial_point(self.dist, self.k, A=self.A, b=self.b,\n isolated=self.variables,\n show_progress=False)\n return initial_x\n\n def build_gradient_hessian(self):\n\n ln2 = np.log(2)\n def gradient(xarr):\n # This operates only on nonzero elements.\n\n # All of the optimization elements should be greater than zero\n # But occasional they might go slightly negative or zero.\n # In those cases, we will just set the gradient to zero and keep the\n # value fixed from that point forward.\n bad_x = xarr <= 0\n grad = np.log2(xarr) + 1 / ln2\n grad[bad_x] = 0\n return grad\n\n def hessian(xarr):\n bad_x = xarr <= 0\n diag = 1 / xarr / ln2\n diag[bad_x] = 0\n return np.diag(diag)\n\n self.gradient = gradient\n self.hessian = hessian\n\n\nclass MomentMaximumEntropy(MaximumEntropy):\n \"\"\"\n Find maximum entropy distribution subject to k-way marginal constraints.\n\n k=0 should reproduce the behavior of MaximumEntropy.\n\n \"\"\"\n def __init__(self, dist, k, symbol_map, cumulative=True,\n with_replacement=True, tol=None, prng=None):\n \"\"\"\n Initialize optimizer.\n\n Parameters\n ----------\n dist : distribution\n The distribution used to specify the marginal constraints.\n k : int\n The number of variables in the constrained marginals.\n symbol_map : list\n The mapping from states to real numbers. 
This is used while taking\n moments.\n cumulative : bool\n If `True`, include all moments less than or equal to `k`.\n with_replacement : bool\n If `True`, then variables are selected for moments with replacement.\n The standard Ising model selects without replacement.\n tol : float | None\n The desired convergence tolerance.\n prng : RandomState\n A pseudorandom number generator.\n\n \"\"\"\n self.k = k\n self.symbol_map = symbol_map\n self.cumulative = cumulative\n self.with_replacement = with_replacement\n super(MomentMaximumEntropy, self).__init__(dist, tol=tol, prng=prng)\n\n\n def build_linear_equality_constraints(self):\n from cvxopt import matrix\n\n # Dimension of optimization variable\n n = self.n\n\n if self.cumulative:\n k = range(self.k + 1)\n else:\n k = [self.k]\n\n args = (self.pmf, self.n_variables, k, self.symbol_map)\n kwargs = {'with_replacement': self.with_replacement}\n A, b = moment_constraints(*args, **kwargs)\n AA, bb, rank = as_full_rank(A, b)\n if rank > n:\n raise ValueError('More independent constraints than parameters.')\n\n AA = matrix(AA)\n bb = matrix(bb) # now a column vector\n\n self.A = AA\n self.b = bb\n\n\[email protected](message=\"Please see methods in dit.algorithms.distribution_optimizers.py.\",\n version='1.0.1')\ndef marginal_maxent_dists(dist, k_max=None, jitter=True, show_progress=True):\n \"\"\"\n Return the marginal-constrained maximum entropy distributions.\n\n Parameters\n ----------\n dist : distribution\n The distribution used to constrain the maxent distributions.\n k_max : int\n The maximum order to calculate.\n jitter : bool | float\n When `True` or a float, we perturb the distribution slightly before\n proceeding. This can sometimes help with convergence.\n show-progress : bool\n If `True`, show convergence progress to stdout.\n\n \"\"\"\n dist = prepare_dist(dist)\n\n if jitter:\n # This is sometimes necessary. If your distribution does not have\n # full support than convergence can be difficult to come by.\n dist.pmf = dit.math.pmfops.jittered(dist.pmf)\n\n n_variables = dist.outcome_length()\n symbols = dist.alphabet[0]\n\n if k_max is None:\n k_max = n_variables\n\n outcomes = list(dist.sample_space())\n\n dists = []\n for k in range(k_max + 1):\n print()\n print(\"Constraining maxent dist to match {0}-way marginals.\".format(k))\n print()\n opt = MarginalMaximumEntropy(dist, k)\n pmf_opt = opt.optimize(show_progress=show_progress)\n pmf_opt = pmf_opt.reshape(pmf_opt.shape[0])\n pmf = np.zeros(len(dist.pmf))\n pmf[opt.variables.nonzero] = pmf_opt # pylint: disable=no-member\n d = dit.Distribution(outcomes, pmf)\n dists.append(d)\n\n return dists\n\n\ndef moment_maxent_dists(dist, symbol_map, k_max=None, jitter=True,\n with_replacement=True, show_progress=True):\n \"\"\"\n Return the marginal-constrained maximum entropy distributions.\n\n Parameters\n ----------\n dist : distribution\n The distribution used to constrain the maxent distributions.\n symbol_map : iterable\n A list whose elements are the real values that each state is assigned\n while calculating moments. Typical values are [-1, 1] or [0, 1].\n k_max : int\n The maximum order to calculate.\n jitter : bool | float\n When `True` or a float, we perturb the distribution slightly before\n proceeding. 
This can sometimes help with convergence.\n with_replacement : bool\n If `True`, then variables are selected for moments with replacement.\n The standard Ising model selects without replacement.\n show-progress : bool\n If `True`, show convergence progress to stdout.\n\n \"\"\"\n dist = prepare_dist(dist)\n\n if jitter:\n # This is sometimes necessary. If your distribution does not have\n # full support than convergence can be difficult to come by.\n dist.pmf = dit.math.pmfops.jittered(dist.pmf)\n\n n_variables = dist.outcome_length()\n symbols = dist.alphabet[0]\n\n if k_max is None:\n k_max = n_variables\n\n outcomes = list(dist._product(symbols, repeat=n_variables))\n\n if with_replacement:\n text = 'with replacement'\n else:\n text = 'without replacement'\n\n dists = []\n for k in range(k_max + 1):\n msg = \"Constraining maxent dist to match {0}-way moments, {1}.\"\n print()\n print(msg.format(k, text))\n print()\n opt = MomentMaximumEntropy(dist, k, symbol_map, with_replacement=with_replacement)\n pmf_opt = opt.optimize(show_progress=show_progress)\n pmf_opt = pmf_opt.reshape(pmf_opt.shape[0])\n d = dit.Distribution(outcomes, pmf_opt)\n dists.append(d)\n\n return dists\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nMarginal Utility of Information, as defined here: http://arxiv.org/abs/1409.4708\n\"\"\"\n\nfrom .base_profile import BaseProfile, profile_docstring\n\nfrom itertools import product\n\nimport numpy as np\n\nfrom .information_partitions import ShannonPartition\nfrom ..utils import flatten, powerset\n\n__all__ = [\n 'MUIProfile',\n]\n\n\ndef get_lp_form(dist, ents):\n \"\"\"\n Construct the constraint matrix for computing the maximum utility of information in linear programming cononical form.\n\n Parameters\n ----------\n dist : Distribution\n The distribution from which to construct the constraints.\n\n Returns\n -------\n c : ndarray\n The utility function to minimize\n A : ndarray\n The lhs of the constraint equations\n b : ndarray\n The rhs of the constraint equations\n bounds : list of pairs\n The bounds on the individual elements of `x`\n \"\"\"\n pa = list(frozenset(s) for s in powerset(flatten(dist.rvs)))[1:]\n sp = sorted(ents.atoms.items())\n atoms = list(frozenset(flatten(a[0])) for a, v in sp if not np.isclose(v, 0))\n\n A = []\n b = []\n\n for pa_V, pa_W in product(pa, pa):\n if pa_V == pa_W:\n # constraint (i)\n cond = np.zeros(len(atoms))\n for j, atom in enumerate(atoms):\n if pa_V & atom:\n cond[j] = 1\n A.append(cond)\n b.append(ents[([pa_V], [])])\n\n else:\n # constraint (ii)\n if pa_W < pa_V:\n cond = np.zeros(len(atoms))\n for j, atom in enumerate(atoms):\n if (pa_V & atom) and not (pa_W & atom):\n cond[j] = 1\n A.append(cond)\n b.append(ents[([pa_V], [])] - ents[([pa_W], [])])\n # constraint (iii)\n cond = np.zeros(len(atoms))\n for j, atom in enumerate(atoms):\n if (pa_V & atom):\n cond[j] += 1\n if (pa_W & atom):\n cond[j] += 1\n if ((pa_V | pa_W) & atom):\n cond[j] -= 1\n if ((pa_V & pa_W) & atom):\n cond[j] -= 1\n A.append(cond)\n b.append(ents[([pa_V], [])] +\n ents[([pa_W], [])] -\n ents[([pa_V | pa_W], [])] -\n ents[([pa_V & pa_W], [])])\n\n A.append([1]*len(atoms))\n b.append(0) # placeholder for y\n\n A = np.array(A)\n b = np.array(b)\n\n c = np.array([-len(atom) for atom in atoms]) # negative for minimization\n\n bounds = [(min(0, val), max(0, val)) for _, val in sp if not np.isclose(val, 0)]\n\n return c, A, b, bounds\n\n\ndef max_util_of_info(c, A, b, bounds, y):\n \"\"\"\n Compute the maximum utility of information at scale `y`.\n\n Parameters\n ----------\n c : ndarray\n A list of atom-weights.\n A : ndarray\n The lhs of the various constraints.\n b : ndarray\n The rhs of the various constraints.\n bounds : list of pairs\n Each part of `x` must be between the atom's value and 0.\n y : float\n The total mutual information captured.\n \"\"\"\n from scipy.optimize import linprog\n\n b[-1] = y\n solution = linprog(c, A, b, bounds=bounds)\n maximum_utility_of_information = -solution.fun\n return maximum_utility_of_information\n\n\nclass MUIProfile(BaseProfile):\n __doc__ = profile_docstring.format(name='MUIProfile',\n static_attributes='',\n attributes='',\n methods='')\n\n xlabel = \"scale [bits]\"\n ylabel = \"marginal utility of information\"\n align = 'edge'\n\n def _compute(self):\n \"\"\"\n Compute the Marginal Utility of Information.\n \"\"\"\n sp = ShannonPartition(self.dist)\n c, A, b, bounds = get_lp_form(self.dist, sp)\n ent = sum(sp.atoms.values())\n\n atoms = sp.atoms.values()\n ps = powerset(atoms)\n pnts = np.unique(np.round([sum(ss) for ss in ps], 7))\n pnts = [v for v in pnts if 0 <= v <= ent]\n\n maxui = [max_util_of_info(c, A, b, bounds, y) for y in pnts]\n mui = 
np.round(np.diff(maxui)/np.diff(pnts), 7)\n vals = np.array(np.unique(mui, return_index=True))\n self.profile = dict((pnts[int(row[1])], row[0]) for row in vals.T)\n self.widths = np.diff(list(sorted(self.profile.keys())) + [ent])\n\n def draw(self, ax=None): # pragma: no cover\n ax = super(MUIProfile, self).draw(ax=ax)\n pnts = np.arange(int(max(self.profile.keys()) + self.widths[-1]) + 1)\n ax.set_xticks(pnts)\n ax.set_xticklabels(pnts)\n return ax\n\n draw.__doc__ = BaseProfile.draw.__doc__\n"
] |
[
[
"numpy.asarray",
"numpy.equal",
"numpy.ndenumerate",
"numpy.array",
"numpy.random.RandomState"
],
[
"numpy.arange",
"numpy.log",
"numpy.log2",
"scipy.special.digamma"
],
[
"numpy.log2"
],
[
"numpy.diag",
"numpy.log",
"numpy.log2",
"numpy.nonzero",
"numpy.asarray",
"numpy.ones",
"numpy.zeros",
"numpy.isclose"
],
[
"numpy.unique",
"scipy.optimize.linprog",
"numpy.diff",
"numpy.array",
"numpy.isclose"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
RobbiNespu/scikit-image
|
[
"a65b8af4bf0f5f71624a91ea5ce7812e80cae4cb",
"a65b8af4bf0f5f71624a91ea5ce7812e80cae4cb",
"a65b8af4bf0f5f71624a91ea5ce7812e80cae4cb",
"a65b8af4bf0f5f71624a91ea5ce7812e80cae4cb"
] |
[
"skimage/io/_plugins/pil_plugin.py",
"skimage/io/_plugins/freeimage_plugin.py",
"doc/examples/plot_peak_local_max.py",
"skimage/segmentation/slic_superpixels.py"
] |
[
"__all__ = ['imread', 'imsave']\n\nimport numpy as np\nfrom six import string_types\nfrom PIL import Image\n\nfrom ...util import img_as_ubyte, img_as_uint\nfrom ...external.tifffile import imread as tif_imread, imsave as tif_imsave\n\n\ndef imread(fname, dtype=None, img_num=None, **kwargs):\n \"\"\"Load an image from file.\n\n Parameters\n ----------\n fname : str\n File name.\n dtype : numpy dtype object or string specifier\n Specifies data type of array elements.\n img_num : int, optional\n Specifies which image to read in a file with multiple images\n (zero-indexed).\n kwargs : keyword pairs, optional\n Addition keyword arguments to pass through (only applicable to Tiff\n files for now, see `tifffile`'s `imread` function).\n\n Notes\n -----\n Tiff files are handled by Christophe Golhke's tifffile.py [1]_, and support many\n advanced image types including multi-page and floating point.\n\n All other files are read using the Python Imaging Libary.\n See PIL docs [2]_ for a list of supported formats.\n\n References\n ----------\n .. [1] http://www.lfd.uci.edu/~gohlke/code/tifffile.py.html\n .. [2] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html\n\n \"\"\"\n if hasattr(fname, 'lower') and dtype is None:\n kwargs.setdefault('key', img_num)\n if fname.lower().endswith(('.tiff', '.tif')):\n return tif_imread(fname, **kwargs)\n\n im = Image.open(fname)\n try:\n # this will raise an IOError if the file is not readable\n im.getdata()[0]\n except IOError as e:\n site = \"http://pillow.readthedocs.org/en/latest/installation.html#external-libraries\"\n pillow_error_message = str(e)\n error_message = ('Could not load \"%s\" \\n'\n 'Reason: \"%s\"\\n'\n 'Please see documentation at: %s') % (fname, pillow_error_message, site)\n raise ValueError(error_message)\n else:\n return pil_to_ndarray(im, dtype=dtype, img_num=img_num)\n\n\ndef pil_to_ndarray(im, dtype=None, img_num=None):\n \"\"\"Import a PIL Image object to an ndarray, in memory.\n\n Parameters\n ----------\n Refer to ``imread``.\n\n \"\"\"\n frames = []\n grayscale = None\n i = 0\n while 1:\n try:\n im.seek(i)\n except EOFError:\n break\n\n frame = im\n\n if img_num is not None and img_num != i:\n im.getdata()[0]\n i += 1\n continue\n\n if im.mode == 'P':\n if grayscale is None:\n grayscale = _palette_is_grayscale(im)\n\n if grayscale:\n frame = im.convert('L')\n else:\n frame = im.convert('RGB')\n\n elif im.mode == '1':\n frame = im.convert('L')\n\n elif 'A' in im.mode:\n frame = im.convert('RGBA')\n\n elif im.mode == 'CMYK':\n frame = im.convert('RGB')\n\n if im.mode.startswith('I;16'):\n shape = im.size\n dtype = '>u2' if im.mode.endswith('B') else '<u2'\n if 'S' in im.mode:\n dtype = dtype.replace('u', 'i')\n frame = np.fromstring(frame.tobytes(), dtype)\n frame.shape = shape[::-1]\n\n else:\n frame = np.array(frame, dtype=dtype)\n\n frames.append(frame)\n i += 1\n\n if img_num is not None:\n break\n\n if hasattr(im, 'fp') and im.fp:\n im.fp.close()\n\n if img_num is None and len(frames) > 1:\n return np.array(frames)\n elif frames:\n return frames[0]\n elif img_num:\n raise IndexError('Could not find image #%s' % img_num)\n\n\ndef _palette_is_grayscale(pil_image):\n \"\"\"Return True if PIL image in palette mode is grayscale.\n\n Parameters\n ----------\n pil_image : PIL image\n PIL Image that is in Palette mode.\n\n Returns\n -------\n is_grayscale : bool\n True if all colors in image palette are gray.\n \"\"\"\n assert pil_image.mode == 'P'\n # get palette as an array with R, G, B columns\n palette = 
np.asarray(pil_image.getpalette()).reshape((256, 3))\n # Not all palette colors are used; unused colors have junk values.\n start, stop = pil_image.getextrema()\n valid_palette = palette[start:stop]\n # Image is grayscale if channel differences (R - G and G - B)\n # are all zero.\n return np.allclose(np.diff(valid_palette), 0)\n\n\ndef ndarray_to_pil(arr, format_str=None):\n \"\"\"Export an ndarray to a PIL object.\n\n Parameters\n ----------\n Refer to ``imsave``.\n\n \"\"\"\n if arr.ndim == 3:\n arr = img_as_ubyte(arr)\n mode = {3: 'RGB', 4: 'RGBA'}[arr.shape[2]]\n\n elif format_str in ['png', 'PNG']:\n mode = 'I;16'\n mode_base = 'I'\n\n if arr.dtype.kind == 'f':\n arr = img_as_uint(arr)\n\n elif arr.max() < 256 and arr.min() >= 0:\n arr = arr.astype(np.uint8)\n mode = mode_base = 'L'\n\n else:\n arr = img_as_uint(arr)\n\n else:\n arr = img_as_ubyte(arr)\n mode = 'L'\n mode_base = 'L'\n\n try:\n array_buffer = arr.tobytes()\n except AttributeError:\n array_buffer = arr.tostring() # Numpy < 1.9\n\n if arr.ndim == 2:\n im = Image.new(mode_base, arr.T.shape)\n try:\n im.frombytes(array_buffer, 'raw', mode)\n except AttributeError:\n im.fromstring(array_buffer, 'raw', mode) # PIL 1.1.7\n else:\n image_shape = (arr.shape[1], arr.shape[0])\n try:\n im = Image.frombytes(mode, image_shape, array_buffer)\n except AttributeError:\n im = Image.fromstring(mode, image_shape, array_buffer) # PIL 1.1.7\n return im\n\n\ndef imsave(fname, arr, format_str=None, **kwargs):\n \"\"\"Save an image to disk.\n\n Parameters\n ----------\n fname : str or file-like object\n Name of destination file.\n arr : ndarray of uint8 or float\n Array (image) to save. Arrays of data-type uint8 should have\n values in [0, 255], whereas floating-point arrays must be\n in [0, 1].\n format_str: str\n Format to save as, this is defaulted to PNG if using a file-like\n object; this will be derived from the extension if fname is a string\n kwargs: dict\n Keyword arguments to the Pillow save function (or tifffile save\n function, for Tiff files). These are format dependent. For example,\n Pillow's JPEG save function supports an integer ``quality`` argument\n with values in [1, 95], while TIFFFile supports a ``compress``\n integer argument with values in [0, 9].\n\n Notes\n -----\n Tiff files are handled by Christophe Golhke's tifffile.py [1]_,\n and support many advanced image types including multi-page and\n floating point.\n\n All other image formats use the Python Imaging Libary.\n See PIL docs [2]_ for a list of other supported formats.\n All images besides single channel PNGs are converted using `img_as_uint8`.\n Single Channel PNGs have the following behavior:\n - Integer values in [0, 255] and Boolean types -> img_as_uint8\n - Floating point and other integers -> img_as_uint16\n\n References\n ----------\n .. [1] http://www.lfd.uci.edu/~gohlke/code/tifffile.py.html\n .. 
[2] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html\n \"\"\"\n # default to PNG if file-like object\n if not isinstance(fname, string_types) and format_str is None:\n format_str = \"PNG\"\n # Check for png in filename\n if (isinstance(fname, string_types)\n and fname.lower().endswith(\".png\")):\n format_str = \"PNG\"\n\n arr = np.asanyarray(arr).squeeze()\n\n if arr.dtype.kind == 'b':\n arr = arr.astype(np.uint8)\n\n use_tif = False\n if hasattr(fname, 'lower'):\n if fname.lower().endswith(('.tiff', '.tif')):\n use_tif = True\n if not format_str is None:\n if format_str.lower() in ['tiff', 'tif']:\n use_tif = True\n\n if use_tif:\n tif_imsave(fname, arr, **kwargs)\n return\n\n if arr.ndim not in (2, 3):\n raise ValueError(\"Invalid shape for image array: %s\" % arr.shape)\n\n if arr.ndim == 3:\n if arr.shape[2] not in (3, 4):\n raise ValueError(\"Invalid number of channels in image array.\")\n\n img = ndarray_to_pil(arr, format_str=format_str)\n img.save(fname, format=format_str, **kwargs)\n",
"import ctypes\nimport numpy\nimport sys\nimport os\nimport os.path\nfrom numpy.compat import asbytes, asstr\n\n\ndef _generate_candidate_libs():\n # look for likely library files in the following dirs:\n lib_dirs = [os.path.dirname(__file__),\n '/lib',\n '/usr/lib',\n '/usr/local/lib',\n '/opt/local/lib',\n os.path.join(sys.prefix, 'lib'),\n os.path.join(sys.prefix, 'DLLs')\n ]\n if 'HOME' in os.environ:\n lib_dirs.append(os.path.join(os.environ['HOME'], 'lib'))\n lib_dirs = [ld for ld in lib_dirs if os.path.exists(ld)]\n\n lib_names = ['libfreeimage', 'freeimage'] # should be lower-case!\n # Now attempt to find libraries of that name in the given directory\n # (case-insensitive and without regard for extension)\n lib_paths = []\n for lib_dir in lib_dirs:\n for lib_name in lib_names:\n files = os.listdir(lib_dir)\n lib_paths += [os.path.join(lib_dir, lib) for lib in files\n if lib.lower().startswith(lib_name) and not\n os.path.splitext(lib)[1] in ('.py', '.pyc', '.ini')]\n lib_paths = [lp for lp in lib_paths if os.path.exists(lp)]\n\n return lib_dirs, lib_paths\n\nif sys.platform == 'win32':\n LOADER = ctypes.windll\n FUNCTYPE = ctypes.WINFUNCTYPE\nelse:\n LOADER = ctypes.cdll\n FUNCTYPE = ctypes.CFUNCTYPE\n\ndef handle_errors():\n global FT_ERROR_STR\n if FT_ERROR_STR:\n tmp = FT_ERROR_STR\n FT_ERROR_STR = None\n raise RuntimeError(tmp)\n\nFT_ERROR_STR = None\n# This MUST happen in module scope, or the function pointer is garbage\n# collected, leading to a segfault when error_handler is called.\n@FUNCTYPE(None, ctypes.c_int, ctypes.c_char_p)\ndef c_error_handler(fif, message):\n global FT_ERROR_STR\n FT_ERROR_STR = 'FreeImage error: %s' % message\n\ndef load_freeimage():\n freeimage = None\n errors = []\n # First try a few bare library names that ctypes might be able to find\n # in the default locations for each platform. Win DLL names don't need the\n # extension, but other platforms do.\n bare_libs = ['FreeImage', 'libfreeimage.dylib', 'libfreeimage.so',\n 'libfreeimage.so.3']\n lib_dirs, lib_paths = _generate_candidate_libs()\n lib_paths = bare_libs + lib_paths\n for lib in lib_paths:\n try:\n freeimage = LOADER.LoadLibrary(lib)\n break\n except Exception:\n if lib not in bare_libs:\n # Don't record errors when it couldn't load the library from\n # a bare name -- this fails often, and doesn't provide any\n # useful debugging information anyway, beyond \"couldn't find\n # library...\"\n # Get exception instance in Python 2.x/3.x compatible manner\n e_type, e_value, e_tb = sys.exc_info()\n del e_tb\n errors.append((lib, e_value))\n\n if freeimage is None:\n if errors:\n # No freeimage library loaded, and load-errors reported for some\n # candidate libs\n err_txt = ['%s:\\n%s' % (l, str(e)) for l, e in errors]\n raise RuntimeError('One or more FreeImage libraries were found, but '\n 'could not be loaded due to the following errors:\\n'\n '\\n\\n'.join(err_txt))\n else:\n # No errors, because no potential libraries found at all!\n raise RuntimeError('Could not find a FreeImage library in any of:\\n' +\n '\\n'.join(lib_dirs))\n\n # FreeImage found\n freeimage.FreeImage_SetOutputMessage(c_error_handler)\n return freeimage\n\n_FI = load_freeimage()\n\nAPI = {\n # All we're doing here is telling ctypes that some of the FreeImage\n # functions return pointers instead of integers. 
(On 64-bit systems,\n # without this information the pointers get truncated and crashes result).\n # There's no need to list functions that return ints, or the types of the\n # parameters to these or other functions -- that's fine to do implicitly.\n\n # Note that the ctypes immediately converts the returned void_p back to a\n # python int again! This is really not helpful, because then passing it\n # back to another library call will cause truncation-to-32-bits on 64-bit\n # systems. Thanks, ctypes! So after these calls one must immediately\n # re-wrap the int as a c_void_p if it is to be passed back into FreeImage.\n 'FreeImage_AllocateT': (ctypes.c_void_p, None),\n 'FreeImage_FindFirstMetadata': (ctypes.c_void_p, None),\n 'FreeImage_GetBits': (ctypes.c_void_p, None),\n 'FreeImage_GetPalette': (ctypes.c_void_p, None),\n 'FreeImage_GetTagKey': (ctypes.c_char_p, None),\n 'FreeImage_GetTagValue': (ctypes.c_void_p, None),\n 'FreeImage_Load': (ctypes.c_void_p, None),\n 'FreeImage_LockPage': (ctypes.c_void_p, None),\n 'FreeImage_OpenMultiBitmap': (ctypes.c_void_p, None)\n }\n\n# Albert's ctypes pattern\n\n\ndef register_api(lib, api):\n for f, (restype, argtypes) in api.items():\n func = getattr(lib, f)\n func.restype = restype\n func.argtypes = argtypes\n\nregister_api(_FI, API)\n\n\nclass FI_TYPES(object):\n FIT_UNKNOWN = 0\n FIT_BITMAP = 1\n FIT_UINT16 = 2\n FIT_INT16 = 3\n FIT_UINT32 = 4\n FIT_INT32 = 5\n FIT_FLOAT = 6\n FIT_DOUBLE = 7\n FIT_COMPLEX = 8\n FIT_RGB16 = 9\n FIT_RGBA16 = 10\n FIT_RGBF = 11\n FIT_RGBAF = 12\n\n dtypes = {\n FIT_BITMAP: numpy.uint8,\n FIT_UINT16: numpy.uint16,\n FIT_INT16: numpy.int16,\n FIT_UINT32: numpy.uint32,\n FIT_INT32: numpy.int32,\n FIT_FLOAT: numpy.float32,\n FIT_DOUBLE: numpy.float64,\n FIT_COMPLEX: numpy.complex128,\n FIT_RGB16: numpy.uint16,\n FIT_RGBA16: numpy.uint16,\n FIT_RGBF: numpy.float32,\n FIT_RGBAF: numpy.float32\n }\n\n fi_types = {\n (numpy.dtype('uint8'), 1): FIT_BITMAP,\n (numpy.dtype('uint8'), 3): FIT_BITMAP,\n (numpy.dtype('uint8'), 4): FIT_BITMAP,\n (numpy.dtype('uint16'), 1): FIT_UINT16,\n (numpy.dtype('int16'), 1): FIT_INT16,\n (numpy.dtype('uint32'), 1): FIT_UINT32,\n (numpy.dtype('int32'), 1): FIT_INT32,\n (numpy.dtype('float32'), 1): FIT_FLOAT,\n (numpy.dtype('float64'), 1): FIT_DOUBLE,\n (numpy.dtype('complex128'), 1): FIT_COMPLEX,\n (numpy.dtype('uint16'), 3): FIT_RGB16,\n (numpy.dtype('uint16'), 4): FIT_RGBA16,\n (numpy.dtype('float32'), 3): FIT_RGBF,\n (numpy.dtype('float32'), 4): FIT_RGBAF\n }\n\n extra_dims = {\n FIT_UINT16: [],\n FIT_INT16: [],\n FIT_UINT32: [],\n FIT_INT32: [],\n FIT_FLOAT: [],\n FIT_DOUBLE: [],\n FIT_COMPLEX: [],\n FIT_RGB16: [3],\n FIT_RGBA16: [4],\n FIT_RGBF: [3],\n FIT_RGBAF: [4]\n }\n\n @classmethod\n def get_type_and_shape(cls, bitmap):\n w = _FI.FreeImage_GetWidth(bitmap)\n handle_errors()\n h = _FI.FreeImage_GetHeight(bitmap)\n handle_errors()\n fi_type = _FI.FreeImage_GetImageType(bitmap)\n handle_errors()\n if not fi_type:\n raise ValueError('Unknown image pixel type')\n dtype = cls.dtypes[fi_type]\n if fi_type == cls.FIT_BITMAP:\n bpp = _FI.FreeImage_GetBPP(bitmap)\n handle_errors()\n if bpp == 8:\n extra_dims = []\n elif bpp == 24:\n extra_dims = [3]\n elif bpp == 32:\n extra_dims = [4]\n else:\n raise ValueError('Cannot convert %d BPP bitmap' % bpp)\n else:\n extra_dims = cls.extra_dims[fi_type]\n return numpy.dtype(dtype), extra_dims + [w, h]\n\n\nclass IO_FLAGS(object):\n FIF_LOAD_NOPIXELS = 0x8000 # loading: load the image header only\n # (not supported by all plugins)\n\n BMP_DEFAULT = 
0\n BMP_SAVE_RLE = 1\n CUT_DEFAULT = 0\n DDS_DEFAULT = 0\n EXR_DEFAULT = 0 # save data as half with piz-based wavelet compression\n EXR_FLOAT = 0x0001 # save data as float instead of as half (not recommended)\n EXR_NONE = 0x0002 # save with no compression\n EXR_ZIP = 0x0004 # save with zlib compression, in blocks of 16 scan lines\n EXR_PIZ = 0x0008 # save with piz-based wavelet compression\n EXR_PXR24 = 0x0010 # save with lossy 24-bit float compression\n EXR_B44 = 0x0020 # save with lossy 44% float compression\n # - goes to 22% when combined with EXR_LC\n EXR_LC = 0x0040 # save images with one luminance and two chroma channels,\n # rather than as RGB (lossy compression)\n FAXG3_DEFAULT = 0\n GIF_DEFAULT = 0\n GIF_LOAD256 = 1 # Load the image as a 256 color image with ununsed\n # palette entries, if it's 16 or 2 color\n GIF_PLAYBACK = 2 # 'Play' the GIF to generate each frame (as 32bpp)\n # instead of returning raw frame data when loading\n HDR_DEFAULT = 0\n ICO_DEFAULT = 0\n ICO_MAKEALPHA = 1 # convert to 32bpp and create an alpha channel from the\n # AND-mask when loading\n IFF_DEFAULT = 0\n J2K_DEFAULT = 0 # save with a 16:1 rate\n JP2_DEFAULT = 0 # save with a 16:1 rate\n JPEG_DEFAULT = 0 # loading (see JPEG_FAST);\n # saving (see JPEG_QUALITYGOOD|JPEG_SUBSAMPLING_420)\n JPEG_FAST = 0x0001 # load the file as fast as possible,\n # sacrificing some quality\n JPEG_ACCURATE = 0x0002 # load the file with the best quality,\n # sacrificing some speed\n JPEG_CMYK = 0x0004 # load separated CMYK \"as is\"\n # (use | to combine with other load flags)\n JPEG_EXIFROTATE = 0x0008 # load and rotate according to\n # Exif 'Orientation' tag if available\n JPEG_QUALITYSUPERB = 0x80 # save with superb quality (100:1)\n JPEG_QUALITYGOOD = 0x0100 # save with good quality (75:1)\n JPEG_QUALITYNORMAL = 0x0200 # save with normal quality (50:1)\n JPEG_QUALITYAVERAGE = 0x0400 # save with average quality (25:1)\n JPEG_QUALITYBAD = 0x0800 # save with bad quality (10:1)\n JPEG_PROGRESSIVE = 0x2000 # save as a progressive-JPEG\n # (use | to combine with other save flags)\n JPEG_SUBSAMPLING_411 = 0x1000 # save with high 4x1 chroma\n # subsampling (4:1:1)\n JPEG_SUBSAMPLING_420 = 0x4000 # save with medium 2x2 medium chroma\n # subsampling (4:2:0) - default value\n JPEG_SUBSAMPLING_422 = 0x8000 # save with low 2x1 chroma subsampling (4:2:2)\n JPEG_SUBSAMPLING_444 = 0x10000 # save with no chroma subsampling (4:4:4)\n JPEG_OPTIMIZE = 0x20000 # on saving, compute optimal Huffman coding tables\n # (can reduce a few percent of file size)\n JPEG_BASELINE = 0x40000 # save basic JPEG, without metadata or any markers\n KOALA_DEFAULT = 0\n LBM_DEFAULT = 0\n MNG_DEFAULT = 0\n PCD_DEFAULT = 0\n PCD_BASE = 1 # load the bitmap sized 768 x 512\n PCD_BASEDIV4 = 2 # load the bitmap sized 384 x 256\n PCD_BASEDIV16 = 3 # load the bitmap sized 192 x 128\n PCX_DEFAULT = 0\n PFM_DEFAULT = 0\n PICT_DEFAULT = 0\n PNG_DEFAULT = 0\n PNG_IGNOREGAMMA = 1 # loading: avoid gamma correction\n PNG_Z_BEST_SPEED = 0x0001 # save using ZLib level 1 compression flag\n # (default value is 6)\n PNG_Z_DEFAULT_COMPRESSION = 0x0006 # save using ZLib level 6 compression\n # flag (default recommended value)\n PNG_Z_BEST_COMPRESSION = 0x0009 # save using ZLib level 9 compression flag\n # (default value is 6)\n PNG_Z_NO_COMPRESSION = 0x0100 # save without ZLib compression\n PNG_INTERLACED = 0x0200 # save using Adam7 interlacing (use | to combine\n # with other save flags)\n PNM_DEFAULT = 0\n PNM_SAVE_RAW = 0 # Writer saves in RAW format (i.e. 
P4, P5 or P6)\n PNM_SAVE_ASCII = 1 # Writer saves in ASCII format (i.e. P1, P2 or P3)\n PSD_DEFAULT = 0\n PSD_CMYK = 1 # reads tags for separated CMYK (default is conversion to RGB)\n PSD_LAB = 2 # reads tags for CIELab (default is conversion to RGB)\n RAS_DEFAULT = 0\n RAW_DEFAULT = 0 # load the file as linear RGB 48-bit\n RAW_PREVIEW = 1 # try to load the embedded JPEG preview with included\n # Exif Data or default to RGB 24-bit\n RAW_DISPLAY = 2 # load the file as RGB 24-bit\n SGI_DEFAULT = 0\n TARGA_DEFAULT = 0\n TARGA_LOAD_RGB888 = 1 # Convert RGB555 and ARGB8888 -> RGB888.\n TARGA_SAVE_RLE = 2 # Save with RLE compression\n TIFF_DEFAULT = 0\n TIFF_CMYK = 0x0001 # reads/stores tags for separated CMYK\n # (use | to combine with compression flags)\n TIFF_PACKBITS = 0x0100 # save using PACKBITS compression\n TIFF_DEFLATE = 0x0200 # save using DEFLATE (a.k.a. ZLIB) compression\n TIFF_ADOBE_DEFLATE = 0x0400 # save using ADOBE DEFLATE compression\n TIFF_NONE = 0x0800 # save without any compression\n TIFF_CCITTFAX3 = 0x1000 # save using CCITT Group 3 fax encoding\n TIFF_CCITTFAX4 = 0x2000 # save using CCITT Group 4 fax encoding\n TIFF_LZW = 0x4000 # save using LZW compression\n TIFF_JPEG = 0x8000 # save using JPEG compression\n TIFF_LOGLUV = 0x10000 # save using LogLuv compression\n WBMP_DEFAULT = 0\n XBM_DEFAULT = 0\n XPM_DEFAULT = 0\n\n\nclass METADATA_MODELS(object):\n FIMD_COMMENTS = 0\n FIMD_EXIF_MAIN = 1\n FIMD_EXIF_EXIF = 2\n FIMD_EXIF_GPS = 3\n FIMD_EXIF_MAKERNOTE = 4\n FIMD_EXIF_INTEROP = 5\n FIMD_IPTC = 6\n FIMD_XMP = 7\n FIMD_GEOTIFF = 8\n FIMD_ANIMATION = 9\n\n\nclass METADATA_DATATYPE(object):\n FIDT_BYTE = 1 # 8-bit unsigned integer\n FIDT_ASCII = 2 # 8-bit bytes w/ last byte null\n FIDT_SHORT = 3 # 16-bit unsigned integer\n FIDT_LONG = 4 # 32-bit unsigned integer\n FIDT_RATIONAL = 5 # 64-bit unsigned fraction\n FIDT_SBYTE = 6 # 8-bit signed integer\n FIDT_UNDEFINED = 7 # 8-bit untyped data\n FIDT_SSHORT = 8 # 16-bit signed integer\n FIDT_SLONG = 9 # 32-bit signed integer\n FIDT_SRATIONAL = 10 # 64-bit signed fraction\n FIDT_FLOAT = 11 # 32-bit IEEE floating point\n FIDT_DOUBLE = 12 # 64-bit IEEE floating point\n FIDT_IFD = 13 # 32-bit unsigned integer (offset)\n FIDT_PALETTE = 14 # 32-bit RGBQUAD\n FIDT_LONG8 = 16 # 64-bit unsigned integer\n FIDT_SLONG8 = 17 # 64-bit signed integer\n FIDT_IFD8 = 18 # 64-bit unsigned integer (offset)\n\n dtypes = {\n FIDT_BYTE: numpy.uint8,\n FIDT_SHORT: numpy.uint16,\n FIDT_LONG: numpy.uint32,\n FIDT_RATIONAL: [('numerator', numpy.uint32),\n ('denominator', numpy.uint32)],\n FIDT_SBYTE: numpy.int8,\n FIDT_UNDEFINED: numpy.uint8,\n FIDT_SSHORT: numpy.int16,\n FIDT_SLONG: numpy.int32,\n FIDT_SRATIONAL: [('numerator', numpy.int32),\n ('denominator', numpy.int32)],\n FIDT_FLOAT: numpy.float32,\n FIDT_DOUBLE: numpy.float64,\n FIDT_IFD: numpy.uint32,\n FIDT_PALETTE: [('R', numpy.uint8), ('G', numpy.uint8),\n ('B', numpy.uint8), ('A', numpy.uint8)],\n FIDT_LONG8: numpy.uint64,\n FIDT_SLONG8: numpy.int64,\n FIDT_IFD8: numpy.uint64\n }\n\n\ndef _process_bitmap(filename, flags, process_func):\n filename = asbytes(filename)\n ftype = _FI.FreeImage_GetFileType(filename, 0)\n handle_errors()\n if ftype == -1:\n raise ValueError('Cannot determine type of file %s' % filename)\n bitmap = _FI.FreeImage_Load(ftype, filename, flags)\n handle_errors()\n bitmap = ctypes.c_void_p(bitmap)\n if not bitmap:\n raise ValueError('Could not load file %s' % filename)\n try:\n return process_func(bitmap)\n finally:\n _FI.FreeImage_Unload(bitmap)\n handle_errors()\n\n\ndef 
read(filename, flags=0):\n \"\"\"Read an image to a numpy array of shape (height, width) for\n greyscale images, or shape (height, width, nchannels) for RGB or\n RGBA images.\n The `flags` parameter should be one or more values from the IO_FLAGS\n class defined in this module, or-ed together with | as appropriate.\n (See the source-code comments for more details.)\n \"\"\"\n return _process_bitmap(filename, flags, _array_from_bitmap)\n\n\ndef read_metadata(filename):\n \"\"\"Return a dict containing all image metadata.\n\n Returned dict maps (metadata_model, tag_name) keys to tag values, where\n metadata_model is a string name based on the FreeImage \"metadata models\"\n defined in the class METADATA_MODELS.\n \"\"\"\n flags = IO_FLAGS.FIF_LOAD_NOPIXELS\n return _process_bitmap(filename, flags, _read_metadata)\n\n\ndef _process_multipage(filename, flags, process_func):\n filename = asbytes(filename)\n ftype = _FI.FreeImage_GetFileType(filename, 0)\n handle_errors()\n if ftype == -1:\n raise ValueError('Cannot determine type of file %s' % filename)\n create_new = False\n read_only = True\n keep_cache_in_memory = True\n multibitmap = _FI.FreeImage_OpenMultiBitmap(ftype, filename, create_new,\n read_only, keep_cache_in_memory,\n flags)\n handle_errors()\n multibitmap = ctypes.c_void_p(multibitmap)\n if not multibitmap:\n raise ValueError('Could not open %s as multi-page image.' % filename)\n try:\n pages = _FI.FreeImage_GetPageCount(multibitmap)\n handle_errors()\n out = []\n for i in range(pages):\n bitmap = _FI.FreeImage_LockPage(multibitmap, i)\n handle_errors()\n bitmap = ctypes.c_void_p(bitmap)\n if not bitmap:\n raise ValueError('Could not open %s as a multi-page image.'\n % filename)\n try:\n out.append(process_func(bitmap))\n finally:\n _FI.FreeImage_UnlockPage(multibitmap, bitmap, False)\n handle_errors()\n return out\n finally:\n _FI.FreeImage_CloseMultiBitmap(multibitmap, 0)\n handle_errors()\n\n\ndef read_multipage(filename, flags=0):\n \"\"\"Read a multipage image to a list of numpy arrays, where each\n array is of shape (height, width) for greyscale images, or shape\n (height, width, nchannels) for RGB or RGBA images.\n The `flags` parameter should be one or more values from the IO_FLAGS\n class defined in this module, or-ed together with | as appropriate.\n (See the source-code comments for more details.)\n \"\"\"\n return _process_multipage(filename, flags, _array_from_bitmap)\n\n\ndef read_multipage_metadata(filename):\n \"\"\"Read a multipage image to a list of metadata dicts, one dict for each\n page. The dict format is as in read_metadata().\n \"\"\"\n flags = IO_FLAGS.FIF_LOAD_NOPIXELS\n return _process_multipage(filename, flags, _read_metadata)\n\n\ndef _wrap_bitmap_bits_in_array(bitmap, shape, dtype):\n \"\"\"Return an ndarray view on the data in a FreeImage bitmap. 
Only\n valid for as long as the bitmap is loaded (if single page) / locked\n in memory (if multipage).\n\n \"\"\"\n pitch = _FI.FreeImage_GetPitch(bitmap)\n handle_errors()\n height = shape[-1]\n byte_size = height * pitch\n itemsize = dtype.itemsize\n\n if len(shape) == 3:\n strides = (itemsize, shape[0] * itemsize, pitch)\n else:\n strides = (itemsize, pitch)\n bits = _FI.FreeImage_GetBits(bitmap)\n handle_errors()\n array = numpy.ndarray(shape, dtype=dtype,\n buffer=(ctypes.c_char * byte_size).from_address(bits),\n strides=strides)\n return array\n\n\ndef _array_from_bitmap(bitmap):\n \"\"\"Convert a FreeImage bitmap pointer to a numpy array.\n\n \"\"\"\n dtype, shape = FI_TYPES.get_type_and_shape(bitmap)\n array = _wrap_bitmap_bits_in_array(bitmap, shape, dtype)\n # swizzle the color components and flip the scanlines to go from\n # FreeImage's BGR[A] and upside-down internal memory format to something\n # more normal\n def n(arr):\n return arr[..., ::-1].T\n if len(shape) == 3 and _FI.FreeImage_IsLittleEndian() and \\\n dtype.type == numpy.uint8:\n b = n(array[0])\n g = n(array[1])\n r = n(array[2])\n if shape[0] == 3:\n handle_errors()\n return numpy.dstack((r, g, b))\n elif shape[0] == 4:\n a = n(array[3])\n return numpy.dstack((r, g, b, a))\n else:\n raise ValueError('Cannot handle images of shape %s' % shape)\n\n # We need to copy because array does *not* own its memory\n # after bitmap is freed.\n return n(array).copy()\n\n\ndef _read_metadata(bitmap):\n metadata = {}\n models = [(name[5:], number) for name, number in\n METADATA_MODELS.__dict__.items() if name.startswith('FIMD_')]\n\n tag = ctypes.c_void_p()\n for model_name, number in models:\n mdhandle = _FI.FreeImage_FindFirstMetadata(number, bitmap,\n ctypes.byref(tag))\n handle_errors()\n mdhandle = ctypes.c_void_p(mdhandle)\n if mdhandle:\n more = True\n while more:\n tag_name = asstr(_FI.FreeImage_GetTagKey(tag))\n tag_type = _FI.FreeImage_GetTagType(tag)\n byte_size = _FI.FreeImage_GetTagLength(tag)\n handle_errors()\n char_ptr = ctypes.c_char * byte_size\n tag_str = char_ptr.from_address(_FI.FreeImage_GetTagValue(tag))\n handle_errors()\n if tag_type == METADATA_DATATYPE.FIDT_ASCII:\n tag_val = asstr(tag_str.value)\n else:\n tag_val = numpy.fromstring(tag_str,\n dtype=METADATA_DATATYPE.dtypes[tag_type])\n if len(tag_val) == 1:\n tag_val = tag_val[0]\n metadata[(model_name, tag_name)] = tag_val\n more = _FI.FreeImage_FindNextMetadata(mdhandle, ctypes.byref(tag))\n handle_errors()\n _FI.FreeImage_FindCloseMetadata(mdhandle)\n handle_errors()\n return metadata\n\n\ndef write(array, filename, flags=0):\n \"\"\"Write a (height, width) or (height, width, nchannels) array to\n a greyscale, RGB, or RGBA image, with file type deduced from the\n filename.\n The `flags` parameter should be one or more values from the IO_FLAGS\n class defined in this module, or-ed together with | as appropriate.\n (See the source-code comments for more details.)\n \"\"\"\n array = numpy.asarray(array)\n filename = asbytes(filename)\n ftype = _FI.FreeImage_GetFIFFromFilename(filename)\n handle_errors()\n if ftype == -1:\n raise ValueError('Cannot determine type for %s' % filename)\n bitmap, fi_type = _array_to_bitmap(array)\n try:\n if fi_type == FI_TYPES.FIT_BITMAP:\n can_write = _FI.FreeImage_FIFSupportsExportBPP(ftype,\n _FI.FreeImage_GetBPP(bitmap))\n handle_errors()\n else:\n can_write = _FI.FreeImage_FIFSupportsExportType(ftype, fi_type)\n handle_errors()\n if not can_write:\n raise TypeError('Cannot save image of this format '\n 'to this file 
type')\n res = _FI.FreeImage_Save(ftype, bitmap, filename, flags)\n handle_errors()\n if not res:\n raise RuntimeError('Could not save image properly.')\n finally:\n _FI.FreeImage_Unload(bitmap)\n handle_errors()\n\n\ndef write_multipage(arrays, filename, flags=0):\n \"\"\"Write a list of (height, width) or (height, width, nchannels)\n arrays to a multipage greyscale, RGB, or RGBA image, with file type\n deduced from the filename.\n The `flags` parameter should be one or more values from the IO_FLAGS\n class defined in this module, or-ed together with | as appropriate.\n (See the source-code comments for more details.)\n \"\"\"\n filename = asbytes(filename)\n ftype = _FI.FreeImage_GetFIFFromFilename(filename)\n if ftype == -1:\n raise ValueError('Cannot determine type of file %s' % filename)\n create_new = True\n read_only = False\n keep_cache_in_memory = True\n multibitmap = _FI.FreeImage_OpenMultiBitmap(ftype, filename,\n create_new, read_only,\n keep_cache_in_memory, 0)\n multibitmap = ctypes.c_void_p(multibitmap)\n if not multibitmap:\n raise ValueError('Could not open %s for writing multi-page image.' %\n filename)\n try:\n for array in arrays:\n array = numpy.asarray(array)\n bitmap, fi_type = _array_to_bitmap(array)\n _FI.FreeImage_AppendPage(multibitmap, bitmap)\n finally:\n _FI.FreeImage_CloseMultiBitmap(multibitmap, flags)\n\n# 4-byte quads of 0,v,v,v from 0,0,0,0 to 0,255,255,255\n_GREY_PALETTE = numpy.arange(0, 0x01000000, 0x00010101, dtype=numpy.uint32)\n\n\ndef _array_to_bitmap(array):\n \"\"\"Allocate a FreeImage bitmap and copy a numpy array into it.\n\n \"\"\"\n shape = array.shape\n dtype = array.dtype\n r, c = shape[:2]\n if len(shape) == 2:\n n_channels = 1\n w_shape = (c, r)\n elif len(shape) == 3:\n n_channels = shape[2]\n w_shape = (n_channels, c, r)\n else:\n n_channels = shape[0]\n try:\n fi_type = FI_TYPES.fi_types[(dtype, n_channels)]\n except KeyError:\n raise ValueError('Cannot write arrays of given type and shape.')\n\n itemsize = array.dtype.itemsize\n bpp = 8 * itemsize * n_channels\n bitmap = _FI.FreeImage_AllocateT(fi_type, c, r, bpp, 0, 0, 0)\n bitmap = ctypes.c_void_p(bitmap)\n if not bitmap:\n raise RuntimeError('Could not allocate image for storage')\n try:\n def n(arr): # normalise to freeimage's in-memory format\n return arr.T[..., ::-1]\n\n wrapped_array = _wrap_bitmap_bits_in_array(bitmap, w_shape, dtype)\n # swizzle the color components and flip the scanlines to go to\n # FreeImage's BGR[A] and upside-down internal memory format\n if len(shape) == 3 and _FI.FreeImage_IsLittleEndian():\n R = array[:, :, 0]\n G = array[:, :, 1]\n B = array[:, :, 2]\n\n if dtype.type == numpy.uint8:\n wrapped_array[0] = n(B)\n wrapped_array[1] = n(G)\n wrapped_array[2] = n(R)\n elif dtype.type == numpy.uint16:\n wrapped_array[0] = n(R)\n wrapped_array[1] = n(G)\n wrapped_array[2] = n(B)\n\n if shape[2] == 4:\n A = array[:, :, 3]\n wrapped_array[3] = n(A)\n else:\n wrapped_array[:] = n(array)\n if len(shape) == 2 and dtype.type == numpy.uint8:\n palette = _FI.FreeImage_GetPalette(bitmap)\n palette = ctypes.c_void_p(palette)\n if not palette:\n raise RuntimeError('Could not get image palette')\n ctypes.memmove(palette, _GREY_PALETTE.ctypes.data, 1024)\n return bitmap, fi_type\n except:\n _FI.FreeImage_Unload(bitmap)\n raise\n\n\ndef imread(filename):\n \"\"\"\n img = imread(filename)\n\n Reads an image from file `filename`\n\n Parameters\n ----------\n filename : file name\n Returns\n -------\n img : ndarray\n \"\"\"\n img = read(filename)\n return img\n\n\ndef 
imsave(filename, img):\n '''\n imsave(filename, img)\n\n Save image to disk\n\n Image type is inferred from filename\n\n Parameters\n ----------\n filename : file name\n img : image to be saved as nd array\n '''\n write(img, filename)\n",
"\"\"\"\n====================\nFinding local maxima\n====================\n\nThe ``peak_local_max`` function returns the coordinates of local peaks (maxima)\nin an image. A maximum filter is used for finding local maxima. This operation\ndilates the original image and merges neighboring local maxima closer than the\nsize of the dilation. Locations where the original image is equal to the\ndilated image are returned as local maxima.\n\n\"\"\"\nfrom scipy import ndimage\nimport matplotlib.pyplot as plt\nfrom skimage.feature import peak_local_max\nfrom skimage import data, img_as_float\n\nim = img_as_float(data.coins())\n\n# image_max is the dilation of im with a 20*20 structuring element\n# It is used within peak_local_max function\nimage_max = ndimage.maximum_filter(im, size=20, mode='constant')\n\n# Comparison between image_max and im to find the coordinates of local maxima\ncoordinates = peak_local_max(im, min_distance=20)\n\n# display results\nfig, ax = plt.subplots(1, 3, figsize=(8, 3))\nax1, ax2, ax3 = ax.ravel()\nax1.imshow(im, cmap=plt.cm.gray)\nax1.axis('off')\nax1.set_title('Original')\n\nax2.imshow(image_max, cmap=plt.cm.gray)\nax2.axis('off')\nax2.set_title('Maximum filter')\n\nax3.imshow(im, cmap=plt.cm.gray)\nax3.autoscale(False)\nax3.plot(coordinates[:, 1], coordinates[:, 0], 'r.')\nax3.axis('off')\nax3.set_title('Peak local max')\n\nfig.subplots_adjust(wspace=0.02, hspace=0.02, top=0.9,\n bottom=0.02, left=0.02, right=0.98)\n\nplt.show()\n",
"# coding=utf-8\n\nimport collections as coll\nimport numpy as np\nfrom scipy import ndimage\nimport warnings\n\nfrom ..util import img_as_float, regular_grid\nfrom ..segmentation._slic import (_slic_cython,\n _enforce_label_connectivity_cython)\nfrom ..color import rgb2lab\n\n\ndef slic(image, n_segments=100, compactness=10., max_iter=10, sigma=0,\n spacing=None, multichannel=True, convert2lab=None,\n enforce_connectivity=False, min_size_factor=0.5, max_size_factor=3,\n slic_zero=False):\n \"\"\"Segments image using k-means clustering in Color-(x,y,z) space.\n\n Parameters\n ----------\n image : 2D, 3D or 4D ndarray\n Input image, which can be 2D or 3D, and grayscale or multichannel\n (see `multichannel` parameter).\n n_segments : int, optional\n The (approximate) number of labels in the segmented output image.\n compactness : float, optional\n Balances color-space proximity and image-space proximity. Higher\n values give more weight to image-space. As `compactness` tends to\n infinity, superpixel shapes become square/cubic. In SLICO mode, this\n is the initial compactness.\n max_iter : int, optional\n Maximum number of iterations of k-means.\n sigma : float or (3,) array-like of floats, optional\n Width of Gaussian smoothing kernel for pre-processing for each\n dimension of the image. The same sigma is applied to each dimension in\n case of a scalar value. Zero means no smoothing.\n Note, that `sigma` is automatically scaled if it is scalar and a\n manual voxel spacing is provided (see Notes section).\n spacing : (3,) array-like of floats, optional\n The voxel spacing along each image dimension. By default, `slic`\n assumes uniform spacing (same voxel resolution along z, y and x).\n This parameter controls the weights of the distances along z, y,\n and x during k-means clustering.\n multichannel : bool, optional\n Whether the last axis of the image is to be interpreted as multiple\n channels or another spatial dimension.\n convert2lab : bool, optional\n Whether the input should be converted to Lab colorspace prior to\n segmentation. The input image *must* be RGB. Highly recommended.\n This option defaults to ``True`` when ``multichannel=True`` *and*\n ``image.shape[-1] == 3``.\n enforce_connectivity: bool, optional (default False)\n Whether the generated segments are connected or not\n min_size_factor: float, optional\n Proportion of the minimum segment size to be removed with respect\n to the supposed segment size ```depth*width*height/n_segments```\n max_size_factor: float, optional\n Proportion of the maximum connected segment size. A value of 3 works\n in most of the cases.\n slic_zero: bool, optional\n Run SLIC-zero, the zero-parameter mode of SLIC. [2]_\n\n Returns\n -------\n labels : 2D or 3D array\n Integer mask indicating segment labels.\n\n Raises\n ------\n ValueError\n If ``convert2lab`` is set to ``True`` but the last array\n dimension is not of length 3.\n\n Notes\n -----\n * If `sigma > 0`, the image is smoothed using a Gaussian kernel prior to\n segmentation.\n\n * If `sigma` is scalar and `spacing` is provided, the kernel width is\n divided along each dimension by the spacing. For example, if ``sigma=1``\n and ``spacing=[5, 1, 1]``, the effective `sigma` is ``[0.2, 1, 1]``. This\n ensures sensible smoothing for anisotropic images.\n\n * The image is rescaled to be in [0, 1] prior to processing.\n\n * Images of shape (M, N, 3) are interpreted as 2D RGB images by default. 
To\n interpret them as 3D with the last dimension having length 3, use\n `multichannel=False`.\n\n References\n ----------\n .. [1] Radhakrishna Achanta, Appu Shaji, Kevin Smith, Aurelien Lucchi,\n Pascal Fua, and Sabine Süsstrunk, SLIC Superpixels Compared to\n State-of-the-art Superpixel Methods, TPAMI, May 2012.\n .. [2] http://ivrg.epfl.ch/research/superpixels#SLICO\n\n Examples\n --------\n >>> from skimage.segmentation import slic\n >>> from skimage.data import astronaut\n >>> img = astronaut()\n >>> segments = slic(img, n_segments=100, compactness=10)\n\n Increasing the compactness parameter yields more square regions:\n\n >>> segments = slic(img, n_segments=100, compactness=20)\n\n \"\"\"\n if enforce_connectivity is None:\n warnings.warn('Deprecation: enforce_connectivity will default to'\n ' True in future versions.')\n enforce_connectivity = False\n\n image = img_as_float(image)\n is_2d = False\n if image.ndim == 2:\n # 2D grayscale image\n image = image[np.newaxis, ..., np.newaxis]\n is_2d = True\n elif image.ndim == 3 and multichannel:\n # Make 2D multichannel image 3D with depth = 1\n image = image[np.newaxis, ...]\n is_2d = True\n elif image.ndim == 3 and not multichannel:\n # Add channel as single last dimension\n image = image[..., np.newaxis]\n\n if spacing is None:\n spacing = np.ones(3)\n elif isinstance(spacing, (list, tuple)):\n spacing = np.array(spacing, dtype=np.double)\n\n if not isinstance(sigma, coll.Iterable):\n sigma = np.array([sigma, sigma, sigma], dtype=np.double)\n sigma /= spacing.astype(np.double)\n elif isinstance(sigma, (list, tuple)):\n sigma = np.array(sigma, dtype=np.double)\n if (sigma > 0).any():\n # add zero smoothing for multichannel dimension\n sigma = list(sigma) + [0]\n image = ndimage.gaussian_filter(image, sigma)\n\n if multichannel and (convert2lab or convert2lab is None):\n if image.shape[-1] != 3 and convert2lab:\n raise ValueError(\"Lab colorspace conversion requires a RGB image.\")\n elif image.shape[-1] == 3:\n image = rgb2lab(image)\n\n depth, height, width = image.shape[:3]\n\n # initialize cluster centroids for desired number of segments\n grid_z, grid_y, grid_x = np.mgrid[:depth, :height, :width]\n slices = regular_grid(image.shape[:3], n_segments)\n step_z, step_y, step_x = [int(s.step) for s in slices]\n segments_z = grid_z[slices]\n segments_y = grid_y[slices]\n segments_x = grid_x[slices]\n\n segments_color = np.zeros(segments_z.shape + (image.shape[3],))\n segments = np.concatenate([segments_z[..., np.newaxis],\n segments_y[..., np.newaxis],\n segments_x[..., np.newaxis],\n segments_color],\n axis=-1).reshape(-1, 3 + image.shape[3])\n segments = np.ascontiguousarray(segments)\n\n # we do the scaling of ratio in the same way as in the SLIC paper\n # so the values have the same meaning\n step = float(max((step_z, step_y, step_x)))\n ratio = 1.0 / compactness\n\n image = np.ascontiguousarray(image * ratio)\n\n labels = _slic_cython(image, segments, step, max_iter, spacing, slic_zero)\n\n if enforce_connectivity:\n segment_size = depth * height * width / n_segments\n min_size = int(min_size_factor * segment_size)\n max_size = int(max_size_factor * segment_size)\n labels = _enforce_label_connectivity_cython(labels,\n n_segments,\n min_size,\n max_size)\n\n if is_2d:\n labels = labels[0]\n\n return labels\n"
] |
[
[
"numpy.asanyarray",
"numpy.array",
"numpy.diff"
],
[
"numpy.asarray",
"numpy.arange",
"numpy.dstack",
"numpy.dtype",
"numpy.compat.asbytes",
"numpy.compat.asstr",
"numpy.fromstring"
],
[
"scipy.ndimage.maximum_filter",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
],
[
"scipy.ndimage.gaussian_filter",
"numpy.ascontiguousarray",
"numpy.ones",
"numpy.concatenate",
"numpy.array",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
crcrpar/NeMo
|
[
"8e15ba43ba0a17b456d3bfa09444574ef1faa301"
] |
[
"nemo/collections/nlp/models/language_modeling/megatron_lm_encoder_decoder_model.py"
] |
[
"# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport re\nfrom operator import itemgetter\nfrom typing import Any, Dict, Optional\n\nimport torch\nimport torch.nn as nn\nfrom omegaconf.dictconfig import DictConfig\nfrom pytorch_lightning.trainer.trainer import Trainer\n\nfrom nemo.collections.nlp.data.language_modeling.megatron.data_samplers import (\n MegatronPretrainingRandomSampler,\n MegatronPretrainingSampler,\n)\nfrom nemo.collections.nlp.models.language_modeling.megatron_base_model import MegatronBaseModel\nfrom nemo.collections.nlp.modules.common.megatron.clip_grads import clip_grad_norm_fp32\nfrom nemo.collections.nlp.modules.common.megatron.token_level_encoder_decoder import (\n MegatronTokenLevelEncoderDecoderModule,\n)\nfrom nemo.collections.nlp.modules.common.megatron.utils import average_losses_across_data_parallel_group\nfrom nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer\nfrom nemo.utils import AppState, logging\n\ntry:\n from apex.transformer import parallel_state, tensor_parallel\n\n HAVE_APEX = True\nexcept (ImportError, ModuleNotFoundError):\n HAVE_APEX = False\n\n\n__all__ = [\"MegatronLMEncoderDecoderModel\"]\n\n\nclass MegatronLMEncoderDecoderModel(MegatronBaseModel):\n \"\"\"\n Megatron encoder-decoder base class\n \"\"\"\n\n def __init__(self, cfg: DictConfig, trainer: Trainer):\n super().__init__(cfg, trainer=trainer)\n\n # build tokenizer (defaults to nemo supported tokenizers)\n self._build_tokenizer()\n\n # manipulate vocabulary (e.g., pad vocabulary for better efficiency)\n self._build_vocab()\n\n # TODO: create get_encoder_decoder_model()here for different losses (e..g, nll, vae, mim)\n self.enc_dec_model = MegatronTokenLevelEncoderDecoderModule(\n encoder_arch=cfg.encoder_arch,\n decoder_arch=cfg.decoder_arch,\n vocab_size=self.padded_vocab_size,\n hidden_size=cfg.hidden_size,\n max_position_embeddings=cfg.max_position_embeddings,\n num_layers=cfg.num_layers,\n num_attention_heads=cfg.num_attention_heads,\n apply_query_key_layer_scaling=cfg.get('apply_query_key_layer_scaling', True),\n kv_channels=cfg.get('kv_channels', None),\n ffn_hidden_size=cfg.ffn_hidden_size,\n num_tokentypes=0,\n parallel_output=True,\n pre_process=cfg.get('pre_process', True),\n post_process=cfg.get('post_process', True),\n init_method_std=cfg.get('init_method_std', 0.02),\n fp16_cross_entropy=cfg.get('fp16_lm_cross_entropy', False),\n use_cpu_initialization=cfg.get('use_cpu_initialization', False),\n hidden_dropout=cfg.get('hidden_dropout', 0.1),\n precision=cfg.get('precision', 16),\n fp32_residual_connection=cfg.get('fp32_residual_connection', False),\n activations_checkpoint_method=cfg.get('activations_checkpoint_method', None),\n activations_checkpoint_num_layers=cfg.get('activations_checkpoint_num_layers', 1),\n layernorm_epsilon=cfg.get('layernorm_epsilon', 1e-5),\n persist_layer_norm=cfg.get('persist_layer_norm', False),\n bias_gelu_fusion=True,\n 
onnx_safe=cfg.get('onnx_safe', False),\n )\n\n def _build_tokenizer(self):\n \"\"\"\n Default tokenizer is based on available nemo tokenizers.\n Override this method to use an external tokenizer.\n All tokenizers are expected to provide compatible interface.\n Override default Encoder-decoder tokenizer to use legacy=True for sentencepiece.\n \"\"\"\n self.tokenizer = get_nmt_tokenizer(\n library=self._cfg.tokenizer.library,\n model_name=self._cfg.tokenizer.type,\n tokenizer_model=self.register_artifact(\"tokenizer_model\", self._cfg.tokenizer.model),\n vocab_file=self.register_artifact(\"vocab_file\", self._cfg.tokenizer.vocab_file),\n merges_file=self.register_artifact(\"merges_file\", self._cfg.tokenizer.merge_file),\n legacy=True if self._cfg.tokenizer.library == 'sentencepiece' else False,\n )\n\n def _build_vocab(self):\n \"\"\"\n Manipulate vocabulary (e.g., pad vocabulary for increased performance)/\n \"\"\"\n # TODO: add config to allow to disable it?\n self.padded_vocab_size = self._vocab_size_with_padding(\n orig_vocab_size=self.tokenizer.vocab_size,\n make_vocab_size_divisible_by=self._cfg.get('make_vocab_size_divisible_by', 128),\n tensor_model_parallel_size=self._cfg.get('tensor_model_parallel_size', 1),\n )\n\n def forward(\n self,\n encoder_input_ids,\n decoder_input_ids,\n encoder_attn_mask,\n decoder_attn_mask,\n tokentype_ids=None,\n lm_labels=None,\n enc_hidden_states=None,\n output_enc_hidden_only=False,\n ):\n ret_dict = self.enc_dec_model(\n enc_input_ids=encoder_input_ids,\n dec_input_ids=decoder_input_ids,\n enc_attn_mask=encoder_attn_mask,\n dec_attn_mask=decoder_attn_mask,\n tokentype_ids=tokentype_ids,\n labels=lm_labels,\n enc_hidden_states=enc_hidden_states,\n output_enc_hidden_only=output_enc_hidden_only,\n )\n\n return ret_dict\n\n def training_step(self, batch, batch_idx):\n tokens_enc, tokens_dec, loss_mask, labels, enc_mask, dec_mask = self.process_batch(batch)\n\n tokens_loss = itemgetter(\"tokens_loss\")(\n self(tokens_enc, tokens_dec, enc_mask, dec_mask, tokentype_ids=None, lm_labels=labels,)\n )\n\n loss = self.loss_func(loss_mask, tokens_loss)\n self.log('train_loss', loss)\n # Reduced loss for logging. 
This averages the loss across all workers unlike \"loss\" above which is specific to a DDP rank.\n reduced_loss = average_losses_across_data_parallel_group([loss])\n # cache reduced loss while accumulating gradients\n self._reduced_loss_buffer.append(reduced_loss[0])\n\n if (batch_idx + 1) % self.trainer.accumulate_grad_batches == 0:\n # Reduced loss for logging.\n average_reduced_loss = sum(self._reduced_loss_buffer) / len(self._reduced_loss_buffer)\n self.log('reduced_train_loss', average_reduced_loss, prog_bar=True)\n lr = self._optimizer.param_groups[0]['lr']\n self.log('lr', lr)\n self.log('global_step', self.trainer.global_step, prog_bar=True)\n self.log('consumed_samples', self.compute_consumed_samples(self.trainer.global_step), prog_bar=True)\n self._reduced_loss_buffer = []\n\n return loss\n\n def validation_step(self, batch, batch_idx):\n tokens_enc, tokens_dec, loss_mask, labels, enc_mask, dec_mask = self.process_batch(batch)\n\n tokens_loss = itemgetter(\"tokens_loss\")(\n self(tokens_enc, tokens_dec, enc_mask, dec_mask, tokentype_ids=None, lm_labels=labels,)\n )\n loss = self.loss_func(loss_mask, tokens_loss)\n reduced_loss = average_losses_across_data_parallel_group([loss])\n return reduced_loss\n\n def validation_epoch_end(self, outputs):\n averaged_loss = average_losses_across_data_parallel_group(outputs)\n self.log('val_loss', averaged_loss[0], prog_bar=True)\n self.log('consumed_samples', self.compute_consumed_samples(self.trainer.global_step))\n\n def test_step(self, batch, batch_idx):\n return self.validation_step(batch, batch_idx)\n\n def test_epoch_end(self, outputs):\n averaged_loss = average_losses_across_data_parallel_group(outputs)\n logging.info(f'test_loss: {averaged_loss[0]}')\n\n def loss_func(self, loss_mask, tokens_loss):\n \"\"\"\n This function takes as input per-token loss and masks non-required values.\n \"\"\"\n losses = tokens_loss.view(-1).float()\n loss_mask = loss_mask.view(-1).float()\n # TODO: add nemo version here\n loss = torch.sum(losses * loss_mask) / loss_mask.sum() # sequence level nll\n return loss\n\n def process_batch(self, batch):\n \"\"\"Build the batch.\"\"\"\n\n keys = ['text_enc', 'text_dec', 'labels', 'loss_mask', 'enc_mask', 'dec_mask']\n datatype = torch.int64\n\n data = batch\n data_b = tensor_parallel.broadcast_data(keys, data, datatype)\n\n # Unpack.\n tokens_enc = data_b['text_enc'].long()\n tokens_dec = data_b['text_dec'].long()\n labels = data_b['labels'].long()\n loss_mask = data_b['loss_mask'].float()\n\n enc_mask = data_b['enc_mask']\n dec_mask = data_b['dec_mask']\n\n return tokens_enc, tokens_dec, loss_mask, labels, enc_mask, dec_mask\n\n def build_train_valid_test_datasets(self):\n raise NotImplementedError(\"Please implement this method in child-class\")\n\n def build_pretraining_data_loader(self, dataset, consumed_samples):\n \"\"\"Buld dataloader given an input dataset.\"\"\"\n\n if dataset is None:\n return None\n\n # Megatron sampler\n if self._cfg.data.dataloader_type == 'single':\n batch_sampler = MegatronPretrainingSampler(\n total_samples=len(dataset),\n consumed_samples=consumed_samples,\n micro_batch_size=self._cfg.micro_batch_size,\n data_parallel_rank=parallel_state.get_data_parallel_rank(),\n data_parallel_size=parallel_state.get_data_parallel_world_size(),\n )\n elif self._cfg.data.dataloader_type == 'cyclic':\n batch_sampler = MegatronPretrainingRandomSampler(\n total_samples=len(dataset),\n consumed_samples=consumed_samples,\n micro_batch_size=self._cfg.micro_batch_size,\n 
data_parallel_rank=parallel_state.get_data_parallel_rank(),\n data_parallel_size=parallel_state.get_data_parallel_world_size(),\n )\n else:\n raise Exception('{} dataloader type is not supported.'.format(self._cfg.dataloader_type))\n\n # Torch dataloader.\n return torch.utils.data.DataLoader(\n dataset, batch_sampler=batch_sampler, num_workers=self._cfg.data.num_workers, pin_memory=True,\n )\n\n def setup(self, stage=None):\n \"\"\"A PTL method to setup the training, validation and test datasets.\"\"\"\n if stage == 'predict':\n return\n if self._train_dl is not None and self._validation_dl is not None:\n return\n self.build_train_valid_test_datasets()\n self.setup_training_data(self._cfg.data)\n self.setup_validation_data(self._cfg.data)\n self.setup_test_data(self._cfg.data)\n\n def setup_training_data(self, cfg):\n if hasattr(self, '_train_ds'):\n resume_checkpoint_path = self.trainer.checkpoint_connector.resume_checkpoint_path\n if resume_checkpoint_path:\n consumed_samples = int(\n float(re.findall(r\"consumed_samples\\=([0-9]+.[0-9]+)\", resume_checkpoint_path)[0])\n )\n else:\n consumed_samples = 0\n self._train_dl = self.build_pretraining_data_loader(self._train_ds, consumed_samples)\n\n def setup_validation_data(self, cfg):\n if hasattr(self, '_validation_ds'):\n consumed_samples = 0\n self._validation_dl = self.build_pretraining_data_loader(self._validation_ds, consumed_samples)\n\n def setup_test_data(self, cfg):\n if hasattr(self, '_test_ds'):\n consumed_samples = 0\n self._test_dl = self.build_pretraining_data_loader(self._test_ds, consumed_samples)\n\n def compute_consumed_samples(self, global_step):\n app_state = AppState()\n consumed_samples = (\n global_step\n * app_state.data_parallel_size\n * self._cfg.micro_batch_size\n * self.trainer.accumulate_grad_batches\n )\n return int(consumed_samples)\n\n def configure_gradient_clipping(self, *args, **kwargs):\n \"\"\"PTL hook to configure gradients.\n We use gradient clipping implementation from megatron-lm.\n \"\"\"\n clip_val = self.trainer.gradient_clip_val\n if clip_val is None:\n return\n\n clip_val = float(clip_val)\n if clip_val <= 0:\n return\n\n parameters = self.enc_dec_model.parameters()\n clip_grad_norm_fp32(parameters=parameters, max_norm=clip_val)\n\n def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: Optional[int] = None) -> Any:\n request = batch\n response = self.complete(request)\n logging.info(f\"response: {response}\")\n return response\n\n def decode(self, tokens_enc, enc_mask, num_tokens_to_generate):\n # TODO: move method into a class inside MegatronTokenLevelEncoderDecoderModule (?)\n encoder_hidden_states = itemgetter(\"enc_output\")(\n self(\n encoder_input_ids=tokens_enc,\n decoder_input_ids=None,\n encoder_attn_mask=enc_mask,\n decoder_attn_mask=None,\n tokentype_ids=None,\n lm_labels=None,\n enc_hidden_states=None,\n output_enc_hidden_only=True,\n )\n )\n predicted_tokens_dec = torch.LongTensor([self.tokenizer.bos_id]).unsqueeze(0).to(tokens_enc.device)\n\n for _ in range(num_tokens_to_generate):\n dec_mask = predicted_tokens_dec != self.tokenizer.pad_id\n token_logits = itemgetter(\"token_logits\")(\n self(\n encoder_input_ids=tokens_enc,\n decoder_input_ids=predicted_tokens_dec,\n encoder_attn_mask=enc_mask,\n decoder_attn_mask=dec_mask,\n tokentype_ids=None,\n lm_labels=None,\n enc_hidden_states=encoder_hidden_states,\n output_enc_hidden_only=False,\n )\n )\n token_logits = tensor_parallel.gather_from_tensor_model_parallel_region(token_logits)\n # FIXME: already log softmax?\n 
log_probs, token_ids = torch.max(nn.functional.log_softmax(token_logits, dim=-1), dim=-1)\n predicted_tokens_dec = torch.cat([predicted_tokens_dec, token_ids[:, -1].unsqueeze(1)], 1)\n if token_ids[:, -1] == self.tokenizer.eos_id:\n break\n\n return predicted_tokens_dec, log_probs\n\n def complete(self, request: Dict):\n \"\"\"\n Autoregressively invokes language model in the inference mode\n Args:\n request: Dictionary with the following fields\n * prompt: a string which text the model should complete.\n * tokens_to_generate: how many tokens to generate while doing prompt completion.\n Returns:\n response: A python dictionary with the following fields\n * prompt: original text of the prompt\n * tokenized_prompt: list of (str) tokens from prompt\n * completion: a python dictionary with the following subfields:\n * tokens: a list of triples (token, token_id, log_prob) comprising completion\n * text: completion text (as a single string)\n\n \"\"\"\n response = {}\n self.freeze()\n # naive greedy slow loop\n # TODO: add option for BeamSearchDecoder\n\n response['prompt'] = request['prompt'][0]\n response['completion'] = {}\n tokens_enc = request['masked_sample']\n\n response['masked_input'] = ' '.join(self.tokenizer.ids_to_tokens(tokens_enc[0]))\n enc_mask = tokens_enc != self.tokenizer.pad_id\n enc_mask = enc_mask < 0.5\n\n predicted_tokens_ids, log_probs = self.decode(tokens_enc, enc_mask, int(request['tokens_to_generate']))\n predicted_tokens_ids = predicted_tokens_ids.cpu().numpy()[0].tolist()\n log_probs = log_probs.cpu().numpy()[0].tolist()\n if self.tokenizer.eos_id in predicted_tokens_ids:\n idx = predicted_tokens_ids.index(self.tokenizer.eos_id)\n predicted_tokens_ids = predicted_tokens_ids[:idx]\n else:\n predicted_tokens_ids = [id for id in predicted_tokens_ids if id != self.tokenizer.pad_id]\n predicted_tokens_dec = self.tokenizer.ids_to_tokens(predicted_tokens_ids)\n response['completion']['text'] = self.tokenizer.tokens_to_text(predicted_tokens_dec)\n response['completion']['tokens'] = list(zip(predicted_tokens_ids, predicted_tokens_dec, log_probs))\n self.unfreeze()\n return response\n\n def _vocab_size_with_padding(self, orig_vocab_size, make_vocab_size_divisible_by, tensor_model_parallel_size):\n \"\"\"Pad vocab size so it is divisible by model parallel size and\n still having GPU friendly size.\"\"\"\n\n after = orig_vocab_size\n multiple = make_vocab_size_divisible_by * tensor_model_parallel_size\n while (after % multiple) != 0:\n after += 1\n logging.info(\n f'Padded vocab_size: {after}, original vocab_size: {orig_vocab_size}, dummy tokens: {after - orig_vocab_size}.'\n )\n return after\n\n def _enable_nvidia_optimizations(self):\n \"These optimizations are present in NVIDIA NGC PyTorch Containers\"\n\n # Version check\n nvidia_torch_version = os.getenv('NVIDIA_PYTORCH_VERSION', None)\n if nvidia_torch_version is not None:\n NVIDIA_TORCH_MAJOR = int(nvidia_torch_version.split('.')[0])\n NVIDIA_TORCH_MINOR = int(nvidia_torch_version.split('.')[1])\n\n # Apex Persistent layer norm is supported from Nvidia PyTorch container v21.11\n if NVIDIA_TORCH_MAJOR < 21 or (NVIDIA_TORCH_MAJOR == 21 and NVIDIA_TORCH_MINOR < 11):\n self._cfg.persist_layer_norm = False\n\n if NVIDIA_TORCH_MAJOR >= 21 or (NVIDIA_TORCH_MAJOR == 21 and NVIDIA_TORCH_MINOR >= 11):\n # NVFUSER\n torch._C._jit_set_profiling_executor(True)\n torch._C._jit_set_profiling_mode(True)\n torch._C._jit_override_can_fuse_on_cpu(False)\n torch._C._jit_override_can_fuse_on_gpu(False)\n 
torch._C._jit_set_texpr_fuser_enabled(False)\n torch._C._jit_set_nvfuser_enabled(True)\n torch._C._debug_set_autodiff_subgraph_inlining(False)\n\n else:\n # Not a Nvidia container. Dependency check is on users\n pass\n\n def list_available_models(self):\n pass\n"
] |
[
[
"torch.LongTensor",
"torch._C._jit_set_profiling_executor",
"torch.nn.functional.log_softmax",
"torch._C._jit_set_profiling_mode",
"torch.sum",
"torch.utils.data.DataLoader",
"torch._C._debug_set_autodiff_subgraph_inlining",
"torch._C._jit_set_texpr_fuser_enabled",
"torch._C._jit_override_can_fuse_on_gpu",
"torch._C._jit_override_can_fuse_on_cpu",
"torch._C._jit_set_nvfuser_enabled"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
PatelManush/bitsInAction-repo
|
[
"a780b2f4da205d006b74948935be48a0ab3d8f9d"
] |
[
"MachineLearning/PerceptronExample/Perceptron.py"
] |
[
"#!/usr/bin/env python\n# coding: utf-8\n\n# In[18]:\n\n\nimport numpy as np\n\nclass Perceptron(object) :\n def __init__(self, eta=0.01, n_iter=50,random_state=1):\n self.eta = eta\n self.n_iter = n_iter\n self.random_state = random_state\n \n def fit(self,x,y):\n rgen =np.random.RandomState(self.random_state)\n self.w_=rgen.normal(loc=0.0,scale=0.1,size=1 + x.shape[1])\n self.errors_=[]\n \n for _ in range(self.n_iter):\n errors = 0\n for xi, target in zip(x,y):\n update = self.eta * (target - self.predict(xi))\n self.w_[1:] += update * xi\n self.w_[0]+=update\n errors += int(update != 0.0)\n self.errors_.append(errors)\n return self\n \n def net_input(self,x):\n return np.dot(x,self.w_[1:]) + self.w_[0]\n \n def predict(self,x):\n return np.where(self.net_input(x) >= 0.0,1,-1)\n \n \n\n\n# In[6]:\n\n\nimport pandas as pd\ndf = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data',header=None)\ndf.tail()\n\n\n# In[32]:\n\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ny=df.iloc[0:100,4].values\ny=np.where(y=='Iris-setosa',-1,1)\n\nX= df.iloc[0:100,[0,2]].values\n\nplt.scatter(X[:50,0],X[:50,1],color='red',marker='o',label='setosa')\n\nplt.scatter(X[50:100,0],X[50:100,1],color='blue',marker='x',label='versicolor')\n\nplt.xlabel('sepal lenght [cm]')\nplt.ylabel('petal lenght [cm]')\nplt.legend(loc='upper left')\nplt.show()\n\n\n# In[33]:\n\n\nppn=Perceptron(eta=0.1,n_iter=10)\nppn.fit(X,y)\nplt.plot(range(1,len(ppn.errors_)+1),ppn.errors_,marker='o')\nplt.xlabel('Epochs')\nplt.ylabel('Number of updates')\nplt.show()\n\n\n# In[36]:\n\n\nfrom matplotlib.colors import ListedColormap\n\ndef plot_decision_regions(x,y,classifier,resolution=0.02):\n markers = ('s','x','o','^','v')\n colors = ('red','blue','lightgreen','gray','cyan')\n cmap = ListedColormap(colors[:len(np.unique(y))])\n \n x1_min,x1_max = X[:,0].min() - 1,X[:,0].max()+1\n x2_min,x2_max = X[:,1].min() - 1,X[:,1].max()+1\n \n xx1,xx2 = np.meshgrid(np.arange(x1_min,x1_max,resolution),np.arange(x2_min,x2_max,resolution))\n \n z=classifier.predict(np.array([xx1.ravel(),xx2.ravel()]).T)\n z=z.reshape(xx1.shape)\n plt.contourf(xx1,xx2,z,alpha=0.3,cmap=cmap)\n plt.xlim(xx1.min(),xx1.max())\n plt.ylim(xx2.min(),xx2.max())\n \n for idx,cl in enumerate(np.unique(y)):\n plt.scatter(x=X[y == cl,0],y=X[y==cl,1],alpha=0.8,c=colors[idx],marker=markers[idx],label=cl,edgecolor='black')\n\n\n# In[37]:\n\n\nplot_decision_regions(x,y,classifier=ppn)\nplt.xlabel('sepal lenght [cm]')\nplt.ylabel('Petal lenght [cm]')\nplt.legend(loc='upper left')\nplt.show()\n\n\n# In[ ]:\n\n\n\n\n"
] |
[
[
"matplotlib.pyplot.legend",
"numpy.dot",
"pandas.read_csv",
"matplotlib.pyplot.contourf",
"matplotlib.pyplot.scatter",
"numpy.unique",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"numpy.where",
"numpy.random.RandomState",
"matplotlib.pyplot.ylabel"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
hyqskevin/RL_notes
|
[
"655e7af09267d69ed68bcfa5af9aaee945dbe14c"
] |
[
"policy_network/util.py"
] |
[
"# -*- coding: utf-8 -*-\n# @Time : 2021/8/2 5:12 PM\n# @Author : kevin_w\n# @Site : \n# @File : util.py\n# @Comment :\n\nimport numpy as np\nimport gym\nimport argparse\nimport torch\nimport torchvision.transforms as T\nfrom PIL import Image\n\n# define parameter\nparser = argparse.ArgumentParser(description=\"RL REINFORCE example\")\nparser.add_argument('--seed', type=int, default=1, metavar='seed')\nparser.add_argument('--gamma', type=float, default=0.99, metavar='gamma')\nparser.add_argument('--batch_size', type=int, default=128, metavar='batch')\nparser.add_argument('--episode', type=int, default=10000, metavar='episode')\nparser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='interval between training status logs (default: 10)')\n\nargs = parser.parse_args()\n\nresize = T.Compose([\n T.ToPILImage(),\n T.Resize(40, interpolation=Image.CUBIC),\n T.ToTensor()\n])\n\nenv = gym.make('CartPole-v0').unwrapped\nenv.reset()\nenv.seed(args.seed)\ntorch.manual_seed(args.seed)\n\n\ndef get_cart_location(screen_width):\n # x_threshold: max coordinate of angle\n width = env.x_threshold * 2\n scale = screen_width / width\n return int(env.state[0] * scale + screen_width / 2.0)\n\n\ndef get_screen():\n # screen 800x1200x3 -> 3x800x1200 Color x Height x Width\n screen = env.render(mode='rgb_array').transpose((2, 0, 1))\n _, screen_height, screen_width = screen.shape\n # clip height [0.4, 0.6]\n screen = screen[:, int(screen_height * 0.4): int(screen_height * 0.8)]\n # clip width\n view_width = int(screen_width * 0.6)\n cart_location = get_cart_location(screen_width)\n if cart_location < view_width // 2:\n slice_range = slice(view_width)\n elif cart_location > (screen_width - view_width // 2):\n slice_range = slice(-view_width, None)\n else:\n slice_range = slice(cart_location - view_width // 2,\n cart_location + view_width // 2)\n screen = screen[:, :, slice_range]\n screen = np.ascontiguousarray(screen, dtype=np.float32) / 255\n screen = resize(torch.from_numpy(screen)).unsqueeze(0)\n\n return screen\n\n\ndef reward_func(x, theta):\n # calculate Angle at which will fail the episode\n r1 = (env.x_threshold - abs(x)) / env.x_threshold - 0.5\n r2 = (env.theta_threshold_radians - abs(theta)) / env.theta_threshold_radians - 0.5\n reward = 0.2 * r1 + 0.8 * r2\n return reward\n\n"
] |
[
[
"numpy.ascontiguousarray",
"torch.manual_seed",
"torch.from_numpy"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
nishprabhu/image_captioning
|
[
"4bf84c9f37b2d35a071cb28b62ad08081ebe0a29",
"4bf84c9f37b2d35a071cb28b62ad08081ebe0a29"
] |
[
"cnn.py",
"seq2seq.py"
] |
[
"\"\"\" The CNN Encoder Model \"\"\"\n\nimport torch.nn as nn\nfrom torchvision.models import resnet18\n\n\nclass CNN(nn.Module):\n \"\"\" The CNN Model \"\"\"\n\n def __init__(self, encoder_output_dim):\n super().__init__()\n self.cnn = resnet18(pretrained=True)\n self.cnn = nn.Sequential(*list(self.cnn.children())[:-1])\n self.intermediate = nn.Linear(512, encoder_output_dim)\n\n def forward(self, image):\n \"\"\" Forward function \"\"\"\n output = self.cnn(image)\n output = self.intermediate(output.squeeze())\n return output\n",
"\"\"\" Encoder and Decoder wrappers written in PyTorch 1.5.0 \"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Encoder(nn.Module):\n \"\"\" Encoder base class \"\"\"\n\n def __init__(self):\n super(Encoder, self).__init__()\n\n def forward(self, *kwargs):\n \"\"\" Forward pass \"\"\"\n raise NotImplementedError\n\n\nclass Decoder(nn.Module):\n \"\"\" Decoder base class \"\"\"\n\n def __init__(self):\n super(Decoder, self).__init__()\n\n def forward(\n self,\n encoder_output,\n decoder_input,\n predict=False,\n max_target_length=30,\n **kwargs\n ):\n \"\"\"\n Forward pass\n\n Arguments:\n ---------\n encoder_output: Tensor of shape (batch_size, e, embedding_size)\n decoder_input: Tensor of shape (batch_size, d)\n Provide a vector of shape (batch_size, 1) consisting of <START> tokens for prediction.\n predict: Boolean tensor. Use teacher forcing if False.\n max_target_length: Integer indicating the number of time-steps to be run during inference.\n \"\"\"\n if not predict:\n outputs = self.forward_step(encoder_output, decoder_input, **kwargs)\n else:\n previous_outputs = [decoder_input[:, 0]]\n current_input = torch.stack(previous_outputs, dim=-1)\n for _ in range(max_target_length):\n current_output = self.forward_step(\n encoder_output, current_input, **kwargs\n )\n indices = torch.argmax(F.softmax(current_output, dim=-1), dim=-1)[\n :, -1:\n ]\n current_input = torch.cat([current_input, indices], dim=-1)\n outputs = current_output\n\n return outputs\n\n def forward_step(self, encoder_output, decoder_input, **kwargs):\n \"\"\"\n A single forward step\n\n Arguments:\n ---------\n encoder_output: Tensor of shape (batch_size, e, embedding_size)\n decoder_input: Tensor of shape (batch_size, d)\n [e and d are encoder and decoder sequence lengths respectively.]\n\n Output:\n ------\n outputs: Tensor of shape (batch_size, d, vocabulary_size)\n \"\"\"\n raise NotImplementedError\n"
] |
[
[
"torch.nn.Linear"
],
[
"torch.stack",
"torch.nn.functional.softmax",
"torch.cat"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
amtorresn/AnaTorres_Ejercicio27
|
[
"8b018f62b1874a1fb5a11b03f42b40d02f2b4692"
] |
[
"plot.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\ndata = np.loadtxt(\"datos.dat\")\nx = np.arange(0,101,1)\ny = np.arange(0,101,1)\nfig = plt.figure()\nax = Axes3D(fig)\nax.plot_trisurf(x,y, data)\nplt.savefig(\"fig.png\")"
] |
[
[
"numpy.arange",
"matplotlib.pyplot.savefig",
"numpy.loadtxt",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cflynn3/resqpy
|
[
"d1d34972ee95526265bb7bde96ca232f98f69c77",
"d1d34972ee95526265bb7bde96ca232f98f69c77",
"d1d34972ee95526265bb7bde96ca232f98f69c77",
"d1d34972ee95526265bb7bde96ca232f98f69c77"
] |
[
"resqpy/well.py",
"resqpy/rq_import.py",
"tests/test_surface.py",
"resqpy/olio/vector_utilities.py"
] |
[
"\"\"\"well.py: resqpy well module providing trajectory, deviation survey, blocked well, wellbore frame and marker frame and md datum classes.\n\nExample::\n\n # Wellbore interpretations\n for well in model.iter_wellbore_interpretations():\n print(well.title)\n\n for trajectory in well.iter_trajectories():\n print(trajectory.title)\n\n for frame in trajectory.iter_wellbore_frames():\n print(frame.title)\n\n # Measured depths\n mds = frame.node_mds\n\n # Logs\n log_collection = frame.logs\n for log in log_collection:\n values = log.values()\n\n\"\"\"\n\n# todo: create a trajectory from a deviation survey, assuming minimum curvature\n\nversion = '20th October 2021'\n\n# Nexus is a registered trademark of the Halliburton Company\n# RMS and ROXAR are registered trademarks of Roxar Software Solutions AS, an Emerson company\n\nimport logging\n\nlog = logging.getLogger(__name__)\nlog.debug('well.py version ' + version)\n\nimport math as maths\nimport os\nimport warnings\n\nimport lasio\nimport numpy as np\nimport pandas as pd\n\nimport resqpy.crs as crs\nimport resqpy.lines as rql\nimport resqpy.olio.grid_functions as gf\nimport resqpy.olio.intersection as intersect\nimport resqpy.olio.keyword_files as kf\nimport resqpy.olio.uuid as bu\nimport resqpy.olio.vector_utilities as vec\nimport resqpy.olio.wellspec_keywords as wsk\nimport resqpy.olio.write_hdf5 as rwh5\nimport resqpy.olio.xml_et as rqet\nimport resqpy.organize as rqo\nimport resqpy.property as rqp\nimport resqpy.weights_and_measures as bwam\nfrom resqpy.olio.base import BaseResqpy\nfrom resqpy.olio.xml_namespaces import curly_namespace as ns\n\nvalid_md_reference_list = [\n \"ground level\", \"kelly bushing\", \"mean sea level\", \"derrick floor\", \"casing flange\", \"arbitrary point\",\n \"crown valve\", \"rotary bushing\", \"rotary table\", \"sea floor\", \"lowest astronomical tide\", \"mean higher high water\",\n \"mean high water\", \"mean lower low water\", \"mean low water\", \"mean tide level\", \"kickoff point\"\n]\n\n# todo: could require/maintain DeviationSurvey mds in same units as md datum object's crs vertical units?\n\n\nclass MdDatum(BaseResqpy):\n \"\"\"Class for RESQML measured depth datum.\"\"\"\n\n resqml_type = 'MdDatum'\n\n def __init__(\n self,\n parent_model,\n uuid = None,\n md_datum_root = None,\n crs_uuid = None,\n crs_root = None, # deprecated\n location = None,\n md_reference = 'mean sea level',\n title = None,\n originator = None,\n extra_metadata = None):\n \"\"\"Initialises a new MdDatum object.\n\n arguments:\n parent_model (model.Model object): the model which the new md datum belongs to\n uuid: If not None, load from existing object. Else, create new.\n md_datum_root (optional): DEPRECATED: the root node of the xml tree representing the md datum;\n if not None, the new md datum object is initialised based on data in the tree;\n if None, the new object is initialised from the remaining arguments\n crs_uuid (uuid.UUID): required if initialising from values\n crs_root: DEPRECATED, use crs_uuid instead; the root node of the coordinate reference system\n xml tree; ignored if uuid or md_datum_root is not None or crs_uuid is not None\n location: (triple float): the x, y, z location of the new measured depth datum;\n ignored if uuid or md_datum_root is not None\n md_reference (string): human readable resqml standard string indicating the real\n world nature of the datum, eg. 
'kelly bushing'; the full list of options is\n available as the global variable valid_md_reference_list in this module;\n ignored if uuid or md_datum_root is not None\n title (str, optional): the citation title to use for a new datum;\n ignored if uuid or md_datum_root is not None\n originator (str, optional): the name of the person creating the datum, defaults to login id;\n ignored if uuid or md_datum_root is not None\n extra_metadata (dict, optional): string key, value pairs to add as extra metadata for the datum;\n ignored if uuid or md_datum_root is not None\n\n returns:\n the newly instantiated measured depth datum object\n\n note:\n this function does not create an xml node for the md datum; call the create_xml() method afterwards\n if initialising from data other than an existing RESQML object\n \"\"\"\n\n if crs_root is not None:\n warnings.warn(\"Attribute 'crs_root' is deprecated. Use 'crs_uuid'\", DeprecationWarning)\n # TODO: remove crs_root argument\n\n self.location = location\n self.md_reference = md_reference\n self.crs_uuid = crs_uuid\n\n super().__init__(model = parent_model,\n uuid = uuid,\n title = title,\n originator = originator,\n extra_metadata = extra_metadata,\n root_node = md_datum_root)\n\n # temporary code to sort out crs reference, till crs_root arg is retired\n if self.crs_uuid is None and crs_root is not None:\n self.crs_uuid = rqet.uuid_for_part_root(crs_root)\n\n assert self.crs_uuid is not None\n if self.root is None and (location is not None or md_reference):\n assert location is not None and md_reference\n assert md_reference in valid_md_reference_list\n assert len(location) == 3\n\n def _load_from_xml(self):\n md_datum_root = self.root\n assert md_datum_root is not None\n location_node = rqet.find_tag(md_datum_root, 'Location')\n self.location = (rqet.find_tag_float(location_node,\n 'Coordinate1'), rqet.find_tag_float(location_node, 'Coordinate2'),\n rqet.find_tag_float(location_node, 'Coordinate3'))\n self.md_reference = rqet.node_text(rqet.find_tag(md_datum_root, 'MdReference')).strip().lower()\n assert self.md_reference in valid_md_reference_list\n self.crs_uuid = self.extract_crs_uuid()\n\n @property\n def crs_root(self):\n \"\"\"XML node corresponding to self.crs_uuid.\"\"\"\n\n return self.model.root_for_uuid(self.crs_uuid)\n\n # todo: the following function is almost identical to one in the grid module: it should be made common and put in model.py\n\n def extract_crs_uuid(self):\n \"\"\"Returns uuid for coordinate reference system, as stored in reference node of this md datum's xml tree.\"\"\"\n\n if self.crs_uuid is not None:\n return self.crs_uuid\n crs_root = rqet.find_tag(self.root, 'LocalCrs')\n uuid_str = rqet.find_tag(crs_root, 'UUID').text\n self.crs_uuid = bu.uuid_from_string(uuid_str)\n return self.crs_uuid\n\n def extract_crs_root(self):\n \"\"\"Returns root in parent model xml parts forest of coordinate reference system used by this md datum.\"\"\"\n\n if self.crs_uuid is None:\n self.extract_crs_uuid()\n return self.crs_root\n\n def create_part(self):\n \"\"\"Creates xml for this md datum object and adds to parent model as a part; returns root node for part.\"\"\"\n\n # note: deprecated, call create_xml() directly\n assert self.root is None\n assert self.location is not None\n self.create_xml(add_as_part = True)\n\n def create_xml(self, add_as_part = True, add_relationships = True, title = None, originator = None):\n \"\"\"Creates xml for a measured depth datum element; crs node must already exist; optionally adds as part.\n\n 
arguments:\n add_as_part (boolean, default True): if True, the newly created xml node is added as a part\n in the model\n add_relationships (boolean, default True): if True, a relationship xml part is created relating the\n new md datum part to the crs\n title (string): used as the citation Title text for the new md datum node\n originator (string, optional): the name of the human being who created the md datum part;\n default is to use the login name\n\n returns:\n the newly created measured depth datum xml node\n \"\"\"\n\n md_reference = self.md_reference.lower()\n assert md_reference in valid_md_reference_list, 'invalid measured depth reference: ' + md_reference\n\n if title:\n self.title = title\n if not self.title:\n self.title = 'measured depth datum'\n\n crs_uuid = self.crs_uuid\n assert crs_uuid is not None\n\n datum = super().create_xml(add_as_part = False, originator = originator)\n\n self.model.create_solitary_point3d('Location', datum, self.location)\n\n md_ref = rqet.SubElement(datum, ns['resqml2'] + 'MdReference')\n md_ref.set(ns['xsi'] + 'type', ns['resqml2'] + 'MdReference')\n md_ref.text = md_reference\n\n self.model.create_crs_reference(crs_uuid = crs_uuid, root = datum)\n\n if add_as_part:\n self.model.add_part('obj_MdDatum', self.uuid, datum)\n if add_relationships:\n self.model.create_reciprocal_relationship(datum, 'destinationObject', self.crs_root, 'sourceObject')\n\n return datum\n\n def is_equivalent(self, other):\n \"\"\"Implements equals operator, comparing metadata items deemed significant.\"\"\"\n\n if not isinstance(other, self.__class__):\n return False\n if self.md_reference != other.md_reference or not np.allclose(self.location, other.location):\n return False\n return bu.matching_uuids(self.crs_uuid, other.crs_uuid)\n\n\nclass DeviationSurvey(BaseResqpy):\n \"\"\"Class for RESQML wellbore deviation survey.\n\n RESQML documentation:\n\n Specifies the station data from a deviation survey.\n\n The deviation survey does not provide a complete specification of the\n geometry of a wellbore trajectory. Although a minimum-curvature\n algorithm is used in most cases, the implementation varies sufficiently\n that no single algorithmic specification is available as a data transfer\n standard.\n\n Instead, the geometry of a RESQML wellbore trajectory is represented by\n a parametric line, parameterized by the MD.\n\n CRS and units of measure do not need to be consistent with the CRS and\n units of measure for wellbore trajectory representation.\n \"\"\"\n\n resqml_type = 'DeviationSurveyRepresentation'\n\n def __init__(self,\n parent_model,\n uuid = None,\n title = None,\n deviation_survey_root = None,\n represented_interp = None,\n md_datum = None,\n md_uom = 'm',\n angle_uom = 'dega',\n measured_depths = None,\n azimuths = None,\n inclinations = None,\n station_count = None,\n first_station = None,\n is_final = False,\n originator = None,\n extra_metadata = None):\n \"\"\"Load or create a DeviationSurvey object.\n\n If uuid is given, loads from XML. Else, create new. If loading from disk, other\n parameters will be overwritten.\n\n Args:\n parent_model (model.Model): the model which the new survey belongs to\n uuid (uuid.UUID): If given, loads from disk. Else, creates new.\n title (str): Citation title\n deviation_survey_root: DEPCRECATED. 
If given, load from disk.\n represented_interp (wellbore interpretation): if present, is noted as the wellbore\n interpretation object which this deviation survey relates to\n md_datum (MdDatum): the datum that the depths for this survey are measured from\n md_uom (string, default 'm'): a resqml length unit of measure applicable to the\n measured depths; should be 'm' or 'ft'\n angle_uom (string): a resqml angle unit; should be 'dega' or 'rad'\n measured_depths (np.array): 1d array\n azimuths (np.array): 1d array\n inclindations (np.array): 1d array\n station_count (int): length of measured_depths, azimuths & inclinations\n first_station (tuple): (x, y, z) of first point in survey, in crs for md datum\n is_final (bool): whether survey is a finalised deviation survey\n originator (str): name of author\n extra_metadata (dict, optional): extra metadata key, value pairs\n\n Returns:\n DeviationSurvey\n\n Notes:\n this method does not create an xml node, nor write hdf5 arrays\n \"\"\"\n\n self.is_final = is_final\n self.md_uom = bwam.rq_length_unit(md_uom)\n\n self.angles_in_degrees = angle_uom.strip().lower().startswith('deg')\n \"\"\"boolean: True for degrees, False for radians (nothing else supported). Should be 'dega' or 'rad'\"\"\"\n\n # Array data\n self.measured_depths = _as_optional_array(measured_depths)\n self.azimuths = _as_optional_array(azimuths)\n self.inclinations = _as_optional_array(inclinations)\n\n if station_count is None and measured_depths is not None:\n station_count = len(measured_depths)\n self.station_count = station_count\n self.first_station = first_station\n\n # Referenced objects\n self.md_datum = md_datum # md datum is an object in its own right, with a related crs!\n self.wellbore_interpretation = represented_interp\n\n # TODO: remove deviation_survey_root, use just uuid\n\n super().__init__(model = parent_model,\n uuid = uuid,\n title = title,\n originator = originator,\n extra_metadata = extra_metadata,\n root_node = deviation_survey_root)\n\n @classmethod\n def from_data_frame(cls,\n parent_model,\n data_frame,\n md_datum = None,\n md_col = 'MD',\n azimuth_col = 'AZIM_GN',\n inclination_col = 'INCL',\n x_col = 'X',\n y_col = 'Y',\n z_col = 'Z',\n md_uom = 'm',\n angle_uom = 'dega'):\n \"\"\"Load MD, aximuth & inclination data from a pandas data frame.\n\n Args:\n parent_model (model.Model): the parent resqml model\n data_frame: a pandas dataframe holding the deviation survey data\n md_datum (MdDatum object): the datum that the depths for this survey are measured from\n md_col (string, default 'MD'): the name of the column holding measured depth values\n azimuth_col (string, default 'AZIM_GN'): the name of the column holding azimuth values relative\n to the north direction (+ve y axis) of the coordinate reference system\n inclination_col (string, default 'INCL'): the name of the column holding inclination values\n x_col (string, default 'X'): the name of the column holding an x value in the first row\n y_col (string, default 'Y'): the name of the column holding an Y value in the first row\n z_col (string, default 'Z'): the name of the column holding an z value in the first row\n md_uom (string, default 'm'): a resqml length unit of measure applicable to the\n measured depths; should be 'm' or 'ft'\n angle_uom (string, default 'dega'): a resqml angle unit of measure applicable to both\n the azimuth and inclination data\n\n Returns:\n DeviationSurvey\n\n Note:\n The X, Y & Z columns are only used to set the first station location (from the first row)\n \"\"\"\n\n 
for col in [md_col, azimuth_col, inclination_col, x_col, y_col, z_col]:\n assert col in data_frame.columns\n station_count = len(data_frame)\n assert station_count >= 2 # vertical well could be hamdled by allowing a single station in survey?\n # self.md_uom = bwam.p_length_unit(md_uom)\n\n start = data_frame.iloc[0]\n\n return cls(parent_model = parent_model,\n station_count = station_count,\n md_datum = md_datum,\n md_uom = md_uom,\n angle_uom = angle_uom,\n first_station = (start[x_col], start[y_col], start[z_col]),\n measured_depths = data_frame[md_col].values,\n azimuths = data_frame[azimuth_col].values,\n inclinations = data_frame[inclination_col].values,\n is_final = True) # assume this is a finalised deviation survey\n\n @classmethod\n def from_ascii_file(cls,\n parent_model,\n deviation_survey_file,\n comment_character = '#',\n space_separated_instead_of_csv = False,\n md_col = 'MD',\n azimuth_col = 'AZIM_GN',\n inclination_col = 'INCL',\n x_col = 'X',\n y_col = 'Y',\n z_col = 'Z',\n md_uom = 'm',\n angle_uom = 'dega',\n md_datum = None):\n \"\"\"Load MD, aximuth & inclination data from an ascii deviation survey file.\n\n Arguments:\n parent_model (model.Model): the parent resqml model\n deviation_survey_file (string): the filename of an ascii file holding the deviation survey data\n comment_character (string): the character to be treated as introducing comments\n space_separated_instead_of_csv (boolea, default False): if False, csv format expected;\n if True, columns are expected to be seperated by white space\n md_col (string, default 'MD'): the name of the column holding measured depth values\n azimuth_col (string, default 'AZIM_GN'): the name of the column holding azimuth values relative\n to the north direction (+ve y axis) of the coordinate reference system\n inclination_col (string, default 'INCL'): the name of the column holding inclination values\n x_col (string, default 'X'): the name of the column holding an x value in the first row\n y_col (string, default 'Y'): the name of the column holding an Y value in the first row\n z_col (string, default 'Z'): the name of the column holding an z value in the first row\n md_uom (string, default 'm'): a resqml length unit of measure applicable to the\n measured depths; should be 'm' or 'ft'\n angle_uom (string, default 'dega'): a resqml angle unit of measure applicable to both\n the azimuth and inclination data\n md_datum (MdDatum object): the datum that the depths for this survey are measured from\n\n Returns:\n DeviationSurvey\n\n Note:\n The X, Y & Z columns are only used to set the first station location (from the first row)\n \"\"\"\n\n try:\n df = pd.read_csv(deviation_survey_file,\n comment = comment_character,\n delim_whitespace = space_separated_instead_of_csv)\n if df is None:\n raise Exception\n except Exception:\n log.error('failed to read ascii deviation survey file ' + deviation_survey_file)\n raise\n\n return cls.from_data_frame(parent_model,\n df,\n md_col = md_col,\n azimuth_col = azimuth_col,\n inclination_col = inclination_col,\n x_col = x_col,\n y_col = y_col,\n z_col = z_col,\n md_uom = md_uom,\n angle_uom = angle_uom,\n md_datum = md_datum)\n\n def _load_from_xml(self):\n \"\"\"Load attributes from xml and associated hdf5 data.\n\n This is invoked as part of the init method when an existing uuid is given.\n\n Returns:\n [bool]: True if sucessful\n \"\"\"\n\n # Get node from self.uuid\n node = self.root\n assert node is not None\n\n # Load XML data\n self.md_uom = rqet.length_units_from_node(rqet.find_tag(node, 
'MdUom', must_exist = True))\n self.angle_uom = rqet.find_tag_text(node, 'AngleUom', must_exist = True)\n self.station_count = rqet.find_tag_int(node, 'StationCount', must_exist = True)\n self.first_station = extract_xyz(rqet.find_tag(node, 'FirstStationLocation', must_exist = True))\n self.is_final = rqet.find_tag_bool(node, 'IsFinal')\n\n # Load HDF5 data\n mds_node = rqet.find_tag(node, 'Mds', must_exist = True)\n load_hdf5_array(self, mds_node, 'measured_depths')\n azimuths_node = rqet.find_tag(node, 'Azimuths', must_exist = True)\n load_hdf5_array(self, azimuths_node, 'azimuths')\n inclinations_node = rqet.find_tag(node, 'Inclinations', must_exist = True)\n load_hdf5_array(self, inclinations_node, 'inclinations')\n\n # Set related objects\n self.md_datum = self._load_related_datum()\n self.represented_interp = self._load_related_wellbore_interp()\n\n # Validate\n assert self.measured_depths is not None\n assert len(self.measured_depths) > 0\n\n return True\n\n def create_xml(self,\n ext_uuid = None,\n md_datum_root = None,\n md_datum_xyz = None,\n add_as_part = True,\n add_relationships = True,\n title = None,\n originator = None):\n \"\"\"Creates a deviation survey representation xml element from this DeviationSurvey object.\n\n arguments:\n ext_uuid (uuid.UUID): the uuid of the hdf5 external part holding the deviation survey arrays\n md_datum_root: the root xml node for the measured depth datum that the deviation survey depths\n are based on\n md_datum_xyz: TODO: document this\n add_as_part (boolean, default True): if True, the newly created xml node is added as a part\n in the model\n add_relationships (boolean, default True): if True, a relationship xml part is created relating the\n new deviation survey part to the measured depth datum part\n title (string): used as the citation Title text; should usually refer to the well name in a\n human readable way\n originator (string, optional): the name of the human being who created the deviation survey part;\n default is to use the login name\n\n returns:\n the newly created deviation survey xml node\n \"\"\"\n\n assert self.station_count > 0\n\n if ext_uuid is None:\n ext_uuid = self.model.h5_uuid()\n\n if md_datum_root is None:\n if self.md_datum is None:\n if md_datum_xyz is None:\n raise ValueError(\"Must provide a MD Datum for the DeviationSurvey\")\n self.md_datum = MdDatum(self.model, location = md_datum_xyz)\n if self.md_datum.root is None:\n md_datum_root = self.md_datum.create_xml()\n else:\n md_datum_root = self.md_datum.root\n assert md_datum_root is not None\n\n # Create root node, write citation block\n ds_node = super().create_xml(title = title, originator = originator, add_as_part = False)\n\n if_node = rqet.SubElement(ds_node, ns['resqml2'] + 'IsFinal')\n if_node.set(ns['xsi'] + 'type', ns['xsd'] + 'boolean')\n if_node.text = str(self.is_final).lower()\n\n sc_node = rqet.SubElement(ds_node, ns['resqml2'] + 'StationCount')\n sc_node.set(ns['xsi'] + 'type', ns['xsd'] + 'positiveInteger')\n sc_node.text = str(self.station_count)\n\n md_uom = rqet.SubElement(ds_node, ns['resqml2'] + 'MdUom')\n md_uom.set(ns['xsi'] + 'type', ns['eml'] + 'LengthUom')\n md_uom.text = bwam.rq_length_unit(self.md_uom)\n\n self.model.create_md_datum_reference(md_datum_root, root = ds_node)\n\n self.model.create_solitary_point3d('FirstStationLocation', ds_node, self.first_station)\n\n angle_uom = rqet.SubElement(ds_node, ns['resqml2'] + 'AngleUom')\n angle_uom.set(ns['xsi'] + 'type', ns['eml'] + 'PlaneAngleUom')\n if self.angles_in_degrees:\n 
angle_uom.text = 'dega'\n else:\n angle_uom.text = 'rad'\n\n mds = rqet.SubElement(ds_node, ns['resqml2'] + 'Mds')\n mds.set(ns['xsi'] + 'type', ns['resqml2'] + 'DoubleHdf5Array')\n mds.text = rqet.null_xml_text\n\n mds_values_node = rqet.SubElement(mds, ns['resqml2'] + 'Values')\n mds_values_node.set(ns['xsi'] + 'type', ns['resqml2'] + 'Hdf5Dataset')\n mds_values_node.text = rqet.null_xml_text\n\n self.model.create_hdf5_dataset_ref(ext_uuid, self.uuid, 'Mds', root = mds_values_node)\n\n azimuths = rqet.SubElement(ds_node, ns['resqml2'] + 'Azimuths')\n azimuths.set(ns['xsi'] + 'type', ns['resqml2'] + 'DoubleHdf5Array')\n azimuths.text = rqet.null_xml_text\n\n azimuths_values_node = rqet.SubElement(azimuths, ns['resqml2'] + 'Values')\n azimuths_values_node.set(ns['xsi'] + 'type', ns['resqml2'] + 'Hdf5Dataset')\n azimuths_values_node.text = rqet.null_xml_text\n\n self.model.create_hdf5_dataset_ref(ext_uuid, self.uuid, 'Azimuths', root = azimuths_values_node)\n\n inclinations = rqet.SubElement(ds_node, ns['resqml2'] + 'Inclinations')\n inclinations.set(ns['xsi'] + 'type', ns['resqml2'] + 'DoubleHdf5Array')\n inclinations.text = rqet.null_xml_text\n\n inclinations_values_node = rqet.SubElement(inclinations, ns['resqml2'] + 'Values')\n inclinations_values_node.set(ns['xsi'] + 'type', ns['resqml2'] + 'Hdf5Dataset')\n inclinations_values_node.text = rqet.null_xml_text\n\n self.model.create_hdf5_dataset_ref(ext_uuid, self.uuid, 'Inclinations', root = inclinations_values_node)\n\n interp_root = None\n if self.wellbore_interpretation is not None:\n interp_root = self.wellbore_interpretation.root\n self.model.create_ref_node('RepresentedInterpretation',\n rqet.find_nested_tags_text(interp_root, ['Citation', 'Title']),\n bu.uuid_from_string(interp_root.attrib['uuid']),\n content_type = 'obj_WellboreInterpretation',\n root = ds_node)\n\n if add_as_part:\n self.model.add_part('obj_DeviationSurveyRepresentation', self.uuid, ds_node)\n if add_relationships:\n # todo: check following relationship\n self.model.create_reciprocal_relationship(ds_node, 'destinationObject', md_datum_root, 'sourceObject')\n if interp_root is not None:\n self.model.create_reciprocal_relationship(ds_node, 'destinationObject', interp_root, 'sourceObject')\n ext_part = rqet.part_name_for_object('obj_EpcExternalPartReference', ext_uuid, prefixed = False)\n ext_node = self.model.root_for_part(ext_part)\n self.model.create_reciprocal_relationship(ds_node, 'mlToExternalPartProxy', ext_node,\n 'externalPartProxyToMl')\n\n return ds_node\n\n def write_hdf5(self, file_name = None, mode = 'a'):\n \"\"\"Create or append to an hdf5 file, writing datasets for the measured depths, azimuths, and inclinations.\"\"\"\n\n # NB: array data must all have been set up prior to calling this function\n h5_reg = rwh5.H5Register(self.model)\n h5_reg.register_dataset(self.uuid, 'Mds', self.measured_depths, dtype = float)\n h5_reg.register_dataset(self.uuid, 'Azimuths', self.azimuths, dtype = float)\n h5_reg.register_dataset(self.uuid, 'Inclinations', self.inclinations, dtype = float)\n h5_reg.write(file = file_name, mode = mode)\n\n def _load_related_datum(self):\n \"\"\"Return related MdDatum object from XML if present.\"\"\"\n\n md_datum_uuid = bu.uuid_from_string(rqet.find_tag(rqet.find_tag(self.root, 'MdDatum'), 'UUID'))\n if md_datum_uuid is not None:\n md_datum_part = 'obj_MdDatum_' + str(md_datum_uuid) + '.xml'\n md_datum = MdDatum(self.model, md_datum_root = self.model.root_for_part(md_datum_part, is_rels = False))\n else:\n md_datum = None\n return 
md_datum\n\n def _load_related_wellbore_interp(self):\n \"\"\"Return related wellbore interp object from XML if present.\"\"\"\n\n interp_uuid = rqet.find_nested_tags_text(self.root, ['RepresentedInterpretation', 'UUID'])\n if interp_uuid is None:\n represented_interp = None\n else:\n represented_interp = rqo.WellboreInterpretation(self.model, uuid = interp_uuid)\n return represented_interp\n\n\nclass Trajectory(BaseResqpy):\n \"\"\"Class for RESQML Wellbore Trajectory Representation (Geometry).\n\n note:\n resqml allows trajectory to have different crs to the measured depth datum crs;\n however, this code requires the trajectory to be in the same crs as the md datum\n \"\"\"\n\n resqml_type = 'WellboreTrajectoryRepresentation'\n well_name = rqo._alias_for_attribute(\"title\")\n\n def __init__(\n self,\n parent_model,\n trajectory_root = None, # deprecated\n uuid = None,\n md_datum = None,\n deviation_survey = None,\n data_frame = None,\n grid = None,\n cell_kji0_list = None,\n wellspec_file = None,\n spline_mode = 'cube',\n deviation_survey_file = None,\n survey_file_space_separated = False,\n length_uom = None,\n md_domain = None,\n represented_interp = None,\n well_name = None,\n set_tangent_vectors = False,\n hdf5_source_model = None,\n originator = None,\n extra_metadata = None):\n \"\"\"Creates a new trajectory object and optionally loads it from xml, deviation survey, pandas dataframe, or\n ascii file.\n\n arguments:\n parent_model (model.Model object): the model which the new trajectory belongs to\n trajectory_root (DEPRECATED): use uuid instead; the root node of an xml tree representing the trajectory;\n if not None, the new trajectory object is initialised based on the data in the tree;\n if None, one of the other arguments is used\n md_datum (MdDatum object): the datum that the depths for this trajectory are measured from;\n not used if uuid or trajectory_root is not None\n deviation_survey (DeviationSurvey object, optional): if present and uuid and trajectory_root are None\n then the trajectory is derived from the deviation survey based on minimum curvature\n data_frame (optional): a pandas dataframe with columns 'MD', 'X', 'Y' and 'Z', holding\n the measured depths, and corresponding node locations; ignored if uuid or trajectory_root is not None\n grid (grid.Grid object, optional): only required if initialising from a list of cell indices;\n ignored otherwise\n cell_kji0_list (numpy int array of shape (N, 3)): ordered list of cell indices to be visited by\n the trajectory; ignored if uuid or trajectory_root is not None\n wellspec_file (string, optional): name of an ascii file containing Nexus WELLSPEC data; well_name\n and length_uom arguments must be passed\n spline_mode (string, default 'cube'): one of 'none', 'linear', 'square', or 'cube'; affects spline\n tangent generation; only relevant if initialising from list of cells\n deviation_survey_file (string): filename of an ascii file holding the trajectory\n in a tabular form; ignored if uuid or trajectory_root is not None\n survey_file_space_separated (boolean, default False): if True, deviation survey file is\n space separated; if False, comma separated (csv); ignored unless loading from survey file\n length_uom (string, default 'm'): a resqml length unit of measure applicable to the\n measured depths; should be 'm' or 'ft'\n md_domain (string, optional): if present, must be 'logger' or 'driller'; the source of the original\n deviation data; ignored if uuid or trajectory_root is not None\n represented_interp (wellbore 
interpretation object, optional): if present, is noted as the wellbore\n interpretation object which this trajectory relates to; ignored if uuid or trajectory_root is not None\n well_name (string, optional): used as citation title\n set_tangent_vectors (boolean, default False): if True and tangent vectors are not loaded then they will\n be computed from the control points\n hdf5_source_model (model.Model, optional): if present this model is used to determine the hdf5 file\n name from which to load the trajectory's array data; if None, the parent_model is used as usual\n originator (str, optional): the name of the person creating the trajectory, defaults to login id;\n ignored if uuid or trajectory_root is not None\n extra_metadata (dict, optional): string key, value pairs to add as extra metadata for the trajectory;\n ignored if uuid or trajectory_root is not None\n\n returns:\n the newly created wellbore trajectory object\n\n notes:\n if starting from a deviation survey file, there are two routes: create a deviation survey object first,\n using the azimuth and inclination data, then generate a trajectory from that based on minimum curvature;\n or, create a trajectory directly using X, Y, Z data from the deviation survey file (ie. minimum\n curvature or other algorithm already applied externally);\n if not loading from xml, then the crs is set to that used by the measured depth datum, or if that is not\n available then the default crs for the model\n\n :meta common:\n \"\"\"\n\n self.crs_uuid = None\n self.title = well_name\n self.start_md = None\n self.finish_md = None\n self.md_uom = length_uom\n self.md_domain = md_domain\n self.md_datum = md_datum # md datum is an object in its own right, with a related crs!\n # parametric line geometry elements\n self.knot_count = None\n self.line_kind_index = None\n # 0 for vertical\n # 1 for linear spline\n # 2 for natural cubic spline\n # 3 for cubic spline\n # 4 for z linear cubic spline\n # 5 for minimum-curvature spline # in practice this is the value actually used in datasets\n # (-1) for null: no line\n self.measured_depths = None # known as control point parameters in the parametric line geometry\n self.control_points = None # xyz array of shape (knot_count, 3)\n self.tangent_vectors = None # optional xyz tangent vector array, if present has same shape as control points)\n self.deviation_survey = deviation_survey # optional related deviation survey\n self.wellbore_interpretation = represented_interp\n self.wellbore_feature = None\n self.feature_and_interpretation_to_be_written = False\n # todo: parent intersection for multi-lateral wells\n # todo: witsml trajectory reference (optional)\n\n super().__init__(model = parent_model,\n uuid = uuid,\n title = well_name,\n originator = originator,\n extra_metadata = extra_metadata,\n root_node = trajectory_root)\n\n if self.root is not None:\n return\n\n if set_tangent_vectors and self.knot_count > 1 and self.tangent_vectors is None:\n self.set_tangents()\n elif self.deviation_survey is not None:\n self.compute_from_deviation_survey(method = 'minimum curvature', set_tangent_vectors = set_tangent_vectors)\n elif data_frame is not None:\n self.load_from_data_frame(data_frame,\n md_uom = length_uom,\n md_datum = md_datum,\n set_tangent_vectors = set_tangent_vectors)\n elif cell_kji0_list is not None:\n self.load_from_cell_list(grid, cell_kji0_list, spline_mode, length_uom)\n elif wellspec_file:\n self.load_from_wellspec(grid, wellspec_file, well_name, spline_mode, length_uom)\n elif 
deviation_survey_file:\n self.load_from_ascii_file(deviation_survey_file,\n space_separated_instead_of_csv = survey_file_space_separated,\n md_uom = length_uom,\n md_datum = md_datum,\n title = well_name,\n set_tangent_vectors = set_tangent_vectors)\n # todo: create from already loaded deviation_survey node (ie. derive xyz points)\n\n if self.crs_uuid is None:\n if self.md_datum is not None:\n self.crs_uuid = self.md_datum.crs_uuid\n else:\n self.crs_uuid = self.model.crs_uuid\n\n if not self.title:\n self.title = 'well trajectory'\n\n if self.md_datum is None and self.control_points is not None:\n self.md_datum = MdDatum(self.model, crs_uuid = self.crs_uuid, location = self.control_points[0])\n\n @property\n def crs_root(self):\n \"\"\"XML node corresponding to self.crs_uuid.\"\"\"\n\n return self.model.root_for_uuid(self.crs_uuid)\n\n def iter_wellbore_frames(self):\n \"\"\"Iterable of all WellboreFrames associated with a trajectory.\n\n Yields:\n frame: instance of :class:`resqpy.organize.WellboreFrame`\n\n :meta common:\n \"\"\"\n uuids = self.model.uuids(obj_type = \"WellboreFrameRepresentation\", related_uuid = self.uuid)\n for uuid in uuids:\n yield WellboreFrame(self.model, uuid = uuid)\n\n def _load_from_xml(self):\n \"\"\"Loads the trajectory object from an xml node (and associated hdf5 data).\"\"\"\n\n node = self.root\n assert node is not None\n self.start_md = float(rqet.node_text(rqet.find_tag(node, 'StartMd')).strip())\n self.finish_md = float(rqet.node_text(rqet.find_tag(node, 'FinishMd')).strip())\n self.md_uom = rqet.length_units_from_node(rqet.find_tag(node, 'MdUom'))\n self.md_domain = rqet.node_text(rqet.find_tag(node, 'MdDomain'))\n geometry_node = rqet.find_tag(node, 'Geometry')\n self.crs_uuid = bu.uuid_from_string(rqet.find_nested_tags_text(geometry_node, ['LocalCrs', 'UUID']))\n self.knot_count = int(rqet.node_text(rqet.find_tag(geometry_node, 'KnotCount')).strip())\n self.line_kind_index = int(rqet.node_text(rqet.find_tag(geometry_node, 'LineKindIndex')).strip())\n mds_node = rqet.find_tag(geometry_node, 'ControlPointParameters')\n if mds_node is not None: # not required for vertical or z linear cubic spline\n load_hdf5_array(self, mds_node, 'measured_depths')\n control_points_node = rqet.find_tag(geometry_node, 'ControlPoints')\n load_hdf5_array(self, control_points_node, 'control_points', tag = 'Coordinates')\n tangents_node = rqet.find_tag(geometry_node, 'TangentVectors')\n if tangents_node is not None:\n load_hdf5_array(self, tangents_node, 'tangent_vectors', tag = 'Coordinates')\n relatives_model = self.model # if hdf5_source_model is None else hdf5_source_model\n # md_datum - separate part, referred to in this tree\n md_datum_uuid = bu.uuid_from_string(rqet.find_nested_tags_text(node, ['MdDatum', 'UUID']))\n assert md_datum_uuid is not None, 'failed to fetch uuid of md datum for trajectory'\n md_datum_part = relatives_model.part_for_uuid(md_datum_uuid)\n assert md_datum_part, 'md datum part not found in model'\n self.md_datum = MdDatum(self.model, uuid = relatives_model.uuid_for_part(md_datum_part))\n ds_uuid = bu.uuid_from_string(rqet.find_nested_tags_text(node, ['DeviationSurvey', 'UUID']))\n if ds_uuid is not None: # this will probably not work when relatives model is different from self.model\n ds_part = rqet.part_name_for_object('obj_DeviationSurveyRepresentation_', ds_uuid)\n self.deviation_survey = DeviationSurvey(self.model,\n uuid = relatives_model.uuid_for_part(ds_part, is_rels = False),\n md_datum = self.md_datum)\n interp_uuid = 
rqet.find_nested_tags_text(node, ['RepresentedInterpretation', 'UUID'])\n if interp_uuid is None:\n self.wellbore_interpretation = None\n else:\n self.wellbore_interpretation = rqo.WellboreInterpretation(self.model, uuid = interp_uuid)\n\n def compute_from_deviation_survey(self,\n survey = None,\n method = 'minimum curvature',\n md_domain = None,\n set_tangent_vectors = True):\n \"\"\"Derive wellbore trajectory from deviation survey azimuth and inclination data.\"\"\"\n\n if survey is None:\n assert self.deviation_survey is not None\n survey = self.deviation_survey\n else:\n self.deviation_survey = survey\n\n assert method in ['minimum curvature'] # if adding other methods, set line_kind_index appropriately\n\n self.knot_count = survey.station_count\n assert self.knot_count >= 2 # vertical well could be hamdled by allowing a single station in survey?\n self.line_kind_index = 5 # minimum curvature spline\n self.measured_depths = survey.measured_depths.copy()\n self.md_uom = survey.md_uom\n if not self.title:\n self.title = rqet.find_nested_tags_text(survey.root_node, ['Citation', 'Title'])\n self.start_md = self.measured_depths[0]\n self.finish_md = self.measured_depths[-1]\n if md_domain is not None:\n self.md_domain = md_domain\n self.control_points = np.empty((self.knot_count, 3))\n self.control_points[0, :] = survey.first_station\n for sp in range(1, self.knot_count):\n i1 = survey.inclinations[sp - 1]\n i2 = survey.inclinations[sp]\n az1 = survey.azimuths[sp - 1]\n az2 = survey.azimuths[sp]\n delta_md = survey.measured_depths[sp] - survey.measured_depths[sp - 1]\n assert delta_md > 0.0\n if i1 == i2 and az1 == az2:\n matrix = vec.rotation_3d_matrix((180.0 - i1, -az1, 0.0)) # TODO: check sign of az1\n delta_v = vec.rotate_vector(matrix, np.array([0.0, delta_md, 0.0]))\n else:\n i1 = maths.radians(i1)\n i2 = maths.radians(i2)\n az1 = maths.radians(az1)\n az2 = maths.radians(az2)\n sin_i1 = maths.sin(i1)\n sin_i2 = maths.sin(i2)\n cos_theta = min(max(maths.cos(i2 - i1) - sin_i1 * sin_i2 * (1.0 - maths.cos(az2 - az1)), -1.0), 1.0)\n theta = maths.acos(cos_theta)\n # theta = maths.acos(sin_i1 * sin_i2 * maths.cos(az2 - az1) + (maths.cos(i1) * maths.cos(i2)))\n assert theta != 0.0 # shouldn't happen as covered by if clause above\n half_rf = maths.tan(0.5 * theta) / theta\n delta_y = delta_md * half_rf * ((sin_i1 * maths.cos(az1)) + (sin_i2 * maths.cos(az2)))\n delta_x = delta_md * half_rf * ((sin_i1 * maths.sin(az1)) + (sin_i2 * maths.sin(az2)))\n delta_z = delta_md * half_rf * (maths.cos(i1) + maths.cos(i2))\n delta_v = np.array((delta_x, delta_y, delta_z))\n self.control_points[sp] = self.control_points[sp - 1] + delta_v\n self.tangent_vectors = None\n if set_tangent_vectors:\n self.set_tangents()\n self.md_datum = survey.md_datum\n\n def load_from_data_frame(\n self,\n data_frame,\n md_col = 'MD',\n x_col = 'X',\n y_col = 'Y',\n z_col = 'Z',\n md_uom = 'm',\n md_domain = None,\n md_datum = None, # MdDatum object\n title = None,\n set_tangent_vectors = True):\n \"\"\"Load MD and control points (xyz) data from a pandas data frame.\"\"\"\n\n try:\n for col in [md_col, x_col, y_col, z_col]:\n assert col in data_frame.columns\n self.knot_count = len(data_frame)\n assert self.knot_count >= 2 # vertical well could be hamdled by allowing a single station in survey?\n self.line_kind_index = 5 # assume minimum curvature spline\n # self.md_uom = bwam.p_length_unit(md_uom)\n self.md_uom = bwam.rq_length_unit(md_uom)\n start = data_frame.iloc[0]\n finish = data_frame.iloc[-1]\n if title:\n self.title 
= title\n self.start_md = start[md_col]\n self.finish_md = finish[md_col]\n if md_domain is not None:\n self.md_domain = md_domain\n self.measured_depths = np.empty(self.knot_count)\n self.measured_depths[:] = data_frame[md_col]\n self.control_points = np.empty((self.knot_count, 3))\n self.control_points[:, 0] = data_frame[x_col]\n self.control_points[:, 1] = data_frame[y_col]\n self.control_points[:, 2] = data_frame[z_col]\n self.tangent_vectors = None\n if set_tangent_vectors:\n self.set_tangents()\n self.md_datum = md_datum\n except Exception:\n log.exception('failed to load trajectory object from data frame')\n\n def load_from_cell_list(self, grid, cell_kji0_list, spline_mode = 'cube', md_uom = 'm'):\n \"\"\"Loads the trajectory object based on the centre points of a list of cells.\"\"\"\n\n assert grid is not None, 'grid argument missing for trajectory initislisation from cell list'\n cell_kji0_list = np.array(cell_kji0_list, dtype = int)\n assert cell_kji0_list.ndim == 2 and cell_kji0_list.shape[1] == 3\n assert spline_mode in ['none', 'linear', 'square', 'cube']\n\n cell_centres = grid.centre_point_list(cell_kji0_list)\n\n knot_count = len(cell_kji0_list) + 2\n self.line_kind_index = 5 # 5 means minimum curvature spline; todo: set to cubic spline value?\n self.md_uom = bwam.rq_length_unit(md_uom)\n self.start_md = 0.0\n points = np.empty((knot_count, 3))\n points[1:-1] = cell_centres\n points[0] = points[1]\n points[0, 2] = 0.0\n points[-1] = points[-2]\n points[-1, 2] *= 1.05\n if spline_mode == 'none':\n self.knot_count = knot_count\n self.control_points = points\n else:\n self.control_points = rql.spline(points, tangent_weight = spline_mode, min_subdivisions = 3)\n self.knot_count = len(self.control_points)\n self.set_measured_depths()\n\n def load_from_wellspec(self, grid, wellspec_file, well_name, spline_mode = 'cube', md_uom = 'm'):\n\n col_list = ['IW', 'JW', 'L']\n wellspec_dict = wsk.load_wellspecs(wellspec_file, well = well_name, column_list = col_list)\n\n assert len(wellspec_dict) == 1, 'no wellspec data found in file ' + wellspec_file + ' for well ' + well_name\n\n df = wellspec_dict[well_name]\n assert len(df) > 0, 'no rows of perforation data found in wellspec for well ' + well_name\n\n cell_kji0_list = np.empty((len(df), 3), dtype = int)\n cell_kji0_list[:, 0] = df['L']\n cell_kji0_list[:, 1] = df['JW']\n cell_kji0_list[:, 2] = df['IW']\n\n self.load_from_cell_list(grid, cell_kji0_list, spline_mode, md_uom)\n\n def load_from_ascii_file(self,\n trajectory_file,\n comment_character = '#',\n space_separated_instead_of_csv = False,\n md_col = 'MD',\n x_col = 'X',\n y_col = 'Y',\n z_col = 'Z',\n md_uom = 'm',\n md_domain = None,\n md_datum = None,\n well_col = None,\n title = None,\n set_tangent_vectors = True):\n \"\"\"Loads the trajectory object from an ascii file with columns for MD, X, Y & Z (and optionally WELL).\"\"\"\n\n if not title and not self.title:\n self.title = 'well trajectory'\n\n try:\n df = pd.read_csv(trajectory_file,\n comment = comment_character,\n delim_whitespace = space_separated_instead_of_csv)\n if df is None:\n raise Exception\n except Exception:\n log.error('failed to read ascii deviation survey file ' + str(trajectory_file))\n raise\n if well_col and well_col not in df.columns:\n log.warning('well column ' + str(well_col) + ' not found in ascii trajectory file ' + str(trajectory_file))\n well_col = None\n if well_col is None:\n for col in df.columns:\n if str(col).upper().startswith('WELL'):\n well_col = col\n break\n if title: # filter data 
frame by well name\n if well_col:\n df = df[df[well_col] == title]\n if len(df) == 0:\n log.error('no data found for well ' + str(title) + ' in file ' + str(trajectory_file))\n elif well_col is not None:\n if len(set(df[well_col])) > 1:\n raise Exception(\n 'attempt to set trajectory for unidentified well from ascii file holding data for multiple wells')\n self.load_from_data_frame(df,\n md_col = md_col,\n x_col = x_col,\n y_col = y_col,\n z_col = z_col,\n md_uom = md_uom,\n md_domain = md_domain,\n md_datum = md_datum,\n title = title,\n set_tangent_vectors = set_tangent_vectors)\n\n def set_tangents(self, force = False, write_hdf5 = False, weight = 'cube'):\n \"\"\"Calculates tangent vectors based on control points.\n\n arguments:\n force (boolean, default False): if False and tangent vectors already exist then the existing ones are used;\n if True or if no tangent vectors exist then they are computed\n write_hdf5 (boolean, default False): if True and new tangent vectors are computed then the array is also written\n directly to the hdf5 file\n weight (string, default 'cube'): one of 'linear', 'square', 'cube'; if linear, each tangent is the mean of the\n direction vectors of the two trajectory segments which meet at the knot; the square and cube options give\n increased weight to the direction vector of shorter segments (usually better)\n\n returns:\n numpy float array of shape (knot_count, 3) being the tangents in xyz, 'pointing' in the direction of increased\n knot index; the tangents are also stored as an attribute of the object\n\n note:\n the write_hdf5() method writes all the array data for the trajectory, including the tangent vectors; only set\n the write_hdf5 argument to this method to True if the other arrays for the trajectory already exist in the hdf5 file\n \"\"\"\n\n if self.tangent_vectors is not None and not force:\n return self.tangent_vectors\n assert self.knot_count is not None and self.knot_count >= 2\n assert self.control_points is not None and len(self.control_points) == self.knot_count\n\n self.tangent_vectors = rql.tangents(self.control_points, weight = weight)\n\n if write_hdf5:\n h5_reg = rwh5.H5Register(self.model)\n h5_reg.register_dataset(self.uuid, 'tangentVectors', self.tangent_vectors)\n h5_reg.write(file = self.model.h5_filename(), mode = 'a')\n\n return self.tangent_vectors\n\n def dataframe(self, md_col = 'MD', x_col = 'X', y_col = 'Y', z_col = 'Z'):\n \"\"\"Returns a pandas data frame containing MD and control points (xyz) data.\n\n note:\n set md_col to None for a dataframe containing only X, Y & Z data\n\n :meta common:\n \"\"\"\n\n if md_col:\n column_list = [md_col, x_col, y_col, z_col]\n else:\n column_list = [x_col, y_col, z_col]\n\n data_frame = pd.DataFrame(columns = column_list)\n if md_col:\n data_frame[md_col] = self.measured_depths\n data_frame[x_col] = self.control_points[:, 0]\n data_frame[y_col] = self.control_points[:, 1]\n data_frame[z_col] = self.control_points[:, 2]\n return data_frame\n\n def write_to_ascii_file(self,\n trajectory_file,\n mode = 'w',\n space_separated_instead_of_csv = False,\n md_col = 'MD',\n x_col = 'X',\n y_col = 'Y',\n z_col = 'Z'):\n \"\"\"Writes trajectory to an ascii file.\n\n note:\n set md_col to None for a dataframe containing only X, Y & Z data\n \"\"\"\n\n df = self.dataframe(md_col = md_col, x_col = x_col, y_col = y_col, z_col = z_col)\n sep = ' ' if space_separated_instead_of_csv else ','\n df.to_csv(trajectory_file, sep = sep, index = False, mode = mode)\n\n def xyz_for_md(self, md):\n \"\"\"Returns an 
xyz triplet corresponding to the given measured depth; uses simple linear interpolation between\n knots.\n\n args:\n md (float): measured depth for which xyz location is required; units must be those of self.md_uom\n\n returns:\n triple float being x, y, z coordinates of point on trajectory corresponding to given measured depth\n\n note:\n the algorithm uses a simple linear interpolation between neighbouring knots (control points) on the trajectory;\n if the measured depth is less than zero or greater than the finish md, a single None is returned; if the md is\n less than the start md then a linear interpolation between the md datum location and the first knot is returned\n\n :meta common:\n \"\"\"\n\n def interpolate(p1, p2, f):\n return f * p2 + (1.0 - f) * p1\n\n def search(md, i1, i2):\n if i2 - i1 <= 1:\n if md == self.measured_depths[i1]:\n return self.control_points[i1]\n return interpolate(self.control_points[i1], self.control_points[i1 + 1],\n (md - self.measured_depths[i1]) /\n (self.measured_depths[i1 + 1] - self.measured_depths[i1]))\n im = i1 + (i2 - i1) // 2\n if self.measured_depths[im] >= md:\n return search(md, i1, im)\n return search(md, im, i2)\n\n if md < 0.0 or md > self.finish_md or md > self.measured_depths[-1]:\n return None\n if md <= self.start_md:\n if self.start_md == 0.0:\n return self.md_datum.location\n return interpolate(np.array(self.md_datum.location), self.control_points[0], md / self.start_md)\n return search(md, 0, self.knot_count - 1)\n\n def splined_trajectory(self,\n well_name,\n min_subdivisions = 1,\n max_segment_length = None,\n max_degrees_per_knot = 5.0,\n use_tangents_if_present = True,\n store_tangents_if_calculated = True):\n \"\"\"Creates and returns a new Trajectory derived as a cubic spline of this trajectory.\n\n arguments:\n well_name (string): the name to use as the citation title for the new trajectory\n min_subdivisions (+ve integer, default 1): the minimum number of segments in the trajectory for each\n segment in this trajectory\n max_segment_length (float, optional): if present, each segment of this trajectory is subdivided so\n that the naive subdivided length is not greater than the specified length\n max_degrees_per_knot (float, default 5.0): the maximum change in direction, in degrees, assumed between\n neighbouring knots when deciding how many subdivisions to make for a segment\n use_tangents_if_present (boolean, default True): if True, any tangent vectors in this trajectory\n are used during splining\n store_tangents_if_calculated (boolean, default True): if True, any tangents calculated by the method\n are stored in the object (causing any previous tangents to be discarded); however, the new tangents\n are not written to the hdf5 file by this method\n\n returns:\n Trajectory object with control points lying on a cubic spline of the points of this trajectory\n\n notes:\n this method is typically used to smooth an artificial or simulator trajectory;\n measured depths are re-calculated and will differ from those in this trajectory;\n unexpected behaviour may occur if the z units are different from the xy units in the crs;\n if tangent vectors for neighbouring points in this trajectory are pointing in opposite directions,\n the resulting spline is likely to be bad;\n the max_segment_length is applied when deciding how many subdivisions to make for a segment in this\n trajectory, based on the straight line segment length; segments in the resulting spline may exceed this\n length;\n similarly max_degrees_per_knot assumes a simple bend between neighbouring knots; if the position of the\n control points results in a loop, the value may be 
exceeded in the spline;\n the hdf5 data for the splined trajectory is not written by this method, neither is the xml created;\n no interpretation object is created by this method\n NB: direction of tangent vectors affects results, set use_tangents_if_present = False to\n ensure locally calculated tangent vectors are used\n \"\"\"\n\n assert self.knot_count > 1 and self.control_points is not None\n assert min_subdivisions >= 1\n assert max_segment_length is None or max_segment_length > 0.0\n assert max_degrees_per_knot is None or max_degrees_per_knot > 0.0\n if not well_name:\n well_name = self.title\n\n tangent_vectors = self.tangent_vectors\n if tangent_vectors is None or not use_tangents_if_present:\n tangent_vectors = rql.tangents(self.control_points, weight = 'square')\n if store_tangents_if_calculated:\n self.tangent_vectors = tangent_vectors\n\n spline_traj = Trajectory(self.model,\n well_name = well_name,\n md_datum = self.md_datum,\n length_uom = self.md_uom,\n md_domain = self.md_domain)\n spline_traj.line_kind_index = self.line_kind_index # not sure how we should really be setting this\n spline_traj.crs_uuid = self.crs_uuid\n spline_traj.start_md = self.start_md\n spline_traj.deviation_survey = self.deviation_survey\n\n spline_traj.control_points = rql.spline(self.control_points,\n tangent_vectors = tangent_vectors,\n min_subdivisions = min_subdivisions,\n max_segment_length = max_segment_length,\n max_degrees_per_knot = max_degrees_per_knot)\n spline_traj.knot_count = len(spline_traj.control_points)\n\n spline_traj.set_measured_depths()\n\n return spline_traj\n\n def set_measured_depths(self):\n \"\"\"Sets the measured depths from the start_md value and the control points.\"\"\"\n\n self.measured_depths = np.empty(self.knot_count)\n self.measured_depths[0] = self.start_md\n for sk in range(1, self.knot_count):\n self.measured_depths[sk] = (self.measured_depths[sk - 1] +\n vec.naive_length(self.control_points[sk] - self.control_points[sk - 1]))\n self.finish_md = self.measured_depths[-1]\n\n return self.measured_depths\n\n def create_feature_and_interpretation(self):\n \"\"\"Instantiate new empty WellboreFeature and WellboreInterpretation objects, if a wellboreinterpretation does\n not already exist.\n\n Uses the trajectory citation title as the well name\n \"\"\"\n\n log.debug(\"Creating a new WellboreInterpretation..\")\n log.debug(f\"WellboreFeature exists: {self.wellbore_feature is not None}\")\n log.debug(f\"WellboreInterpretation exists: {self.wellbore_interpretation is not None}\")\n\n if self.wellbore_interpretation is None:\n log.info(f\"Creating WellboreInterpretation and WellboreFeature with name {self.title}\")\n self.wellbore_feature = rqo.WellboreFeature(parent_model = self.model, feature_name = self.title)\n self.wellbore_interpretation = rqo.WellboreInterpretation(parent_model = self.model,\n wellbore_feature = self.wellbore_feature)\n self.feature_and_interpretation_to_be_written = True\n else:\n raise ValueError(\"Cannot add WellboreFeature, trajectory already has an associated WellboreInterpretation\")\n\n def create_xml(self,\n ext_uuid = None,\n wbt_uuid = None,\n md_datum_root = None,\n md_datum_xyz = None,\n add_as_part = True,\n add_relationships = True,\n title = None,\n originator = None):\n \"\"\"Create a wellbore trajectory representation node from a Trajectory object, optionally add as part.\n\n notes:\n measured depth datum xml node must be in place before calling this function;\n branching well structures (multi-laterals) are supported by the resqml 
standard but not yet by\n this code;\n optional witsml trajectory reference not yet supported here\n\n :meta common:\n \"\"\"\n\n if title:\n self.title = title\n if not self.title:\n self.title = 'wellbore trajectory'\n\n if ext_uuid is None:\n ext_uuid = self.model.h5_uuid()\n\n if self.feature_and_interpretation_to_be_written:\n if self.wellbore_interpretation is None:\n self.create_feature_and_interpretation()\n if self.wellbore_feature is not None:\n self.wellbore_feature.create_xml(add_as_part = add_as_part, originator = originator)\n self.wellbore_interpretation.create_xml(add_as_part = add_as_part,\n add_relationships = add_relationships,\n originator = originator)\n\n if md_datum_root is None:\n if self.md_datum is None:\n assert md_datum_xyz is not None\n self.md_datum = MdDatum(self.model, location = md_datum_xyz)\n if self.md_datum.root is None:\n md_datum_root = self.md_datum.create_xml()\n else:\n md_datum_root = self.md_datum.root\n\n wbt_node = super().create_xml(originator = originator, add_as_part = False)\n\n start_node = rqet.SubElement(wbt_node, ns['resqml2'] + 'StartMd')\n start_node.set(ns['xsi'] + 'type', ns['xsd'] + 'double')\n start_node.text = str(self.start_md)\n\n finish_node = rqet.SubElement(wbt_node, ns['resqml2'] + 'FinishMd')\n finish_node.set(ns['xsi'] + 'type', ns['xsd'] + 'double')\n finish_node.text = str(self.finish_md)\n\n md_uom = rqet.SubElement(wbt_node, ns['resqml2'] + 'MdUom')\n md_uom.set(ns['xsi'] + 'type', ns['eml'] + 'LengthUom')\n md_uom.text = bwam.rq_length_unit(self.md_uom)\n\n self.model.create_md_datum_reference(self.md_datum.root, root = wbt_node)\n\n if self.line_kind_index != 0: # 0 means vertical well, which doesn't need a geometry\n\n # todo: check geometry elements for parametric curve flavours other than minimum curvature\n\n geom = rqet.SubElement(wbt_node, ns['resqml2'] + 'Geometry')\n geom.set(ns['xsi'] + 'type', ns['resqml2'] + 'ParametricLineGeometry')\n geom.text = '\\n'\n\n # note: resqml standard allows trajectory to be in different crs to md datum\n # however, this module often uses the md datum crs, if the trajectory has been imported\n if self.crs_uuid is None:\n self.crs_uuid = self.md_datum.crs_uuid\n assert self.crs_uuid is not None\n self.model.create_crs_reference(crs_uuid = self.crs_uuid, root = geom)\n\n kc_node = rqet.SubElement(geom, ns['resqml2'] + 'KnotCount')\n kc_node.set(ns['xsi'] + 'type', ns['xsd'] + 'positiveInteger')\n kc_node.text = str(self.knot_count)\n\n lki_node = rqet.SubElement(geom, ns['resqml2'] + 'LineKindIndex')\n lki_node.set(ns['xsi'] + 'type', ns['xsd'] + 'integer')\n lki_node.text = str(self.line_kind_index)\n\n cpp_node = rqet.SubElement(geom, ns['resqml2'] + 'ControlPointParameters')\n cpp_node.set(ns['xsi'] + 'type', ns['resqml2'] + 'DoubleHdf5Array')\n cpp_node.text = rqet.null_xml_text\n\n cpp_values_node = rqet.SubElement(cpp_node, ns['resqml2'] + 'Values')\n cpp_values_node.set(ns['xsi'] + 'type', ns['eml'] + 'Hdf5Dataset')\n cpp_values_node.text = rqet.null_xml_text\n\n self.model.create_hdf5_dataset_ref(ext_uuid, self.uuid, 'controlPointParameters', root = cpp_values_node)\n\n cp_node = rqet.SubElement(geom, ns['resqml2'] + 'ControlPoints')\n cp_node.set(ns['xsi'] + 'type', ns['resqml2'] + 'Point3dHdf5Array')\n cp_node.text = rqet.null_xml_text\n\n cp_coords_node = rqet.SubElement(cp_node, ns['resqml2'] + 'Coordinates')\n cp_coords_node.set(ns['xsi'] + 'type', ns['eml'] + 'Hdf5Dataset')\n cp_coords_node.text = rqet.null_xml_text\n\n self.model.create_hdf5_dataset_ref(ext_uuid, 
self.uuid, 'controlPoints', root = cp_coords_node)\n\n if self.tangent_vectors is not None:\n\n tv_node = rqet.SubElement(geom, ns['resqml2'] + 'TangentVectors')\n tv_node.set(ns['xsi'] + 'type', ns['resqml2'] + 'Point3dHdf5Array')\n tv_node.text = rqet.null_xml_text\n\n tv_coords_node = rqet.SubElement(tv_node, ns['resqml2'] + 'Coordinates')\n tv_coords_node.set(ns['xsi'] + 'type', ns['eml'] + 'Hdf5Dataset')\n tv_coords_node.text = rqet.null_xml_text\n\n self.model.create_hdf5_dataset_ref(ext_uuid, self.uuid, 'tangentVectors', root = tv_coords_node)\n\n if self.md_domain:\n domain_node = rqet.SubElement(wbt_node, ns['resqml2'] + 'MdDomain')\n domain_node.set(ns['xsi'] + 'type', ns['resqml2'] + 'MdDomain')\n domain_node.text = self.md_domain\n\n if self.deviation_survey is not None:\n ds_root = self.deviation_survey.root_node\n self.model.create_ref_node('DeviationSurvey',\n rqet.find_tag(rqet.find_tag(ds_root, 'Citation'), 'Title').text,\n bu.uuid_from_string(ds_root.attrib['uuid']),\n content_type = 'obj_DeviationSurveyRepresentation',\n root = wbt_node)\n\n interp_root = None\n if self.wellbore_interpretation is not None:\n interp_root = self.wellbore_interpretation.root\n self.model.create_ref_node('RepresentedInterpretation',\n rqet.find_nested_tags_text(interp_root, ['Citation', 'Title']),\n bu.uuid_from_string(interp_root.attrib['uuid']),\n content_type = 'obj_WellboreInterpretation',\n root = wbt_node)\n\n if add_as_part:\n self.model.add_part('obj_WellboreTrajectoryRepresentation', self.uuid, wbt_node)\n if add_relationships:\n crs_root = self.crs_root\n self.model.create_reciprocal_relationship(wbt_node, 'destinationObject', crs_root, 'sourceObject')\n self.model.create_reciprocal_relationship(wbt_node, 'destinationObject', self.md_datum.root,\n 'sourceObject')\n if self.deviation_survey is not None:\n self.model.create_reciprocal_relationship(wbt_node, 'destinationObject',\n self.deviation_survey.root_node, 'sourceObject')\n if interp_root is not None:\n self.model.create_reciprocal_relationship(wbt_node, 'destinationObject', interp_root,\n 'sourceObject')\n ext_part = rqet.part_name_for_object('obj_EpcExternalPartReference', ext_uuid, prefixed = False)\n ext_node = self.model.root_for_part(ext_part)\n self.model.create_reciprocal_relationship(wbt_node, 'mlToExternalPartProxy', ext_node,\n 'externalPartProxyToMl')\n\n return wbt_node\n\n def write_hdf5(self, file_name = None, mode = 'a'):\n \"\"\"Create or append to an hdf5 file, writing datasets for the measured depths, control points and tangent\n vectors.\n\n :meta common:\n \"\"\"\n\n # NB: array data must all have been set up prior to calling this function\n if self.uuid is None:\n self.uuid = bu.new_uuid()\n\n h5_reg = rwh5.H5Register(self.model)\n h5_reg.register_dataset(self.uuid, 'controlPointParameters', self.measured_depths)\n h5_reg.register_dataset(self.uuid, 'controlPoints', self.control_points)\n if self.tangent_vectors is not None:\n h5_reg.register_dataset(self.uuid, 'tangentVectors', self.tangent_vectors)\n h5_reg.write(file = file_name, mode = mode)\n\n def __eq__(self, other):\n \"\"\"Implements equals operator.\n\n Compares class type and uuid\n \"\"\"\n\n # TODO: more detailed equality comparison\n other_uuid = getattr(other, \"uuid\", None)\n return isinstance(other, self.__class__) and bu.matching_uuids(self.uuid, other_uuid)\n\n\nclass WellboreFrame(BaseResqpy):\n \"\"\"Class for RESQML WellboreFrameRepresentation objects (supporting well log Properties)\n\n RESQML documentation:\n\n Representation of a 
wellbore that is organized along a wellbore trajectory by its MD values.\n RESQML uses MD values to associate properties on points and to organize association of\n properties on intervals between MD points.\n\n Roughly equivalent to a Techlog \"dataset\" object with a given depth reference.\n\n The `logs` attribute is a :class:`resqpy.property.WellLogCollection` of all logs in the frame.\n \"\"\"\n\n resqml_type = 'WellboreFrameRepresentation'\n\n def __init__(self,\n parent_model,\n frame_root = None,\n uuid = None,\n trajectory = None,\n mds = None,\n represented_interp = None,\n title = None,\n originator = None,\n extra_metadata = None):\n \"\"\"Creates a new wellbore frame object and optionally loads it from xml or list of measured depths.\n\n arguments:\n parent_model (model.Model object): the model which the new wellbore frame belongs to\n frame_root (optional): DEPRECATED. the root node of an xml tree representing the wellbore frame;\n if not None, the new wellbore frame object is initialised based on the data in the tree;\n if None, an empty wellbore frame object is returned\n trajectory (Trajectory object, optional): the trajectory of the well; required if loading from\n list of measured depths\n mds (optional numpy 1D array, tuple or list of floats): ordered list of measured depths which\n will constitute the frame; ignored if frame_root is not None\n represented_interp (wellbore interpretation object, optional): if present, is noted as the wellbore\n interpretation object which this frame relates to; ignored if frame_root is not None\n title (str, optional): the citation title to use for a new wellbore frame;\n ignored if uuid or frame_root is not None\n originator (str, optional): the name of the person creating the wellbore frame, defaults to login id;\n ignored if uuid or frame_root is not None\n extra_metadata (dict, optional): string key, value pairs to add as extra metadata for the wellbore frame;\n ignored if uuid or frame_root is not None\n\n returns:\n the newly created wellbore frame object\n\n note:\n if initialising from a list of measured depths, the wellbore trajectory object must already exist\n \"\"\"\n\n #: Associated wellbore trajectory, an instance of :class:`resqpy.well.Trajectory`.\n self.trajectory = trajectory\n self.trajectory_uuid = None if trajectory is None else trajectory.uuid\n\n #: Instance of :class:`resqpy.organize.WellboreInterpretation`\n self.wellbore_interpretation = represented_interp\n self.wellbore_feature = None\n self.feature_and_interpretation_to_be_written = False\n\n #: number of measured depth nodes, each being an entry or exit point of trajectory with a cell\n self.node_count = None\n\n #: node_count measured depths (in same units and datum as trajectory) of cell entry and/or exit points\n self.node_mds = None\n\n #: All logs associated with the wellbore frame; an instance of :class:`resqpy.property.WellLogCollection`\n self.logs = None\n\n super().__init__(model = parent_model,\n uuid = uuid,\n title = title,\n originator = originator,\n extra_metadata = extra_metadata,\n root_node = frame_root)\n\n if self.root is None and trajectory is not None and mds is not None and len(mds) > 1:\n self.node_count = len(mds)\n self.node_mds = np.array(mds)\n assert self.node_mds is not None and self.node_mds.ndim == 1\n\n # UUID needs to have been created before LogCollection can be made\n # TODO: Figure out when this should be created, and how it is kept in sync when new logs are created\n self.logs = rqp.WellLogCollection(frame = self)\n\n 
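# Illustrative usage sketch: assuming an existing Model `model` and a Trajectory `traj`, a frame\n # can be built from an ordered list of measured depths and then persisted; the depth values and\n # citation title below are hypothetical example values only.\n #\n # frame = WellboreFrame(model, trajectory = traj, mds = [1200.0, 1250.0, 1300.0], title = 'example frame')\n # frame.write_hdf5() # registers and writes the NodeMd array\n # frame.create_xml() # creates the WellboreFrameRepresentation xml and, by default, adds it as a part\n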
def _load_from_xml(self):\n \"\"\"Loads the wellbore frame object from an xml node (and associated hdf5 data).\"\"\"\n\n # NB: node is the root level xml node, not a node in the md list!\n\n node = self.root\n assert node is not None\n\n trajectory_uuid = bu.uuid_from_string(rqet.find_nested_tags_text(node, ['Trajectory', 'UUID']))\n assert trajectory_uuid is not None, 'wellbore frame trajectory reference not found in xml'\n if self.trajectory is None:\n self.trajectory = Trajectory(self.model, uuid = trajectory_uuid)\n else:\n assert bu.matching_uuids(self.trajectory.uuid, trajectory_uuid), 'wellbore frame trajectory uuid mismatch'\n\n self.node_count = rqet.find_tag_int(node, 'NodeCount')\n assert self.node_count is not None, 'node count not found in xml for wellbore frame'\n assert self.node_count > 1, 'fewer than 2 nodes for wellbore frame'\n\n mds_node = rqet.find_tag(node, 'NodeMd')\n assert mds_node is not None, 'wellbore frame measured depths hdf5 reference not found in xml'\n load_hdf5_array(self, mds_node, 'node_mds')\n\n assert self.node_mds is not None and self.node_mds.ndim == 1 and self.node_mds.size == self.node_count\n\n interp_uuid = rqet.find_nested_tags_text(node, ['RepresentedInterpretation', 'UUID'])\n if interp_uuid is None:\n self.wellbore_interpretation = None\n else:\n self.wellbore_interpretation = rqo.WellboreInterpretation(self.model, uuid = interp_uuid)\n\n # Create well log collection of all log data\n self.logs = rqp.WellLogCollection(frame = self)\n\n def extract_crs_root(self):\n \"\"\"Returns the xml root node of the coordinate reference system used by the related trajectory.\"\"\"\n\n if self.trajectory is None:\n return None\n return self.trajectory.crs_root\n\n def create_feature_and_interpretation(self):\n \"\"\"Instantiate new empty WellboreFeature and WellboreInterpretation objects, if a wellbore interpretation does\n not already exist.\n\n Uses the wellbore frame citation title as the well name\n \"\"\"\n if self.wellbore_interpretation is None:\n log.info(f\"Creating WellboreInterpretation and WellboreFeature with name {self.title}\")\n self.wellbore_feature = rqo.WellboreFeature(parent_model = self.model, feature_name = self.title)\n self.wellbore_interpretation = rqo.WellboreInterpretation(parent_model = self.model,\n wellbore_feature = self.wellbore_feature)\n self.feature_and_interpretation_to_be_written = True\n else:\n log.info(\"WellboreInterpretation already exists\")\n\n def write_hdf5(self, file_name = None, mode = 'a'):\n \"\"\"Create or append to an hdf5 file, writing datasets for the measured depths.\"\"\"\n\n # NB: array data must have been set up prior to calling this function\n\n if self.uuid is None:\n self.uuid = bu.new_uuid()\n\n h5_reg = rwh5.H5Register(self.model)\n h5_reg.register_dataset(self.uuid, 'NodeMd', self.node_mds)\n h5_reg.write(file = file_name, mode = mode)\n\n def create_xml(self,\n ext_uuid = None,\n add_as_part = True,\n add_relationships = True,\n title = None,\n originator = None):\n \"\"\"Create a wellbore frame representation node from this WellboreFrame object, optionally add as part.\n\n note:\n trajectory xml node must be in place before calling this function\n \"\"\"\n\n assert self.trajectory is not None, 'trajectory object missing'\n assert self.trajectory.root is not None, 'trajectory xml not established'\n\n if self.feature_and_interpretation_to_be_written:\n if self.wellbore_interpretation is None:\n self.create_feature_and_interpretation()\n if self.wellbore_feature is not None:\n 
self.wellbore_feature.create_xml(add_as_part = add_as_part, originator = originator)\n self.wellbore_interpretation.create_xml(add_as_part = add_as_part,\n add_relationships = add_relationships,\n originator = originator)\n\n if ext_uuid is None:\n ext_uuid = self.model.h5_uuid()\n\n if title:\n self.title = title\n if not self.title:\n self.title = 'wellbore frame'\n\n wf_node = super().create_xml(originator = originator, add_as_part = False)\n\n # wellbore frame elements\n\n nc_node = rqet.SubElement(wf_node, ns['resqml2'] + 'NodeCount')\n nc_node.set(ns['xsi'] + 'type', ns['xsd'] + 'positiveInteger')\n nc_node.text = str(self.node_count)\n\n mds_node = rqet.SubElement(wf_node, ns['resqml2'] + 'NodeMd')\n mds_node.set(ns['xsi'] + 'type', ns['resqml2'] + 'DoubleHdf5Array')\n mds_node.text = rqet.null_xml_text\n\n mds_values_node = rqet.SubElement(mds_node, ns['resqml2'] + 'Values')\n mds_values_node.set(ns['xsi'] + 'type', ns['resqml2'] + 'Hdf5Dataset')\n mds_values_node.text = rqet.null_xml_text\n\n self.model.create_hdf5_dataset_ref(ext_uuid, self.uuid, 'NodeMd', root = mds_values_node)\n\n traj_root = self.trajectory.root\n self.model.create_ref_node('Trajectory',\n rqet.find_nested_tags_text(traj_root, ['Citation', 'Title']),\n bu.uuid_from_string(traj_root.attrib['uuid']),\n content_type = 'obj_WellboreTrajectoryRepresentation',\n root = wf_node)\n\n if self.wellbore_interpretation is not None:\n interp_root = self.wellbore_interpretation.root\n self.model.create_ref_node('RepresentedInterpretation',\n rqet.find_nested_tags_text(interp_root, ['Citation', 'Title']),\n bu.uuid_from_string(interp_root.attrib['uuid']),\n content_type = 'obj_WellboreInterpretation',\n root = wf_node)\n\n if add_as_part:\n self.model.add_part('obj_WellboreFrameRepresentation', self.uuid, wf_node)\n if add_relationships:\n self.model.create_reciprocal_relationship(wf_node, 'destinationObject', self.trajectory.root,\n 'sourceObject')\n ext_part = rqet.part_name_for_object('obj_EpcExternalPartReference', ext_uuid, prefixed = False)\n ext_node = self.model.root_for_part(ext_part)\n self.model.create_reciprocal_relationship(wf_node, 'mlToExternalPartProxy', ext_node,\n 'externalPartProxyToMl')\n if self.wellbore_interpretation is not None:\n interp_root = self.wellbore_interpretation.root\n self.model.create_reciprocal_relationship(wf_node, 'destinationObject', interp_root, 'sourceObject')\n\n return wf_node\n\n\nclass BlockedWell(BaseResqpy):\n \"\"\"Class for RESQML Blocked Wellbore Representation (Wells), ie cells visited by wellbore.\n\n RESQML documentation:\n\n The information that allows you to locate, on one or several grids (existing or planned),\n the intersection of volume (cells) and surface (faces) elements with a wellbore trajectory\n (existing or planned).\n\n note:\n measured depth data must be in same crs as those for the related trajectory\n \"\"\"\n\n resqml_type = 'BlockedWellboreRepresentation'\n well_name = rqo._alias_for_attribute(\"title\")\n\n def __init__(self,\n parent_model,\n blocked_well_root = None,\n uuid = None,\n grid = None,\n trajectory = None,\n wellspec_file = None,\n cellio_file = None,\n column_ji0 = None,\n well_name = None,\n check_grid_name = False,\n use_face_centres = False,\n represented_interp = None,\n originator = None,\n extra_metadata = None,\n add_wellspec_properties = False):\n \"\"\"Creates a new blocked well object and optionally loads it from xml, or trajectory, or Nexus wellspec file.\n\n arguments:\n parent_model (model.Model object): the model which the 
new blocked well belongs to\n blocked_well_root (DEPRECATED): the root node of an xml tree representing the blocked well;\n if not None, the new blocked well object is initialised based on the data in the tree;\n if None, the other arguments are used\n grid (optional, grid.Grid object): required if intialising from a trajectory or wellspec file;\n not used if blocked_well_root is not None\n trajectory (optional, Trajectory object): the trajectory of the well, to be intersected with the grid;\n not used if blocked_well_root is not None\n wellspec_file (optional, string): filename of an ascii file holding the Nexus wellspec data;\n ignored if blocked_well_root is not None or trajectory is not None\n cellio_file (optional, string): filename of an ascii file holding the RMS exported blocked well data;\n ignored if blocked_well_root is not None or trajectory is not None or wellspec_file is not None\n column_ji0 (optional, pair of ints): column indices (j0, i0) for a 'vertical' well; ignored if\n blocked_well_root is not None or trajectory is not None or wellspec_file is not None or\n cellio_file is not None\n well_name (string): the well name as given in the wellspec or cellio file; required if loading from\n one of those files; or the name to be used as citation title for a column well\n check_grid_name (boolean, default False): if True, the GRID column of the wellspec data will be checked\n for a match with the citation title of the grid object; perforations for other grids will be skipped;\n if False, all wellspec data is assumed to relate to the grid; only relevant when loading from wellspec\n use_face_centres (boolean, default False): if True, cell face centre points are used for the entry and\n exit points when constructing the simulation trajectory; if False and ANGLA & ANGLV data are available\n then entry and exit points are constructed based on a straight line at those angles passing through\n the centre of the cell; only relevant when loading from wellspec\n represented_interp (wellbore interpretation object, optional): if present, is noted as the wellbore\n interpretation object which this frame relates to; ignored if blocked_well_root is not None\n originator (str, optional): the name of the person creating the blocked well, defaults to login id;\n ignored if uuid or blocked_well_root is not None\n extra_metadata (dict, optional): string key, value pairs to add as extra metadata for the blocked well;\n ignored if uuid or blocked_well_root is not None\n add_wellspec_properties (boolean or list of str, default False): if not False, and initialising from\n a wellspec file, the blocked well has its hdf5 data written and xml created and properties are\n fully created; if a list is provided the elements must be numerical wellspec column names;\n if True, all numerical columns other than the cell indices are added as properties\n\n returns:\n the newly created blocked well object\n\n notes:\n if starting from a wellspec file or column indices, a 'simulation' trajectory and md datum objects are\n constructed to go with the blocked well;\n column wells might not be truly vertical - the trajectory will consist of linear segments joining the\n centres of the k faces in the column;\n optional RESQML attributes are not handled by this code (WITSML log reference, interval stratigraphic units,\n cell fluid phase units);\n mysterious RESQML WellboreFrameIndexableElements is not used in any other RESQML classes and is therefore\n not used here\n\n :meta common:\n \"\"\"\n\n self.trajectory = 
trajectory #: trajectory object associated with the wellbore\n self.trajectory_to_be_written = False\n self.feature_to_be_written = False\n self.interpretation_to_be_written = False\n self.node_count = None #: number of measured depth nodes, each being an entry or exit point of trajectory with a cell\n self.node_mds = None #: node_count measured depths (in same units and datum as trajectory) of cell entry and/or exit points\n self.cell_count = None #: number of blocked intervals (<= node_count - 1)\n self.cell_indices = None #: cell_count natural cell indices, paired with non-null grid_indices\n self.grid_indices = None #: node_count-1 indices into grid list for each interval in node_mds; -1 for unblocked interval\n self.face_pair_indices = None #: entry, exit face per cell indices, -1 for Target Depth termination within a cell\n self.grid_list = [\n ] #: list of grid objects indexed by grid_indices; for now only handles 1 grid unless loading from xml\n self.wellbore_interpretation = None #: associated wellbore interpretation object\n self.wellbore_feature = None #: associated wellbore feature object\n\n #: All logs associated with the blockedwellbore; an instance of :class:`resqpy.property.WellIntervalPropertyCollection`\n self.logs = None\n self.cellind_null = None\n self.gridind_null = None\n self.facepair_null = None\n\n # face_index_map maps from (axis, p01) to face index value in range 0..5\n # this is the default as indicated on page 139 (but not p. 180) of the RESQML Usage Gude v2.0.1\n # also assumes K is generally increasing downwards\n # see DevOps backlog item 269001 discussion for more information\n # self.face_index_map = np.array([[0, 1], [4, 2], [5, 3]], dtype = int)\n self.face_index_map = np.array([[0, 1], [2, 4], [5, 3]], dtype = int) # order: top, base, J-, I+, J+, I-\n # and the inverse, maps from 0..5 to (axis, p01)\n # self.face_index_inverse_map = np.array([[0, 0], [0, 1], [1, 1], [2, 1], [1, 0], [2, 0]], dtype = int)\n self.face_index_inverse_map = np.array([[0, 0], [0, 1], [1, 0], [2, 1], [1, 1], [2, 0]], dtype = int)\n # note: the rework_face_pairs() method, below, overwrites the face indices based on I, J cell indices\n\n super().__init__(model = parent_model,\n uuid = uuid,\n title = well_name,\n originator = originator,\n extra_metadata = extra_metadata,\n root_node = blocked_well_root)\n\n if self.root is None:\n self.wellbore_interpretation = represented_interp\n if grid is None and (self.trajectory is not None or wellspec_file is not None or cellio_file is not None or\n column_ji0 is not None):\n grid = self.model.grid()\n if self.trajectory is not None:\n self.compute_from_trajectory(self.trajectory, grid)\n elif wellspec_file is not None:\n okay = self.derive_from_wellspec(wellspec_file,\n well_name,\n grid,\n check_grid_name = check_grid_name,\n use_face_centres = use_face_centres,\n add_properties = add_wellspec_properties)\n elif cellio_file is not None:\n okay = self.import_from_rms_cellio(cellio_file, well_name, grid)\n if not okay:\n self.node_count = 0\n elif column_ji0 is not None:\n okay = self.set_for_column(well_name, grid, column_ji0)\n self.gridind_null = -1\n self.facepair_null = -1\n self.cellind_null = -1\n # else an empty object is returned\n\n def _load_from_xml(self):\n \"\"\"Loads the blocked wellbore object from an xml node (and associated hdf5 data).\"\"\"\n\n node = self.root\n assert node is not None\n\n trajectory_uuid = bu.uuid_from_string(rqet.find_nested_tags_text(node, ['Trajectory', 'UUID']))\n assert trajectory_uuid is not 
None, 'blocked well trajectory reference not found in xml'\n if self.trajectory is None:\n self.trajectory = Trajectory(self.model, uuid = trajectory_uuid)\n else:\n assert bu.matching_uuids(self.trajectory.uuid, trajectory_uuid), 'blocked well trajectory uuid mismatch'\n\n self.node_count = rqet.find_tag_int(node, 'NodeCount')\n assert self.node_count is not None and self.node_count >= 2, 'problem with blocked well node count'\n\n mds_node = rqet.find_tag(node, 'NodeMd')\n assert mds_node is not None, 'blocked well node measured depths hdf5 reference not found in xml'\n load_hdf5_array(self, mds_node, 'node_mds')\n\n assert self.node_mds is not None and self.node_mds.ndim == 1 and self.node_mds.size == self.node_count\n\n self.cell_count = rqet.find_tag_int(node, 'CellCount')\n assert self.cell_count is not None and self.cell_count > 0\n\n # TODO: remove this if block once RMS export issue resolved\n if self.cell_count == self.node_count:\n extended_mds = np.empty((self.node_mds.size + 1,))\n extended_mds[:-1] = self.node_mds\n extended_mds[-1] = self.node_mds[-1] + 1.0\n self.node_mds = extended_mds\n self.node_count += 1\n\n assert self.cell_count < self.node_count\n\n ci_node = rqet.find_tag(node, 'CellIndices')\n assert ci_node is not None, 'blocked well cell indices hdf5 reference not found in xml'\n load_hdf5_array(self, ci_node, 'cell_indices', dtype = int)\n assert (self.cell_indices is not None and self.cell_indices.ndim == 1 and\n self.cell_indices.size == self.cell_count), 'mismatch in number of cell indices for blocked well'\n self.cellind_null = rqet.find_tag_int(ci_node, 'NullValue')\n if self.cellind_null is None:\n self.cellind_null = -1 # if no Null found assume -1 default\n\n fi_node = rqet.find_tag(node, 'LocalFacePairPerCellIndices')\n assert fi_node is not None, 'blocked well face indices hdf5 reference not found in xml'\n load_hdf5_array(self, fi_node, 'raw_face_indices', dtype = 'int')\n assert self.raw_face_indices is not None, 'failed to load face indices for blocked well'\n assert self.raw_face_indices.size == 2 * self.cell_count, 'mismatch in number of cell faces for blocked well'\n if self.raw_face_indices.ndim > 1:\n self.raw_face_indices = self.raw_face_indices.reshape((self.raw_face_indices.size,))\n mask = np.where(self.raw_face_indices == -1)\n self.raw_face_indices[mask] = 0\n self.face_pair_indices = self.face_index_inverse_map[self.raw_face_indices]\n self.face_pair_indices[mask] = (-1, -1)\n self.face_pair_indices = self.face_pair_indices.reshape((-1, 2, 2))\n del self.raw_face_indices\n self.facepair_null = rqet.find_tag_int(fi_node, 'NullValue')\n if self.facepair_null is None:\n self.facepair_null = -1\n\n gi_node = rqet.find_tag(node, 'GridIndices')\n assert gi_node is not None, 'blocked well grid indices hdf5 reference not found in xml'\n load_hdf5_array(self, gi_node, 'grid_indices', dtype = 'int')\n assert self.grid_indices is not None and self.grid_indices.ndim == 1 and self.grid_indices.size == self.node_count - 1\n unique_grid_indices = np.unique(self.grid_indices) # sorted list of unique values\n self.gridind_null = rqet.find_tag_int(gi_node, 'NullValue')\n if self.gridind_null is None:\n self.gridind_null = -1 # if no Null found assume -1 default\n\n grid_node_list = rqet.list_of_tag(node, 'Grid')\n assert len(grid_node_list) > 0, 'blocked well grid reference(s) not found in xml'\n assert unique_grid_indices[0] >= -1 and unique_grid_indices[-1] < len(\n grid_node_list), 'blocked well grid index out 
of range'\n assert np.count_nonzero(\n self.grid_indices >= 0) == self.cell_count, 'mismatch in number of blocked well intervals'\n self.grid_list = []\n for grid_ref_node in grid_node_list:\n grid_node = self.model.referenced_node(grid_ref_node)\n assert grid_node is not None, 'grid referenced in blocked well xml is not present in model'\n grid_uuid = rqet.uuid_for_part_root(grid_node)\n grid_obj = self.model.grid(uuid = grid_uuid, find_properties = False)\n self.grid_list.append(grid_obj)\n\n interp_uuid = rqet.find_nested_tags_text(node, ['RepresentedInterpretation', 'UUID'])\n if interp_uuid is None:\n self.wellbore_interpretation = None\n else:\n self.wellbore_interpretation = rqo.WellboreInterpretation(self.model, uuid = interp_uuid)\n\n # Create blocked well log collection of all log data\n self.logs = rqp.WellIntervalPropertyCollection(frame = self)\n\n # Set up matches between cell_indices and grid_indices\n self.cell_grid_link = self.map_cell_and_grid_indices()\n\n def map_cell_and_grid_indices(self):\n \"\"\"Returns a list of index values linking the grid_indices to cell_indices.\n\n note:\n length will match grid_indices, and will show -1 where cell is unblocked\n \"\"\"\n\n indexmap = []\n j = 0\n for i in self.grid_indices:\n if i == -1:\n indexmap.append(-1)\n else:\n indexmap.append(j)\n j += 1\n return indexmap\n\n def compressed_grid_indices(self):\n \"\"\"Returns a list of grid indices excluding the -1 elements (unblocked intervals).\n\n note:\n length will match that of cell_indices\n \"\"\"\n\n compressed = []\n for i in self.grid_indices:\n if i >= 0:\n compressed.append(i)\n assert len(compressed) == self.cell_count\n return compressed\n\n def number_of_grids(self):\n \"\"\"Returns the number of grids referenced by the blocked well object.\"\"\"\n\n if self.grid_list is None:\n return 0\n return len(self.grid_list)\n\n def single_grid(self):\n \"\"\"Asserts that exactly one grid is being referenced and returns a grid object for that grid.\"\"\"\n\n assert len(self.grid_list) == 1, 'blocked well is not referring to exactly one grid'\n return self.grid_list[0]\n\n def grid_uuid_list(self):\n \"\"\"Returns a list of the uuids of the grids referenced by the blocked well object.\n\n :meta common:\n \"\"\"\n\n uuid_list = []\n if self.grid_list is None:\n return uuid_list\n for g in self.grid_list:\n uuid_list.append(g.uuid)\n return uuid_list\n\n def cell_indices_kji0(self):\n \"\"\"Returns a numpy int array of shape (N, 3) of cells visited by well, for a single grid situation.\n\n :meta common:\n \"\"\"\n\n grid = self.single_grid()\n return grid.denaturalized_cell_indices(self.cell_indices)\n\n def cell_indices_and_grid_list(self):\n \"\"\"Returns a numpy int array of shape (N, 3) of cells visited by well, and a list of grid objects of length N.\n\n :meta common:\n \"\"\"\n\n grid_for_cell_list = []\n grid_indices = self.compressed_grid_indices()\n assert len(grid_indices) == self.cell_count\n cell_indices = np.empty((self.cell_count, 3), dtype = int)\n for cell_number in range(self.cell_count):\n grid = self.grid_list[grid_indices[cell_number]]\n grid_for_cell_list.append(grid)\n cell_indices[cell_number] = grid.denaturalized_cell_index(self.cell_indices[cell_number])\n return cell_indices, grid_for_cell_list\n\n def cell_indices_for_grid_uuid(self, grid_uuid):\n \"\"\"Returns a numpy int array of shape (N, 3) of cells visited by well in specified grid.\n\n :meta common:\n \"\"\"\n\n if isinstance(grid_uuid, str):\n grid_uuid = bu.uuid_from_string(grid_uuid)\n ci_list, 
grid_list = self.cell_indices_and_grid_list()\n mask = np.zeros((len(ci_list),), dtype = bool)\n for cell_number in range(len(ci_list)):\n mask[cell_number] = bu.matching_uuids(grid_list[cell_number].uuid, grid_uuid)\n ci_selected = ci_list[mask]\n return ci_selected\n\n def box(self, grid_uuid = None):\n \"\"\"Returns the KJI box containing the cells visited by the well, for single grid if grid_uuid is None.\"\"\"\n\n if grid_uuid is None:\n cells_kji0 = self.cell_indices_kji0()\n else:\n cells_kji0 = self.cell_indices_for_grid_uuid(grid_uuid)\n\n if cells_kji0 is None or len(cells_kji0) == 0:\n return None\n well_box = np.empty((2, 3), dtype = int)\n well_box[0] = np.min(cells_kji0, axis = 0)\n well_box[1] = np.max(cells_kji0, axis = 0)\n return well_box\n\n def face_pair_array(self):\n \"\"\"Returns numpy int array of shape (N, 2, 2) being pairs of face (axis, polarity) pairs, to go with\n cell_kji0_array().\n\n note:\n\n each of the N rows in the returned array is of the form:\n\n ((entry_face_axis, entry_face_polarity), (exit_face_axis, exit_face_polarity))\n\n where the axis values are in the range 0 to 2 for k, j & i respectively, and\n the polarity values are zero for the 'negative' face and 1 for the 'positive' face;\n exit values may be -1 to indicate TD within the cell (ie. no exit point)\n \"\"\"\n return self.face_pair_indices\n\n def compute_from_trajectory(self,\n trajectory,\n grid,\n active_only = False,\n quad_triangles = True,\n use_single_layer_tactics = True):\n \"\"\"Populate this blocked wellbore object based on intersection of trajectory with cells of grid.\n\n arguments:\n trajectory (Trajectory object): the trajectory to intersect with the grid; control_points and crs_root attributes must\n be populated\n grid (grid.Grid object): the grid with which to intersect the trajectory\n active_only (boolean, default False): if True, only active cells are included as blocked intervals\n quad_triangles (boolean, default True): if True, 4 triangles per cell face are used for the intersection calculations;\n if False, only 2 triangles per face are used\n use_single_layer_tactics (boolean, default True): if True and the grid does not have k gaps, initial intersection\n calculations with fault planes or the outer IK & JK skin of the grid are calculated as if the grid is a single\n layer (and only after an intersection is thus found is the actual layer identified); this significantly speeds up\n computation but may cause failure in the presence of significantly non-straight pillars and could (rarely) cause\n problems where a fault plane is significantly skewed (non-planar) even if individual pillars are straight\n\n note:\n this method is computationally intensive and might take ~30 seconds for a tyipical grid and trajectory; large grids,\n grids with k gaps, or setting use_single_layer_tactics False will typically result in significantly longer processing time\n \"\"\"\n\n import resqpy.grid_surface as rgs # was causing circular import issue when at global level\n\n # note: see also extract_box_for_well code\n assert trajectory is not None and grid is not None\n if np.any(np.isnan(grid.points_ref(masked = False))):\n log.warning('grid does not have geometry defined everywhere: attempting fill')\n import resqpy.derived_model as rqdm\n fill_grid = rqdm.copy_grid(grid)\n fill_grid.set_geometry_is_defined(nullify_partial_pillars = True, complete_all = True)\n # note: may need to write hdf5 and create xml for fill_grid, depending on use in populate_blocked_well_from_trajectory()\n # 
fill_grid.write_hdf_from_caches()\n # fill_grid.create_xml\n grid = fill_grid\n assert trajectory.control_points is not None and trajectory.crs_root is not None and grid.crs_root is not None\n assert len(trajectory.control_points)\n\n self.trajectory = trajectory\n if not self.well_name:\n self.well_name = trajectory.title\n bw = rgs.populate_blocked_well_from_trajectory(self,\n grid,\n active_only = active_only,\n quad_triangles = quad_triangles,\n lazy = False,\n use_single_layer_tactics = use_single_layer_tactics)\n if bw is None:\n raise Exception('failed to generate blocked well from trajectory with uuid: ' + str(trajectory.uuid))\n\n assert bw is self\n\n def set_for_column(self, well_name, grid, col_ji0, skip_inactive = True):\n \"\"\"Populates empty blocked well for a 'vertical' well in given column; creates simulation trajectory and md\n datum.\"\"\"\n\n if well_name:\n self.well_name = well_name\n col_list = ['IW', 'JW', 'L', 'ANGLA', 'ANGLV'] # NB: L is Layer, ie. k\n df = pd.DataFrame(columns = col_list)\n pinch_col = grid.pinched_out(cache_cp_array = True, cache_pinchout_array = True)[:, col_ji0[0], col_ji0[1]]\n if skip_inactive and grid.inactive is not None:\n inactive_col = grid.inactive[:, col_ji0[0], col_ji0[1]]\n else:\n inactive_col = np.zeros(grid.nk, dtype = bool)\n for k0 in range(grid.nk):\n if pinch_col[k0] or inactive_col[k0]:\n continue\n # note: leaving ANGLA & ANGLV columns as NA will cause K face centres to be used when deriving from dataframe\n row_dict = {'IW': col_ji0[1] + 1, 'JW': col_ji0[0] + 1, 'L': k0 + 1}\n df = df.append(row_dict, ignore_index = True)\n\n return self.derive_from_dataframe(df, self.well_name, grid, use_face_centres = True)\n\n def derive_from_wellspec(self,\n wellspec_file,\n well_name,\n grid,\n check_grid_name = False,\n use_face_centres = False,\n add_properties = True):\n \"\"\"Populates empty blocked well from Nexus WELLSPEC data; creates simulation trajectory and md datum.\n\n args:\n wellspec_file (string): path of Nexus ascii file holding WELLSPEC keyword\n well_name (string): the name of the well as used in the wellspec data\n grid (grid.Grid object): the grid object which the cell indices in the wellspec data relate to\n check_grid_name (boolean, default False): if True, the GRID column of the wellspec data will be checked\n for a match with the citation title of the grid object; perforations for other grids will be skipped;\n if False, all wellspec data is assumed to relate to the grid\n use_face_centres (boolean, default False): if True, cell face centre points are used for the entry and\n exit points when constructing the simulation trajectory; if False and ANGLA & ANGLV data are available\n then entry and exit points are constructed based on a straight line at those angles passing through\n the centre of the cell\n add_properties (bool or list of str, default True): if True, WELLSPEC columns (other than IW, JW, L & GRID)\n are added as property parts for the blocked well; if a list is passed, it must contain a subset of the\n columns in the WELLSPEC data\n\n returns:\n self if successful; None otherwise\n\n note:\n if add_properties is True or present as a list, this method will write the hdf5, create the xml and add\n parts to the model for this blocked well and the properties\n \"\"\"\n\n if well_name:\n self.well_name = well_name\n else:\n well_name = self.well_name\n\n if add_properties:\n if isinstance(add_properties, list):\n col_list = ['IW', 'JW', 'L'] + [col.upper() for col in add_properties if col not in ['IW', 
'JW', 'L']]\n else:\n col_list = []\n else:\n col_list = ['IW', 'JW', 'L', 'ANGLA', 'ANGLV']\n if check_grid_name:\n grid_name = rqet.citation_title_for_node(grid.root).upper()\n if not grid_name:\n check_grid_name = False\n else:\n col_list.append('GRID')\n\n wellspec_dict = wsk.load_wellspecs(wellspec_file, well = well_name, column_list = col_list)\n\n assert len(wellspec_dict) == 1, 'no wellspec data found in file ' + wellspec_file + ' for well ' + well_name\n\n df = wellspec_dict[well_name]\n assert len(df) > 0, 'no rows of perforation data found in wellspec for well ' + well_name\n\n name_for_check = grid_name if check_grid_name else None\n return self.derive_from_dataframe(df,\n well_name,\n grid,\n grid_name_to_check = name_for_check,\n use_face_centres = use_face_centres,\n add_as_properties = add_properties)\n\n def derive_from_cell_list(self, cell_kji0_list, well_name, grid):\n \"\"\"Populate empty blocked well from numpy int array of shape (N, 3) being list of cells.\"\"\"\n\n df = pd.DataFrame(columns = ['IW', 'JW', 'L'])\n df['IW'] = cell_kji0_list[:, 2] + 1\n df['JW'] = cell_kji0_list[:, 1] + 1\n df['L'] = cell_kji0_list[:, 0] + 1\n\n return self.derive_from_dataframe(df, well_name, grid, use_face_centres = True)\n\n def derive_from_dataframe(self,\n df,\n well_name,\n grid,\n grid_name_to_check = None,\n use_face_centres = True,\n add_as_properties = False):\n \"\"\"Populate empty blocked well from WELLSPEC-like dataframe; first columns must be IW, JW, L (i, j, k).\n\n note:\n if add_as_properties is True or present as a list of wellspec column names, both the blocked well and\n the properties will have their hdf5 data written, xml created and be added as parts to the model\n \"\"\"\n\n def cell_kji0_from_df(df, df_row):\n row = df.iloc[df_row]\n if pd.isna(row[0]) or pd.isna(row[1]) or pd.isna(row[2]):\n return None\n cell_kji0 = np.empty((3,), dtype = int)\n cell_kji0[:] = row[2], row[1], row[0]\n cell_kji0[:] -= 1\n return cell_kji0\n\n if well_name:\n self.well_name = well_name\n else:\n well_name = self.well_name\n\n assert len(df) > 0, 'empty dataframe for blocked well ' + str(well_name)\n\n length_uom = grid.z_units()\n assert grid.xy_units() == length_uom, 'mixed length units in grid crs'\n\n previous_xyz = None\n trajectory_mds = []\n trajectory_points = [] # entries paired with trajectory_mds\n blocked_intervals = [\n ] # will have one fewer entries than trajectory nodes; 0 = blocked, -1 = not blocked (for grid indices)\n blocked_cells_kji0 = [] # will have length equal to number of 0's in blocked intervals\n blocked_face_pairs = [\n ] # same length as blocked_cells_kji0; each is ((entry axis, entry polarity), (exit axis, exit polarity))\n\n log.debug('wellspec dataframe for well ' + str(well_name) + ' has ' + str(len(df)) + ' row' + _pl(len(df)))\n\n skipped_warning_grid = None\n\n angles_present = ('ANGLV' in df.columns and 'ANGLA' in df.columns and not pd.isnull(df.iloc[0]['ANGLV']) and\n not pd.isnull(df.iloc[0]['ANGLA']))\n\n # TODO: remove these temporary overrides\n angles_present = False\n use_face_centres = True\n\n if not angles_present and not use_face_centres:\n log.warning(f'ANGLV and/or ANGLA data unavailable for well {well_name}: using face centres')\n use_face_centres = True\n\n for i in range(len(df)): # for each row in the dataframe for this well\n\n cell_kji0 = cell_kji0_from_df(df, i)\n if cell_kji0 is None:\n log.error('missing cell index in wellspec data for well ' + str(well_name) + ' row ' + str(i + 1))\n continue\n\n row = df.iloc[i]\n\n if 
grid_name_to_check and pd.notna(row['GRID']) and grid_name_to_check != str(row['GRID']).upper():\n other_grid = str(row['GRID'])\n if skipped_warning_grid != other_grid:\n log.warning('skipping perforation(s) in grid ' + other_grid + ' for well ' + str(well_name))\n skipped_warning_grid = other_grid\n continue\n cp = grid.corner_points(cell_kji0 = cell_kji0, cache_resqml_array = False)\n assert not np.any(np.isnan(cp)), 'missing geometry for perforation cell for well ' + str(well_name)\n\n if angles_present:\n log.debug('row ' + str(i) + ': using angles')\n angla = row['ANGLA']\n inclination = row['ANGLV']\n if inclination < 0.1:\n azimuth = 0.0\n else:\n i_vector = np.sum(cp[:, :, 1] - cp[:, :, 0], axis = (0, 1))\n azimuth = vec.azimuth(i_vector) - angla # see Nexus keyword reference doc\n well_vector = vec.unit_vector_from_azimuth_and_inclination(azimuth, inclination) * 10000.0\n # todo: the following might be producing NaN's when vector passes precisely through an edge\n (entry_axis, entry_polarity, entry_xyz, exit_axis, exit_polarity,\n exit_xyz) = find_entry_and_exit(cp, -well_vector, well_vector, well_name)\n else:\n # fabricate entry and exit axes and polarities based on indices alone\n # note: could use geometry but here a cheap rough-and-ready approach is used\n log.debug('row ' + str(i) + ': using cell moves')\n if i == 0:\n entry_axis, entry_polarity = 0, 0 # K-\n else:\n entry_move = cell_kji0 - blocked_cells_kji0[-1]\n log.debug(f'entry move: {entry_move}')\n if entry_move[1] == 0 and entry_move[2] == 0: # K move\n entry_axis = 0\n entry_polarity = 0 if entry_move[0] >= 0 else 1\n elif abs(entry_move[1]) > abs(entry_move[2]): # J dominant move\n entry_axis = 1\n entry_polarity = 0 if entry_move[1] >= 0 else 1\n else: # I dominant move\n entry_axis = 2\n entry_polarity = 0 if entry_move[2] >= 0 else 1\n if i == len(df) - 1:\n exit_axis, exit_polarity = entry_axis, 1 - entry_polarity\n else:\n next_cell_kji0 = cell_kji0_from_df(df, i + 1)\n if next_cell_kji0 is None:\n exit_axis, exit_polarity = entry_axis, 1 - entry_polarity\n else:\n exit_move = next_cell_kji0 - cell_kji0\n log.debug(f'exit move: {exit_move}')\n if exit_move[1] == 0 and exit_move[2] == 0: # K move\n exit_axis = 0\n exit_polarity = 1 if exit_move[0] >= 0 else 0\n elif abs(exit_move[1]) > abs(exit_move[2]): # J dominant move\n exit_axis = 1\n exit_polarity = 1 if exit_move[1] >= 0 else 0\n else: # I dominant move\n exit_axis = 2\n exit_polarity = 1 if exit_move[2] >= 0 else 0\n\n if use_face_centres: # override the vector based xyz entry and exit points with face centres\n if entry_axis == 0:\n entry_xyz = np.mean(cp[entry_polarity, :, :], axis = (0, 1))\n elif entry_axis == 1:\n entry_xyz = np.mean(cp[:, entry_polarity, :], axis = (0, 1))\n else:\n entry_xyz = np.mean(cp[:, :, entry_polarity], axis = (0, 1)) # entry_axis == 2, ie. I\n if exit_axis == 0:\n exit_xyz = np.mean(cp[exit_polarity, :, :], axis = (0, 1))\n elif exit_axis == 1:\n exit_xyz = np.mean(cp[:, exit_polarity, :], axis = (0, 1))\n else:\n exit_xyz = np.mean(cp[:, :, exit_polarity], axis = (0, 1)) # exit_axis == 2, ie. 
I\n\n log.debug(\n f'cell: {cell_kji0}; entry axis: {entry_axis}; polarity {entry_polarity}; exit axis: {exit_axis}; polarity {exit_polarity}'\n )\n\n if previous_xyz is None: # first entry\n log.debug('adding mean sea level trajectory start')\n previous_xyz = entry_xyz.copy()\n previous_xyz[2] = 0.0 # use depth zero as md datum\n trajectory_mds.append(0.0)\n trajectory_points.append(previous_xyz)\n if not vec.isclose(previous_xyz, entry_xyz, tolerance = 0.05): # add an unblocked interval\n log.debug('adding unblocked interval')\n trajectory_points.append(entry_xyz)\n new_md = trajectory_mds[-1] + vec.naive_length(\n entry_xyz - previous_xyz) # assumes x, y & z units are same\n trajectory_mds.append(new_md)\n blocked_intervals.append(-1) # unblocked interval\n previous_xyz = entry_xyz\n log.debug('adding blocked interval for cell kji0: ' + str(cell_kji0))\n trajectory_points.append(exit_xyz)\n new_md = trajectory_mds[-1] + vec.naive_length(exit_xyz - previous_xyz) # assumes x, y & z units are same\n trajectory_mds.append(new_md)\n blocked_intervals.append(0) # blocked interval\n previous_xyz = exit_xyz\n blocked_cells_kji0.append(cell_kji0)\n blocked_face_pairs.append(((entry_axis, entry_polarity), (exit_axis, exit_polarity)))\n\n blocked_count = len(blocked_cells_kji0)\n if blocked_count == 0:\n log.warning('no intervals blocked for well ' + str(well_name))\n return None\n else:\n log.info(str(blocked_count) + ' interval' + _pl(blocked_count) + ' blocked for well ' + str(well_name))\n\n self.node_count = len(trajectory_mds)\n self.node_mds = np.array(trajectory_mds)\n self.cell_count = len(blocked_cells_kji0)\n self.grid_indices = np.array(blocked_intervals, dtype = int) # NB. only supporting one grid at the moment\n self.cell_indices = grid.natural_cell_indices(np.array(blocked_cells_kji0))\n self.face_pair_indices = np.array(blocked_face_pairs, dtype = int)\n self.grid_list = [grid]\n\n # if last segment terminates at bottom face in bottom layer, add a tail to trajectory\n if blocked_count > 0 and exit_axis == 0 and exit_polarity == 1 and cell_kji0[\n 0] == grid.nk - 1 and grid.k_direction_is_down:\n tail_length = 10.0 # metres or feet\n tail_xyz = trajectory_points[-1].copy()\n tail_xyz[2] += tail_length * (1.0 if grid.z_inc_down() else -1.0)\n trajectory_points.append(tail_xyz)\n new_md = trajectory_mds[-1] + tail_length\n trajectory_mds.append(new_md)\n\n self.create_md_datum_and_trajectory(grid, trajectory_mds, trajectory_points, length_uom, well_name)\n\n if add_as_properties and len(df.columns) > 3:\n # NB: atypical writing of hdf5 data and xml creation in order to support related properties\n self.write_hdf5()\n self.create_xml()\n if isinstance(add_as_properties, list):\n for col in add_as_properties:\n assert col in df.columns[3:] # could just skip missing columns\n property_columns = add_as_properties\n else:\n property_columns = df.columns[3:]\n self._add_df_properties(df, property_columns, length_uom = length_uom)\n\n return self\n\n def import_from_rms_cellio(self, cellio_file, well_name, grid, include_overburden_unblocked_interval = False):\n \"\"\"Populates empty blocked well from RMS cell I/O data; creates simulation trajectory and md datum.\n\n args:\n cellio_file (string): path of RMS ascii export file holding blocked well cell I/O data; cell entry and\n exit points are expected\n well_name (string): the name of the well as used in the cell I/O file\n grid (grid.Grid object): the grid object which the cell indices in the cell I/O data relate to\n\n returns:\n self if 
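# (illustrative aside, not resqpy API) A minimal standalone sketch of the cell-move
# heuristic used above when ANGLA/ANGLV are unavailable: given the previous and current
# cell (k, j, i) indices, choose an entry face as (axis, polarity), where axis 0, 1, 2
# correspond to K, J, I and polarity 0, 1 to the minus and plus faces; function and
# variable names here are illustrative only.
import numpy as np

def entry_face_from_move(previous_kji0, cell_kji0):
    """Return (axis, polarity) of the face through which the well enters cell_kji0."""
    move = np.asarray(cell_kji0) - np.asarray(previous_kji0)
    if move[1] == 0 and move[2] == 0:       # pure K move
        return 0, (0 if move[0] >= 0 else 1)
    if abs(move[1]) > abs(move[2]):         # J dominant move
        return 1, (0 if move[1] >= 0 else 1)
    return 2, (0 if move[2] >= 0 else 1)    # I dominant move

# example: stepping from cell (2, 5, 7) to (2, 5, 8) enters through the I- face
assert entry_face_from_move((2, 5, 7), (2, 5, 8)) == (2, 0)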
successful; None otherwise\n \"\"\"\n\n if well_name:\n self.well_name = well_name\n else:\n well_name = self.well_name\n\n grid_name = rqet.citation_title_for_node(grid.root)\n length_uom = grid.z_units()\n grid_z_inc_down = crs.Crs(grid.model, uuid = grid.crs_uuid).z_inc_down\n log.debug('grid z increasing downwards: ' + str(grid_z_inc_down) + '(type: ' + str(type(grid_z_inc_down)) + ')')\n cellio_z_inc_down = None\n\n try:\n assert ' ' not in well_name, 'cannot import for well name containing spaces'\n with open(cellio_file, 'r') as fp:\n while True:\n kf.skip_blank_lines_and_comments(fp)\n line = fp.readline() # file format version number?\n assert line, 'well ' + str(well_name) + ' not found in file ' + str(cellio_file)\n fp.readline() # 'Undefined'\n words = fp.readline().split()\n assert len(words), 'missing header info in cell I/O file'\n if words[0].upper() == well_name.upper():\n break\n while not kf.blank_line(fp):\n fp.readline() # skip to block of data for next well\n header_lines = int(fp.readline().strip())\n for _ in range(header_lines):\n fp.readline()\n previous_xyz = None\n trajectory_mds = []\n trajectory_points = [] # entries paired with trajectory_mds\n blocked_intervals = [\n ] # will have one fewer entries than trajectory nodes; 0 = blocked, -1 = not blocked (for grid indices)\n blocked_cells_kji0 = [] # will have length equal to number of 0's in blocked intervals\n blocked_face_pairs = [\n ] # same length as blocked_cells_kji0; each is ((entry axis, entry polarity), (exit axis, exit polarity))\n\n while not kf.blank_line(fp):\n\n line = fp.readline()\n words = line.split()\n assert len(words) >= 9, 'not enough items on data line in cell I/O file, minimum 9 expected'\n i1, j1, k1 = int(words[0]), int(words[1]), int(words[2])\n cell_kji0 = np.array((k1 - 1, j1 - 1, i1 - 1), dtype = int)\n assert np.all(0 <= cell_kji0) and np.all(\n cell_kji0 < grid.extent_kji), 'cell I/O cell index not within grid extent'\n entry_xyz = np.array((float(words[3]), float(words[4]), float(words[5])))\n exit_xyz = np.array((float(words[6]), float(words[7]), float(words[8])))\n if cellio_z_inc_down is None:\n cellio_z_inc_down = bool(entry_xyz[2] + exit_xyz[2] > 0.0)\n if cellio_z_inc_down != grid_z_inc_down:\n entry_xyz[2] = -entry_xyz[2]\n exit_xyz[2] = -exit_xyz[2]\n\n cp = grid.corner_points(cell_kji0 = cell_kji0, cache_resqml_array = False)\n assert not np.any(np.isnan(cp)), 'missing geometry for perforation cell(kji0) ' + str(\n cell_kji0) + ' for well ' + str(well_name)\n cell_centre = np.mean(cp, axis = (0, 1, 2))\n\n # let's hope everything is in the same coordinate reference system!\n entry_vector = 100.0 * (entry_xyz - cell_centre)\n exit_vector = 100.0 * (exit_xyz - cell_centre)\n (entry_axis, entry_polarity, facial_entry_xyz, exit_axis, exit_polarity,\n facial_exit_xyz) = find_entry_and_exit(cp, entry_vector, exit_vector, well_name)\n\n if previous_xyz is None: # first entry\n previous_xyz = entry_xyz.copy()\n if include_overburden_unblocked_interval:\n log.debug('adding mean sea level trajectory start')\n previous_xyz[2] = 0.0 # use depth zero as md datum\n trajectory_mds.append(previous_xyz[2])\n trajectory_points.append(previous_xyz)\n\n if not vec.isclose(previous_xyz, entry_xyz, tolerance = 0.05): # add an unblocked interval\n log.debug('adding unblocked interval')\n trajectory_points.append(entry_xyz)\n new_md = trajectory_mds[-1] + vec.naive_length(\n entry_xyz - previous_xyz) # assumes x, y & z units are same\n trajectory_mds.append(new_md)\n 
blocked_intervals.append(-1) # unblocked interval\n previous_xyz = entry_xyz\n\n log.debug('adding blocked interval for cell kji0: ' + str(cell_kji0))\n trajectory_points.append(exit_xyz)\n new_md = trajectory_mds[-1] + vec.naive_length(\n exit_xyz - previous_xyz) # assumes x, y & z units are same\n trajectory_mds.append(new_md)\n blocked_intervals.append(0) # blocked interval\n previous_xyz = exit_xyz\n blocked_cells_kji0.append(cell_kji0)\n blocked_face_pairs.append(((entry_axis, entry_polarity), (exit_axis, exit_polarity)))\n\n blocked_count = len(blocked_cells_kji0)\n if blocked_count == 0:\n log.warning('no intervals blocked for well ' + well_name + ' in grid ' + str(grid_name))\n return None\n else:\n log.info(\n str(blocked_count) + ' interval' + _pl(blocked_count) + ' blocked for well ' + well_name +\n ' in grid ' + str(grid_name))\n\n self.create_md_datum_and_trajectory(grid,\n trajectory_mds,\n trajectory_points,\n length_uom,\n well_name,\n set_depth_zero = True,\n set_tangent_vectors = True)\n\n self.node_count = len(trajectory_mds)\n self.node_mds = np.array(trajectory_mds)\n self.cell_count = len(blocked_cells_kji0)\n self.grid_indices = np.array(blocked_intervals,\n dtype = int) # NB. only supporting one grid at the moment\n self.cell_indices = grid.natural_cell_indices(np.array(blocked_cells_kji0))\n self.face_pair_indices = np.array(blocked_face_pairs)\n self.grid_list = [grid]\n\n except Exception:\n log.exception('failed to import info for blocked well ' + str(well_name) + ' from cell I/O file ' +\n str(cellio_file))\n return None\n\n return self\n\n def dataframe(self,\n i_col = 'IW',\n j_col = 'JW',\n k_col = 'L',\n one_based = True,\n extra_columns_list = [],\n ntg_uuid = None,\n perm_i_uuid = None,\n perm_j_uuid = None,\n perm_k_uuid = None,\n satw_uuid = None,\n sato_uuid = None,\n satg_uuid = None,\n region_uuid = None,\n radw = None,\n skin = None,\n stat = None,\n active_only = False,\n min_k0 = None,\n max_k0 = None,\n k0_list = None,\n min_length = None,\n min_kh = None,\n max_depth = None,\n max_satw = None,\n min_sato = None,\n max_satg = None,\n perforation_list = None,\n region_list = None,\n depth_inc_down = None,\n set_k_face_intervals_vertical = False,\n anglv_ref = 'normal ij down',\n angla_plane_ref = None,\n length_mode = 'MD',\n length_uom = None,\n use_face_centres = False,\n preferential_perforation = True,\n add_as_properties = False,\n use_properties = False):\n \"\"\"Returns a pandas data frame containing WELLSPEC style data.\n\n arguments:\n i_col (string, default 'IW'): the column name to use for cell I index values\n j_col (string, default 'JW'): the column name to use for cell J index values\n k_col (string, default 'L'): the column name to use for cell K index values\n one_based (boolean, default True): if True, simulator protocol i, j & k values are placed in I, J & K columns;\n if False, resqml zero based values; this does not affect the interpretation of min_k0 & max_k0 arguments\n extra_columns_list (list of string, optional): list of WELLSPEC column names to include in the dataframe, from currently\n recognised values: 'GRID', 'ANGLA', 'ANGLV', 'LENGTH', 'KH', 'DEPTH', 'MD', 'X', 'Y', 'RADW', 'SKIN', 'PPERF', 'RADB', 'WI', 'WBC'\n ntg_uuid (uuid.UUID, optional): the uuid of the net to gross ratio property; if present is used to downgrade the i & j\n permeabilities in the calculation of KH; ignored if 'KH' not in the extra column list and min_kh is not specified;\n the argument may also be a dictionary mapping from grid uuid to ntg 
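# (illustrative aside, not resqpy API) The cell I/O import above infers the z sign
# convention of the file from the first entry/exit pair and flips z values where it
# disagrees with the grid crs; a minimal standalone sketch of that normalisation,
# with illustrative names:
import numpy as np

def normalise_cellio_z(entry_xyz, exit_xyz, grid_z_inc_down):
    """Flip z of both points if the file's z convention appears to differ from the grid's."""
    entry_xyz = np.array(entry_xyz, dtype = float)
    exit_xyz = np.array(exit_xyz, dtype = float)
    cellio_z_inc_down = bool(entry_xyz[2] + exit_xyz[2] > 0.0)  # crude sign test, as above
    if cellio_z_inc_down != grid_z_inc_down:
        entry_xyz[2], exit_xyz[2] = -entry_xyz[2], -exit_xyz[2]
    return entry_xyz, exit_xyz

# example: positive-down depths in the file, z increasing upwards in the grid crs
e, x = normalise_cellio_z((100.0, 200.0, 2500.0), (100.0, 200.0, 2510.0), grid_z_inc_down = False)
assert e[2] == -2500.0 and x[2] == -2510.0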
uuid; if no net to gross data is provided, it\n is effectively assumed to be one (or, equivalently, the I & J permeability data is applicable to the gross rock); see\n also preferential_perforation argument which can cause adjustment of effective ntg in partially perforated cells\n perm_i_uuid (uuid.UUID or dictionary, optional): the uuid of the permeability property in the I direction;\n required if 'KH' is included in the extra columns list and min_kh is not specified; ignored otherwise;\n the argument may also be a dictionary mapping from grid uuid to perm I uuid\n perm_j_uuid (uuid.UUID, optional): the uuid (or dict) of the permeability property in the J direction;\n defaults to perm_i_uuid\n perm_k_uuid (uuid.UUID, optional): the uuid (or dict) of the permeability property in the K direction;\n defaults to perm_i_uuid\n satw_uuid (uuid.UUID, optional): the uuid of a water saturation property; required if max_satw is specified; may also\n be a dictionary mapping from grid uuid to satw uuid; ignored if max_satw is None\n sato_uuid (uuid.UUID, optional): the uuid of an oil saturation property; required if min_sato is specified; may also\n be a dictionary mapping from grid uuid to sato uuid; ignored if min_sato is None\n satg_uuid (uuid.UUID, optional): the uuid of a gas saturation property; required if max_satg is specified; may also\n be a dictionary mapping from grid uuid to satg uuid; ignored if max_satg is None\n region_uuid (uuid.UUID, optional): the uuid of a discrete or categorical property, required if region_list is not None;\n may also be a dictionary mapping from grid uuid to region uuid; ignored if region_list is None\n radw (float, optional): if present, the wellbore radius used for all perforations; must be in correct units for intended\n use of the WELLSPEC style dataframe; will default to 0.25 if 'RADW' is included in the extra column list\n skin (float, optional): if present, a skin column is included with values set to this constant\n stat (string, optional): if present, should be 'ON' or 'OFF' and is used for all perforations; will default to 'ON' if\n 'STAT' is included in the extra column list\n active_only (boolean, default False): if True, only cells that are flagged in the grid object as active are included;\n if False, cells are included whether active or not\n min_k0 (int, optional): if present, perforations in layers above this are excluded (layer number will be applied\n naively to all grids – not recommended when working with more than one grid with different layering)\n max_k0 (int, optional): if present, perforations in layers below this are excluded (layer number will be applied\n naively to all grids – not recommended when working with more than one grid with different layering)\n k0_list (list of int, optional): if present, only perforations in cells in these layers are included (layer numbers\n will be applied naively to all grids – not recommended when working with more than one grid with different layering)\n min_length (float, optional): if present, a minimum length for an individual perforation interval to be included;\n units are the length units of the trajectory object unless length_uom argument is set\n min_kh (float, optional): if present, the minimum permeability x length value for which an individual interval is\n included; permeabilty uuid(s) must be supplied for the kh calculation; units of the length component are those\n of the trajectory object unless length_uom argument is set\n max_depth (float, optional): if present, rows are excluded 
for cells with a centre point depth greater than this value;\n max_depth should be positive downwards, with units of measure those of the grid z coordinates\n max_satw (float, optional): if present, perforations in cells where the water saturation exceeds this value will\n be excluded; satw_uuid must be supplied if this argument is present\n min_sato (float, optional): if present, perforations in cells where the oil saturation is less than this value will\n be excluded; sato_uuid must be supplied if this argument is present\n max_satg (float, optional): if present, perforations in cells where the gas saturation exceeds this value will\n be excluded; satg_uuid must be supplied if this argument is present\n perforation_list (list of (float, float), optional): if present, a list of perforated intervals; each entry is the\n start and end measured depths for a perforation; these do not need to align with cell boundaries\n region_list (list of int, optional): if present, a list of region numbers for which rows are to be included; the\n property holding the region data is identified by the region_uuid argument\n depth_inc_down (boolean, optional): if present and True, the depth values will increase with depth; if False or None,\n the direction of the depth values will be determined by the z increasing downwards indicator in the trajectory crs\n set_k_face_intervals_vertical (boolean, default False): if True, intervals with entry through K- and exit through K+\n will have angla and anglv set to 0.0 (vertical); if False angles will be computed depending on geometry\n anglv_ref (string, default 'normal ij down'): either 'gravity', 'z down' (same as gravity), 'z+', 'k down', 'k+',\n 'normal ij', or 'normal ij down';\n the ANGLV angles are relative to a local (per cell) reference vector selected by this keyword\n angla_plane_ref (string, optional): string indicating normal vector defining plane onto which trajectory and I axis are\n projected for the calculation of ANGLA; options as for anglv_ref, or 'normal well i+' which results in no projection;\n defaults to the same as anglv_ref\n length_mode (string, default 'MD'): 'MD' or 'straight' indicating which length to use; 'md' takes measured depth\n difference between exit and entry; 'straight' uses a naive straight line length between entry and exit;\n this will affect values for LENGTH, KH, DEPTH, X & Y\n length_uom (string, optional): if present, either 'm' or 'ft': the length units to use for the LENGTH, KH, MD, DEPTH,\n X & Y columns if they are present in extra_columns_list; also used to interpret min_length and min_kh; if None, the\n length units of the trajectory attribute are used LENGTH, KH & MD and those of the grid are used for DEPTH, X & Y;\n RADW value, if present, is assumed to be in the correct units and is not changed; also used implicitly to determine\n conversion constant used in calculation of wellbore constant (WBC)\n use_face_centres (boolean, default False): if True, the centre points of the entry and exit faces will determine the\n vector used as the basis of ANGLA and ANGLV calculations; if False, the trajectory locations for the entry and exit\n measured depths will be used\n preferential_perforation (boolean, default True): if perforation_list is given, and KH is requested or a min_kh given,\n the perforated intervals are assumed to penetrate pay rock preferentially: an effective ntg weighting is computed\n to account for any residual non-pay perforated interval; ignored if perforation_list is None or kh values are not\n 
being computed\n add_as_properties (boolean or list of str, default False): if True, each column in the extra_columns_list (excluding\n GRID and STAT) is added as a property with the blocked well as supporting representation and 'cells' as the\n indexable element; any cell that is excluded from the dataframe will have corresponding entries of NaN in all the\n properties; if a list is provided it must be a subset of extra_columns_list\n use_properties (boolean or list of str, default False): if True, each column in the extra_columns_list (excluding\n GRID and STAT) is populated from a property with citation title matching the column name, if it exists\n\n notes:\n units of length along wellbore will be those of the trajectory's length_uom (also applies to K.H values) unless\n the length_uom argument is used;\n the constraints are applied independently for each row and a row is excluded if it fails any constraint;\n the min_k0 and max_k0 arguments do not stop later rows within the layer range from being included;\n the min_length and min_kh limits apply to individual cell intervals and thus depend on cell size;\n the water and oil saturation limits are for saturations at a single time and affect whether the interval\n is included in the dataframe – there is no functionality to support turning perforations off and on over time;\n the saturation limits do not stop deeper intervals with qualifying saturations from being included;\n the k0_list, perforation_list and region_list arguments should be set to None to disable the corresponding functionality,\n if set to an empty list, no rows will be included in the dataframe;\n if add_as_properties is True, the blocked well must already have been added as a part to the model;\n at add_as_properties and use_properties cannot both be True;\n add_as_properties and use_properties are only currently functional for single grid blocked wells;\n at present, unit conversion is not handled when using properties\n\n :meta common:\n \"\"\"\n\n def prop_array(uuid_or_dict, grid):\n assert uuid_or_dict is not None and grid is not None\n if isinstance(uuid_or_dict, dict):\n prop_uuid = uuid_or_dict[grid.uuid]\n else:\n prop_uuid = uuid_or_dict # uuid either in form of string or uuid.UUID\n return grid.property_collection.single_array_ref(uuid = prop_uuid)\n\n def get_ref_vector(grid, grid_crs, cell_kji0, mode):\n # gravity = np.array((0.0, 0.0, 1.0))\n if mode == 'normal well i+':\n return None # ANGLA only: option for no projection onto a plane\n ref_vector = None\n # options for anglv or angla reference: 'z down', 'z+', 'k down', 'k+', 'normal ij', 'normal ij down'\n cell_axial_vectors = None\n if not mode.startswith('z'):\n cell_axial_vectors = grid.interface_vectors_kji(cell_kji0)\n if mode == 'z+':\n ref_vector = np.array((0.0, 0.0, 1.0))\n elif mode == 'z down':\n if grid_crs.z_inc_down:\n ref_vector = np.array((0.0, 0.0, 1.0))\n else:\n ref_vector = np.array((0.0, 0.0, -1.0))\n elif mode in ['k+', 'k down']:\n ref_vector = vec.unit_vector(cell_axial_vectors[0])\n if mode == 'k down' and not grid.k_direction_is_down:\n ref_vector = -ref_vector\n else: # normal to plane of ij axes\n ref_vector = vec.unit_vector(vec.cross_product(cell_axial_vectors[1], cell_axial_vectors[2]))\n if mode == 'normal ij down':\n if grid_crs.z_inc_down:\n if ref_vector[2] < 0.0:\n ref_vector = -ref_vector\n else:\n if ref_vector[2] > 0.0:\n ref_vector = -ref_vector\n if ref_vector is None or ref_vector[2] == 0.0:\n if grid_crs.z_inc_down:\n ref_vector = np.array((0.0, 0.0, 1.0))\n 
else:\n ref_vector = np.array((0.0, 0.0, -1.0))\n return ref_vector\n\n assert length_mode in ['MD', 'straight']\n assert length_uom is None or length_uom in ['m', 'ft']\n assert anglv_ref in ['gravity', 'z down', 'z+', 'k down', 'k+', 'normal ij', 'normal ij down']\n if anglv_ref == 'gravity':\n anglv_ref = 'z down'\n if angla_plane_ref is None:\n angla_plane_ref = anglv_ref\n assert angla_plane_ref in [\n 'gravity', 'z down', 'z+', 'k down', 'k+', 'normal ij', 'normal ij down', 'normal well i+'\n ]\n if angla_plane_ref == 'gravity':\n angla_plane_ref = 'z down'\n column_list = [i_col, j_col, k_col]\n if extra_columns_list:\n for extra in extra_columns_list:\n assert extra.upper() in [\n 'GRID', 'ANGLA', 'ANGLV', 'LENGTH', 'KH', 'DEPTH', 'MD', 'X', 'Y', 'SKIN', 'RADW', 'PPERF', 'RADB',\n 'WI', 'WBC'\n ]\n column_list.append(extra.upper())\n else:\n add_as_properties = use_properties = False\n assert not (add_as_properties and use_properties)\n pc = rqp.PropertyCollection(support = self) if use_properties else None\n pc_titles = [] if pc is None else pc.titles()\n isotropic_perm = None\n if min_length is not None and min_length <= 0.0:\n min_length = None\n if min_kh is not None and min_kh <= 0.0:\n min_kh = None\n if max_satw is not None and max_satw >= 1.0:\n max_satw = None\n if min_sato is not None and min_sato <= 0.0:\n min_sato = None\n if max_satg is not None and max_satg >= 1.0:\n max_satg = None\n doing_kh = False\n if ('KH' in column_list or min_kh is not None) and 'KH' not in pc_titles:\n assert perm_i_uuid is not None, 'KH requested (or minimum specified) without I direction permeabilty being specified'\n doing_kh = True\n if 'WBC' in column_list and 'WBC' not in pc_titles:\n assert perm_i_uuid is not None, 'WBC requested without I direction permeabilty being specified'\n doing_kh = True\n do_well_inflow = (('WI' in column_list and 'WI' not in pc_titles) or\n ('WBC' in column_list and 'WBC' not in pc_titles) or\n ('RADB' in column_list and 'RADB' not in pc_titles))\n if do_well_inflow:\n assert perm_i_uuid is not None, 'WI, RADB or WBC requested without I direction permeabilty being specified'\n if doing_kh or do_well_inflow:\n if perm_j_uuid is None and perm_k_uuid is None:\n isotropic_perm = True\n else:\n if perm_j_uuid is None:\n perm_j_uuid = perm_i_uuid\n if perm_k_uuid is None:\n perm_k_uuid = perm_i_uuid\n # following line assumes arguments are passed in same form; if not, some unnecessary maths might be done\n isotropic_perm = (bu.matching_uuids(perm_i_uuid, perm_j_uuid) and\n bu.matching_uuids(perm_i_uuid, perm_k_uuid))\n if max_satw is not None:\n assert satw_uuid is not None, 'water saturation limit specified without saturation property array'\n if min_sato is not None:\n assert sato_uuid is not None, 'oil saturation limit specified without saturation property array'\n if max_satg is not None:\n assert satg_uuid is not None, 'gas saturation limit specified without saturation property array'\n if region_list is not None:\n assert region_uuid is not None, 'region list specified without region property array'\n if radw is not None and 'RADW' not in column_list:\n column_list.append('RADW')\n if radw is None:\n radw = 0.25\n if skin is not None and 'SKIN' not in column_list:\n column_list.append('SKIN')\n if skin is None:\n skin = 0.0\n if stat is not None:\n assert str(stat).upper() in ['ON', 'OFF']\n stat = str(stat).upper()\n if 'STAT' not in column_list:\n column_list.append('STAT')\n else:\n stat = 'ON'\n if 'GRID' not in column_list and self.number_of_grids() > 
1:\n log.error('creating blocked well dataframe without GRID column for well that intersects more than one grid')\n if 'LENGTH' in column_list and 'PPERF' in column_list and 'KH' not in column_list and perforation_list is not None:\n log.warning(\n 'both LENGTH and PPERF will include effects of partial perforation; only one should be used in WELLSPEC'\n )\n elif (perforation_list is not None and 'LENGTH' not in column_list and 'PPERF' not in column_list and\n 'KH' not in column_list and 'WBC' not in column_list):\n log.warning('perforation list supplied but no use of LENGTH, KH, PPERF nor WBC')\n if min_k0 is None:\n min_k0 = 0\n else:\n assert min_k0 >= 0\n if max_k0 is not None:\n assert min_k0 <= max_k0\n if k0_list is not None and len(k0_list) == 0:\n log.warning('no layers included for blocked well dataframe: no rows will be included')\n if perforation_list is not None and len(perforation_list) == 0:\n log.warning('empty perforation list specified for blocked well dataframe: no rows will be included')\n doing_angles = (('ANGLA' in column_list and 'ANGLA' not in pc_titles) or\n ('ANGLV' in column_list and 'ANGLV' not in pc_titles) or doing_kh or do_well_inflow)\n doing_xyz = (('X' in column_list and 'X' not in pc_titles) or ('Y' in column_list and 'Y' not in pc_titles) or\n ('DEPTH' in column_list and 'DEPTH' not in pc_titles))\n doing_entry_exit = doing_angles or ('LENGTH' in column_list and 'LENGTH' not in pc_titles and\n length_mode == 'straight')\n grid_crs_list = []\n for grid in self.grid_list:\n grid_crs = crs.Crs(self.model, uuid = grid.crs_uuid)\n grid_crs_list.append(grid_crs)\n if grid_crs.z_units != grid_crs.xy_units and (len(column_list) > 1 or\n (len(column_list) == 1 and\n column_list[0] != 'GRID')) is not None:\n log.error('grid ' + str(rqet.citation_title_for_node(grid.root_node)) +\n ' has z units different to xy units: some WELLSPEC data likely to be wrong')\n k_face_check = np.zeros((2, 2), dtype = int)\n k_face_check[1, 1] = 1 # now represents entry, exit of K-, K+\n k_face_check_end = k_face_check.copy()\n k_face_check_end[1] = -1 # entry through K-, terminating (TD) within cell\n if self.trajectory is None or self.trajectory.crs_root is None:\n traj_crs = None\n traj_z_inc_down = None\n else:\n traj_crs = crs.Crs(self.trajectory.model, uuid = self.trajectory.crs_uuid)\n assert traj_crs.xy_units == traj_crs.z_units\n traj_z_inc_down = traj_crs.z_inc_down\n\n df = pd.DataFrame(columns = column_list)\n df = df.astype({i_col: int, j_col: int, k_col: int})\n\n ci = -1\n row_ci_list = []\n if self.node_count is None or self.node_count < 2:\n interval_count = 0\n else:\n interval_count = self.node_count - 1\n for interval in range(interval_count):\n if self.grid_indices[interval] < 0:\n continue # unblocked interval\n ci += 1\n row_dict = {}\n grid = self.grid_list[self.grid_indices[interval]]\n grid_crs = grid_crs_list[self.grid_indices[interval]]\n grid_name = rqet.citation_title_for_node(grid.root).replace(' ', '_')\n natural_cell = self.cell_indices[ci]\n cell_kji0 = grid.denaturalized_cell_index(natural_cell)\n tuple_kji0 = tuple(cell_kji0)\n if max_depth is not None:\n cell_depth = grid.centre_point(cell_kji0)[2]\n if not grid_crs.z_inc_down:\n cell_depth = -cell_depth\n if cell_depth > max_depth:\n continue\n if active_only and grid.inactive is not None and grid.inactive[tuple_kji0]:\n continue\n if (min_k0 is not None and cell_kji0[0] < min_k0) or (max_k0 is not None and cell_kji0[0] > max_k0):\n continue\n if k0_list is not None and cell_kji0[0] not in 
k0_list:\n continue\n if region_list is not None and prop_array(region_uuid, grid)[tuple_kji0] not in region_list:\n continue\n if max_satw is not None and prop_array(satw_uuid, grid)[tuple_kji0] > max_satw:\n continue\n if min_sato is not None and prop_array(sato_uuid, grid)[tuple_kji0] < min_sato:\n continue\n if max_satg is not None and prop_array(satg_uuid, grid)[tuple_kji0] > max_satg:\n continue\n if 'PPERF' in pc_titles:\n part_perf_fraction = pc.single_array_ref(citation_title = 'PPERF')[ci]\n else:\n part_perf_fraction = 1.0\n if perforation_list is not None:\n perf_length = 0.0\n for perf_start, perf_end in perforation_list:\n if perf_end <= self.node_mds[interval] or perf_start >= self.node_mds[interval + 1]:\n continue\n if perf_start <= self.node_mds[interval]:\n if perf_end >= self.node_mds[interval + 1]:\n perf_length += self.node_mds[interval + 1] - self.node_mds[interval]\n break\n else:\n perf_length += perf_end - self.node_mds[interval]\n else:\n if perf_end >= self.node_mds[interval + 1]:\n perf_length += self.node_mds[interval + 1] - perf_start\n else:\n perf_length += perf_end - perf_start\n if perf_length == 0.0:\n continue\n part_perf_fraction = min(1.0, perf_length / (self.node_mds[interval + 1] - self.node_mds[interval]))\n# log.debug('kji0: ' + str(cell_kji0))\n entry_xyz = None\n exit_xyz = None\n if doing_entry_exit:\n assert self.trajectory is not None\n if use_face_centres:\n entry_xyz = grid.face_centre(cell_kji0, self.face_pair_indices[interval, 0, 0],\n self.face_pair_indices[interval, 0, 1])\n if self.face_pair_indices[interval, 1, 0] >= 0:\n exit_xyz = grid.face_centre(cell_kji0, self.face_pair_indices[interval, 1, 0],\n self.face_pair_indices[interval, 1, 1])\n else:\n exit_xyz = grid.face_centre(cell_kji0, self.face_pair_indices[interval, 0, 0],\n 1 - self.face_pair_indices[interval, 0, 1])\n ee_crs = grid_crs\n else:\n entry_xyz = self.trajectory.xyz_for_md(self.node_mds[interval])\n exit_xyz = self.trajectory.xyz_for_md(self.node_mds[interval + 1])\n ee_crs = traj_crs\n if length_mode == 'MD':\n length = self.node_mds[interval + 1] - self.node_mds[interval]\n if length_uom is not None and self.trajectory is not None and length_uom != self.trajectory.md_uom:\n length = bwam.convert_lengths(length, self.trajectory.md_uom, length_uom)\n else: # use straight line length between entry and exit\n length = vec.naive_length(np.array(exit_xyz) -\n np.array(entry_xyz)) # trajectory crs, unless use_face_centres!\n if length_uom is not None:\n length = bwam.convert_lengths(length, ee_crs.z_units, length_uom)\n elif self.trajectory is not None:\n length = bwam.convert_lengths(length, ee_crs.z_units, self.trajectory.md_uom)\n if perforation_list is not None:\n length *= part_perf_fraction\n if min_length is not None and length < min_length:\n continue\n sine_anglv = sine_angla = 0.0\n cosine_anglv = cosine_angla = 1.0\n xyz = (np.NaN, np.NaN, np.NaN)\n md = 0.5 * (self.node_mds[interval + 1] + self.node_mds[interval])\n anglv = pc.single_array_ref(citation_title = 'ANGLV')[ci] if 'ANGLV' in pc_titles else None\n angla = pc.single_array_ref(citation_title = 'ANGLA')[ci] if 'ANGLA' in pc_titles else None\n if doing_angles and not (set_k_face_intervals_vertical and\n (np.all(self.face_pair_indices[ci] == k_face_check) or\n np.all(self.face_pair_indices[ci] == k_face_check_end))):\n vector = vec.unit_vector(np.array(exit_xyz) -\n np.array(entry_xyz)) # nominal wellbore vector for interval\n if traj_z_inc_down is not None and traj_z_inc_down != grid_crs.z_inc_down:\n 
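# (illustrative aside, not resqpy API) The partial perforation fraction computed above is
# the measured-depth overlap between the cell interval and the perforated intervals,
# capped at 1.0; a compact standalone equivalent, assuming the perforation intervals do
# not overlap one another:
def perforated_fraction(md_top, md_base, perforation_list):
    """Return the fraction of [md_top, md_base] covered by (start, end) perforations."""
    interval_length = md_base - md_top
    if interval_length <= 0.0 or not perforation_list:
        return 0.0
    covered = sum(max(0.0, min(md_base, p_end) - max(md_top, p_start))
                  for p_start, p_end in perforation_list)
    return min(1.0, covered / interval_length)

# example: a 10 unit cell interval with 4 units of it perforated
assert abs(perforated_fraction(1000.0, 1010.0, [(995.0, 1002.0), (1008.0, 1020.0)]) - 0.4) < 1e-9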
vector[2] = -vector[2]\n v_ref_vector = get_ref_vector(grid, grid_crs, cell_kji0, anglv_ref)\n # log.debug('v ref vector: ' + str(v_ref_vector))\n if angla_plane_ref == anglv_ref:\n a_ref_vector = v_ref_vector\n else:\n a_ref_vector = get_ref_vector(grid, grid_crs, cell_kji0, angla_plane_ref)\n # log.debug('a ref vector: ' + str(a_ref_vector))\n if anglv is not None:\n anglv_rad = vec.radians_from_degrees(anglv)\n cosine_anglv = maths.cos(anglv_rad)\n sine_anglv = maths.sin(anglv_rad)\n else:\n cosine_anglv = min(max(vec.dot_product(vector, v_ref_vector), -1.0), 1.0)\n anglv_rad = maths.acos(cosine_anglv)\n sine_anglv = maths.sin(anglv_rad)\n anglv = vec.degrees_from_radians(anglv_rad)\n# log.debug('anglv: ' + str(anglv))\n if anglv != 0.0:\n # project well vector and i-axis vector onto plane defined by normal vector a_ref_vector\n i_axis = grid.interface_vector(cell_kji0, 2)\n i_axis = vec.unit_vector(i_axis)\n if a_ref_vector is not None: # project vector and i axis onto a plane\n vector -= vec.dot_product(vector, a_ref_vector) * a_ref_vector\n vector = vec.unit_vector(vector)\n # log.debug('i axis unit vector: ' + str(i_axis))\n i_axis -= vec.dot_product(i_axis, a_ref_vector) * a_ref_vector\n i_axis = vec.unit_vector(i_axis)\n# log.debug('i axis unit vector in reference plane: ' + str(i_axis))\n if angla is not None:\n angla_rad = vec.radians_from_degrees(angla)\n cosine_angla = maths.cos(angla_rad)\n sine_angla = maths.sin(angla_rad)\n else:\n cosine_angla = min(max(vec.dot_product(vector, i_axis), -1.0), 1.0)\n angla_rad = maths.acos(cosine_angla)\n # negate angla if vector is 'clockwise from' i_axis when viewed from above, projected in the xy plane\n # todo: have discussion around angla sign under different ijk handedness (and z inc direction?)\n sine_angla = maths.sin(angla_rad)\n angla = vec.degrees_from_radians(angla_rad)\n if vec.clockwise((0.0, 0.0), i_axis, vector) > 0.0:\n angla = -angla\n angle_rad = -angla_rad\n sine_angla = -sine_angla\n\n\n# log.debug('angla: ' + str(angla))\n else:\n if angla is None:\n angla = 0.0\n if anglv is None:\n anglv = 0.0\n if doing_kh or do_well_inflow:\n if ntg_uuid is None:\n ntg = 1.0\n ntg_is_one = True\n else:\n ntg = prop_array(ntg_uuid, grid)[tuple_kji0]\n ntg_is_one = maths.isclose(ntg, 1.0, rel_tol = 0.001)\n if isotropic_perm and ntg_is_one:\n k_i = k_j = k_k = prop_array(perm_i_uuid, grid)[tuple_kji0]\n else:\n if preferential_perforation and not ntg_is_one:\n if part_perf_fraction <= ntg:\n ntg = 1.0 # effective ntg when perforated intervals are in pay\n else:\n ntg /= part_perf_fraction # adjusted ntg when some perforations in non-pay\n # todo: check netgross facet type in property perm i & j parts: if set to gross then don't multiply by ntg below\n k_i = prop_array(perm_i_uuid, grid)[tuple_kji0] * ntg\n k_j = prop_array(perm_j_uuid, grid)[tuple_kji0] * ntg\n k_k = prop_array(perm_k_uuid, grid)[tuple_kji0]\n if doing_kh:\n if isotropic_perm and ntg_is_one:\n kh = length * prop_array(perm_i_uuid, grid)[tuple_kji0]\n else:\n if np.isnan(k_i) or np.isnan(k_j):\n kh = 0.0\n elif anglv == 0.0:\n kh = length * maths.sqrt(k_i * k_j)\n elif np.isnan(k_k):\n kh = 0.0\n else:\n k_e = maths.pow(k_i * k_j * k_k, 1.0 / 3.0)\n if k_e == 0.0:\n kh = 0.0\n else:\n l_i = length * maths.sqrt(k_e / k_i) * sine_anglv * cosine_angla\n l_j = length * maths.sqrt(k_e / k_j) * sine_anglv * sine_angla\n l_k = length * maths.sqrt(k_e / k_k) * cosine_anglv\n l_p = maths.sqrt(l_i * l_i + l_j * l_j + l_k * l_k)\n kh = k_e * l_p\n if min_kh is not None and kh < 
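# (illustrative aside, not resqpy API) A standalone sketch of the anisotropic K.H formula
# applied above: an effective permeability k_e = (k_i * k_j * k_k) ** (1/3) scales a
# permeability-weighted projected length; anglv is measured from the local reference
# vector and angla within the projection plane, both in degrees here.
import math

def anisotropic_kh(length, k_i, k_j, k_k, anglv_deg, angla_deg):
    if k_i <= 0.0 or k_j <= 0.0 or k_k <= 0.0:
        return 0.0
    k_e = (k_i * k_j * k_k) ** (1.0 / 3.0)
    anglv, angla = math.radians(anglv_deg), math.radians(angla_deg)
    l_i = length * math.sqrt(k_e / k_i) * math.sin(anglv) * math.cos(angla)
    l_j = length * math.sqrt(k_e / k_j) * math.sin(anglv) * math.sin(angla)
    l_k = length * math.sqrt(k_e / k_k) * math.cos(anglv)
    return k_e * math.sqrt(l_i * l_i + l_j * l_j + l_k * l_k)

# isotropic check: with k_i == k_j == k_k the result reduces to length * k for any angles
assert abs(anisotropic_kh(10.0, 100.0, 100.0, 100.0, 37.0, 12.0) - 1000.0) < 1e-6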
min_kh:\n continue\n elif 'KH' in pc_titles:\n kh = pc.single_array_ref(citation_title = 'KH')[ci]\n else:\n kh = None\n if 'LENGTH' in pc_titles:\n length = pc.single_array_ref(citation_title = 'LENGTH')[ci]\n if 'RADW' in pc_titles:\n radw = pc.single_array_ref(citation_title = 'RADW')[ci]\n assert radw > 0.0\n if 'SKIN' in pc_titles:\n skin = pc.single_array_ref(citation_title = 'SKIN')[ci]\n radb = wi = wbc = None\n if 'RADB' in pc_titles:\n radb = pc.single_array_ref(citation_title = 'RADB')[ci]\n if 'WI' in pc_titles:\n wi = pc.single_array_ref(citation_title = 'WI')[ci]\n if 'WBC' in pc_titles:\n wbc = pc.single_array_ref(citation_title = 'WBC')[ci]\n if do_well_inflow:\n if isotropic_perm and ntg_is_one:\n k_ei = k_ej = k_ek = k_i\n radw_e = radw\n else:\n k_ei = maths.sqrt(k_j * k_k)\n k_ej = maths.sqrt(k_i * k_k)\n k_ek = maths.sqrt(k_i * k_j)\n r_wi = 0.0 if k_ei == 0.0 else 0.5 * radw * (maths.sqrt(k_ei / k_j) + maths.sqrt(k_ei / k_k))\n r_wj = 0.0 if k_ej == 0.0 else 0.5 * radw * (maths.sqrt(k_ej / k_i) + maths.sqrt(k_ej / k_k))\n r_wk = 0.0 if k_ek == 0.0 else 0.5 * radw * (maths.sqrt(k_ek / k_i) + maths.sqrt(k_ek / k_j))\n rwi = r_wi * sine_anglv * cosine_angla\n rwj = r_wj * sine_anglv * sine_angla\n rwk = r_wk * cosine_anglv\n radw_e = maths.sqrt(rwi * rwi + rwj * rwj + rwk * rwk)\n if radw_e == 0.0:\n radw_e = radw # no permeability in this situation anyway\n cell_axial_vectors = grid.interface_vectors_kji(cell_kji0)\n d2 = np.empty(3)\n for axis in range(3):\n d2[axis] = np.sum(cell_axial_vectors[axis] * cell_axial_vectors[axis])\n r_bi = 0.0 if k_ei == 0.0 else 0.14 * maths.sqrt(k_ei * (d2[1] / k_j + d2[0] / k_k))\n r_bj = 0.0 if k_ej == 0.0 else 0.14 * maths.sqrt(k_ej * (d2[2] / k_i + d2[0] / k_k))\n r_bk = 0.0 if k_ek == 0.0 else 0.14 * maths.sqrt(k_ek * (d2[2] / k_i + d2[1] / k_j))\n rbi = r_bi * sine_anglv * cosine_angla\n rbj = r_bj * sine_anglv * sine_angla\n rbk = r_bk * cosine_anglv\n radb_e = maths.sqrt(rbi * rbi + rbj * rbj + rbk * rbk)\n if radb is None:\n radb = radw * radb_e / radw_e\n if wi is None:\n wi = 0.0 if radb <= 0.0 else 2.0 * maths.pi / (maths.log(radb / radw) + skin)\n if 'WBC' in column_list and wbc is None:\n conversion_constant = 8.5270171e-5 if length_uom == 'm' else 0.006328286\n wbc = conversion_constant * kh * wi # note: pperf aleady accounted for in kh\n if doing_xyz:\n if length_mode == 'MD' and self.trajectory is not None:\n xyz = self.trajectory.xyz_for_md(md)\n if length_uom is not None and length_uom != self.trajectory.md_uom:\n bwam.convert_lengths(xyz, traj_crs.z_units, length_uom)\n if depth_inc_down and traj_z_inc_down is False:\n xyz[2] = -xyz[2]\n else:\n xyz = 0.5 * (np.array(exit_xyz) + np.array(entry_xyz))\n if length_uom is not None and length_uom != ee_crs.z_units:\n bwam.convert_lengths(xyz, ee_crs.z_units, length_uom)\n if depth_inc_down and ee_crs.z_inc_down is False:\n xyz[2] = -xyz[2]\n xyz = np.array(xyz)\n if 'X' in pc_titles:\n xyz[0] = pc.single_array_ref(citation_title = 'X')[ci]\n if 'Y' in pc_titles:\n xyz[1] = pc.single_array_ref(citation_title = 'Y')[ci]\n if 'DEPTH' in pc_titles:\n xyz[2] = pc.single_array_ref(citation_title = 'DEPTH')[ci]\n if length_uom is not None and self.trajectory is not None and length_uom != self.trajectory.md_uom:\n md = bwam.convert_lengths(md, self.trajectory.md_uom, length_uom)\n if 'MD' in pc_titles:\n md = pc.single_array_ref(citation_title = 'MD')[ci]\n for col_index in range(len(column_list)):\n column = column_list[col_index]\n if col_index < 3:\n if one_based:\n 
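# (illustrative aside, not resqpy API) The closing step of the inflow calculation above,
# as a standalone sketch: a Peaceman-style well index from the equivalent block radius
# radb, wellbore radius radw and skin, and a wellbore constant from kh and that well
# index; the unit conversion constants are the ones used above for 'm' and 'ft'.
import math

def well_index(radb, radw, skin):
    if radb <= 0.0 or radw <= 0.0:
        return 0.0
    return 2.0 * math.pi / (math.log(radb / radw) + skin)

def wellbore_constant(kh, wi, length_uom):
    conversion_constant = 8.5270171e-5 if length_uom == 'm' else 0.006328286
    return conversion_constant * kh * wi

# example with hypothetical values
wi = well_index(radb = 50.0, radw = 0.25, skin = 0.0)
wbc = wellbore_constant(kh = 1000.0, wi = wi, length_uom = 'm')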
row_dict[column] = cell_kji0[2 - col_index] + 1\n else:\n row_dict[column] = cell_kji0[2 - col_index]\n elif column == 'GRID':\n row_dict['GRID'] = grid_name # todo: worry about spaces and quotes\n elif column == 'RADW':\n row_dict['RADW'] = radw\n elif column == 'SKIN':\n row_dict['SKIN'] = skin\n elif column == 'ANGLA':\n row_dict['ANGLA'] = angla\n elif column == 'ANGLV':\n row_dict['ANGLV'] = anglv\n elif column == 'LENGTH':\n # note: length units are those of trajectory length uom if length mode is MD and length_uom is None\n row_dict['LENGTH'] = length\n elif column == 'KH':\n row_dict['KH'] = kh\n elif column == 'DEPTH':\n row_dict['DEPTH'] = xyz[2]\n elif column == 'MD':\n row_dict['MD'] = md\n elif column == 'X':\n row_dict['X'] = xyz[0]\n elif column == 'Y':\n row_dict['Y'] = xyz[1]\n elif column == 'STAT':\n row_dict['STAT'] = stat\n elif column == 'PPERF':\n row_dict['PPERF'] = part_perf_fraction\n elif column == 'RADB':\n row_dict['RADB'] = radb\n elif column == 'WI':\n row_dict['WI'] = wi\n elif column == 'WBC': # note: not a valid WELLSPEC column name\n row_dict['WBC'] = wbc\n df = df.append(row_dict, ignore_index = True)\n row_ci_list.append(ci)\n\n if add_as_properties:\n if isinstance(add_as_properties, list):\n for col in add_as_properties:\n assert col in extra_columns_list\n property_columns = add_as_properties\n else:\n property_columns = extra_columns_list\n self._add_df_properties(df, property_columns, row_ci_list = row_ci_list, length_uom = length_uom)\n\n return df\n\n def _add_df_properties(self, df, columns, row_ci_list = None, length_uom = None):\n # creates a property part for each named column, based on the dataframe values\n # column name used as the citation title\n # self must already exist as a part in the model\n # currently only handles single grid situations\n # todo: rewrite to add separate property objects for each grid references by the blocked well\n log.debug('_add_df_props: df:')\n log.debug(f'\\n{df}')\n log.debug(f'columns: {columns}')\n assert len(self.grid_list) == 1\n if columns is None or len(columns) == 0 or len(df) == 0:\n return\n if row_ci_list is None:\n row_ci_list = np.arange(self.cell_count)\n assert len(row_ci_list) == len(df)\n if length_uom is None:\n length_uom = self.trajectory.md_uom\n extra_pc = rqp.PropertyCollection()\n extra_pc.set_support(support = self)\n ci_map = np.array(row_ci_list, dtype = int)\n for e in columns:\n extra = e.upper()\n if extra in ['GRID', 'STAT']:\n continue # todo: other non-numeric columns may need to be added to this list\n pk = 'continuous'\n uom = 'Euc'\n if extra in ['ANGLA', 'ANGLV']:\n uom = 'dega'\n # neither azimuth nor dip are correct property kinds; todo: create local property kinds\n pk = 'azimuth' if extra == 'ANGLA' else 'inclination'\n elif extra in ['LENGTH', 'MD', 'X', 'Y', 'DEPTH', 'RADW']:\n if length_uom is None or length_uom == 'Euc':\n if extra in ['LENGTH', 'MD']:\n uom = self.trajectory.md_uom\n elif extra in ['X', 'Y', 'RADW']:\n uom = self.grid_list[0].xy_units()\n else:\n uom = self.grid_list[0].z_units()\n else:\n uom = length_uom\n if extra == 'DEPTH':\n pk = 'depth'\n else:\n pk = 'length'\n elif extra == 'KH':\n uom = 'mD.' 
+ length_uom\n pk = 'permeability length'\n elif extra == 'PPERF':\n uom = length_uom + '/' + length_uom\n else:\n uom = 'Euc'\n # 'SKIN': use defaults for now; todo: create local property kind for skin\n expanded = np.full(self.cell_count, np.NaN)\n expanded[ci_map] = df[extra]\n extra_pc.add_cached_array_to_imported_list(expanded,\n 'blocked well dataframe',\n extra,\n discrete = False,\n uom = uom,\n property_kind = pk,\n local_property_kind_uuid = None,\n facet_type = None,\n facet = None,\n realization = None,\n indexable_element = 'cells',\n count = 1)\n extra_pc.write_hdf5_for_imported_list()\n extra_pc.create_xml_for_imported_list_and_add_parts_to_model()\n\n def static_kh(self,\n ntg_uuid = None,\n perm_i_uuid = None,\n perm_j_uuid = None,\n perm_k_uuid = None,\n satw_uuid = None,\n sato_uuid = None,\n satg_uuid = None,\n region_uuid = None,\n active_only = False,\n min_k0 = None,\n max_k0 = None,\n k0_list = None,\n min_length = None,\n min_kh = None,\n max_depth = None,\n max_satw = None,\n min_sato = None,\n max_satg = None,\n perforation_list = None,\n region_list = None,\n set_k_face_intervals_vertical = False,\n anglv_ref = 'gravity',\n angla_plane_ref = None,\n length_mode = 'MD',\n length_uom = None,\n use_face_centres = False,\n preferential_perforation = True):\n \"\"\"Returns the total static K.H (permeability x height); length units are those of trajectory md_uom unless\n length_upm is set.\n\n note:\n see doc string for dataframe() method for argument descriptions; perm_i_uuid required\n \"\"\"\n\n df = self.dataframe(i_col = 'I',\n j_col = 'J',\n k_col = 'K',\n one_based = False,\n extra_columns_list = ['KH'],\n ntg_uuid = ntg_uuid,\n perm_i_uuid = perm_i_uuid,\n perm_j_uuid = perm_j_uuid,\n perm_k_uuid = perm_k_uuid,\n satw_uuid = satw_uuid,\n sato_uuid = sato_uuid,\n satg_uuid = satg_uuid,\n region_uuid = region_uuid,\n active_only = active_only,\n min_k0 = min_k0,\n max_k0 = max_k0,\n k0_list = k0_list,\n min_length = min_length,\n min_kh = min_kh,\n max_depth = max_depth,\n max_satw = max_satw,\n min_sato = min_sato,\n max_satg = max_satg,\n perforation_list = perforation_list,\n region_list = region_list,\n set_k_face_intervals_vertical = set_k_face_intervals_vertical,\n anglv_ref = anglv_ref,\n angla_plane_ref = angla_plane_ref,\n length_mode = length_mode,\n length_uom = length_uom,\n use_face_centres = use_face_centres,\n preferential_perforation = preferential_perforation)\n\n return sum(df['KH'])\n\n def write_wellspec(self,\n wellspec_file,\n well_name = None,\n mode = 'a',\n extra_columns_list = [],\n ntg_uuid = None,\n perm_i_uuid = None,\n perm_j_uuid = None,\n perm_k_uuid = None,\n satw_uuid = None,\n sato_uuid = None,\n satg_uuid = None,\n region_uuid = None,\n radw = None,\n skin = None,\n stat = None,\n active_only = False,\n min_k0 = None,\n max_k0 = None,\n k0_list = None,\n min_length = None,\n min_kh = None,\n max_depth = None,\n max_satw = None,\n min_sato = None,\n max_satg = None,\n perforation_list = None,\n region_list = None,\n set_k_face_intervals_vertical = False,\n depth_inc_down = True,\n anglv_ref = 'gravity',\n angla_plane_ref = None,\n length_mode = 'MD',\n length_uom = None,\n preferential_perforation = True,\n space_instead_of_tab_separator = True,\n align_columns = True,\n preceeding_blank_lines = 0,\n trailing_blank_lines = 0,\n length_uom_comment = False,\n write_nexus_units = True,\n float_format = '5.3'):\n \"\"\"Writes Nexus WELLSPEC keyword to an ascii file.\n\n returns:\n pandas DataFrame containing data that has been 
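# (illustrative aside, not resqpy API) The property expansion above maps each dataframe
# row back to its blocked-cell index and leaves excluded cells as NaN; the core of that
# is fancy indexing into a NaN-filled array (all values below are hypothetical):
import numpy as np

cell_count = 6                        # hypothetical number of blocked cells
row_ci_list = [0, 2, 3, 5]            # hypothetical cell indices for the dataframe rows
column_values = [1.0, 2.5, 3.0, 4.2]  # hypothetical values from one dataframe column

expanded = np.full(cell_count, np.nan)
expanded[np.array(row_ci_list, dtype = int)] = column_values
# expanded is now [1.0, nan, 2.5, 3.0, nan, 4.2]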
written to the wellspec file\n\n note:\n see doc string for dataframe() method for most of the argument descriptions;\n align_columns and float_format arguments are deprecated and no longer used\n \"\"\"\n\n def tidy_well_name(well_name):\n nexus_friendly = ''\n previous_underscore = False\n for ch in well_name:\n if not 32 <= ord(ch) < 128 or ch in ' ,!*#':\n ch = '_'\n if not (previous_underscore and ch == '_'):\n nexus_friendly += ch\n previous_underscore = (ch == '_')\n if not nexus_friendly:\n well_name = 'WELL_X'\n return nexus_friendly\n\n def is_float_column(col_name):\n if col_name.upper() in ['ANGLA', 'ANGLV', 'LENGTH', 'KH', 'DEPTH', 'MD', 'X', 'Y', 'SKIN', 'RADW', 'PPERF']:\n return True\n return False\n\n def is_int_column(col_name):\n if col_name.upper() in ['IW', 'JW', 'L']:\n return True\n return False\n\n assert wellspec_file, 'no output file specified to write WELLSPEC to'\n\n col_width_dict = {\n 'IW': 4,\n 'JW': 4,\n 'L': 4,\n 'ANGLA': 8,\n 'ANGLV': 8,\n 'LENGTH': 8,\n 'KH': 10,\n 'DEPTH': 10,\n 'MD': 10,\n 'X': 8,\n 'Y': 12,\n 'SKIN': 7,\n 'RADW': 5,\n 'PPERF': 5\n }\n\n if not well_name:\n if self.well_name:\n well_name = self.well_name\n elif self.root is not None:\n well_name = rqet.citation_title_for_node(self.root)\n elif self.wellbore_interpretation is not None:\n well_name = self.wellbore_interpretation.title\n elif self.trajectory is not None:\n well_name = self.trajectory.title\n else:\n log.warning('no well name identified for use in WELLSPEC')\n well_name = 'WELLNAME'\n well_name = tidy_well_name(well_name)\n\n df = self.dataframe(one_based = True,\n extra_columns_list = extra_columns_list,\n ntg_uuid = ntg_uuid,\n perm_i_uuid = perm_i_uuid,\n perm_j_uuid = perm_j_uuid,\n perm_k_uuid = perm_k_uuid,\n satw_uuid = satw_uuid,\n sato_uuid = sato_uuid,\n satg_uuid = satg_uuid,\n region_uuid = region_uuid,\n radw = radw,\n skin = skin,\n stat = stat,\n active_only = active_only,\n min_k0 = min_k0,\n max_k0 = max_k0,\n k0_list = k0_list,\n min_length = min_length,\n min_kh = min_kh,\n max_depth = max_depth,\n max_satw = max_satw,\n min_sato = min_sato,\n max_satg = max_satg,\n perforation_list = perforation_list,\n region_list = region_list,\n depth_inc_down = depth_inc_down,\n set_k_face_intervals_vertical = set_k_face_intervals_vertical,\n anglv_ref = anglv_ref,\n angla_plane_ref = angla_plane_ref,\n length_mode = length_mode,\n length_uom = length_uom,\n preferential_perforation = preferential_perforation)\n\n sep = ' ' if space_instead_of_tab_separator else '\\t'\n\n with open(wellspec_file, mode = mode) as fp:\n for _ in range(preceeding_blank_lines):\n fp.write('\\n')\n if write_nexus_units:\n if length_uom == 'm':\n fp.write('METRIC\\n\\n')\n elif length_uom == 'ft':\n fp.write('ENGLISH\\n\\n')\n if length_uom_comment and self.trajectory is not None and ('LENGTH' in extra_columns_list or\n 'MD' in extra_columns_list or\n 'KH' in extra_columns_list):\n fp.write(\n f'! 
Length units along wellbore: {self.trajectory.md_uom if length_uom is None else length_uom}\\n')\n fp.write('WELLSPEC ' + str(well_name) + '\\n')\n for col_name in df.columns:\n if col_name in col_width_dict:\n width = col_width_dict[col_name]\n else:\n width = 10\n form = '{0:>' + str(width) + '}'\n fp.write(sep + form.format(col_name))\n fp.write('\\n')\n for row_info in df.iterrows():\n row = row_info[1]\n for col_name in df.columns:\n try:\n if col_name in col_width_dict:\n width = col_width_dict[col_name]\n else:\n width = 10\n if is_float_column(col_name):\n form = '{0:>' + str(width) + '.3f}'\n fp.write(sep + form.format(float(row[col_name])))\n else:\n form = '{0:>' + str(width) + '}'\n if is_int_column(col_name):\n fp.write(sep + form.format(int(row[col_name])))\n else:\n fp.write(sep + form.format(str(row[col_name])))\n except Exception:\n fp.write(sep + str(row[col_name]))\n fp.write('\\n')\n for _ in range(trailing_blank_lines):\n fp.write('\\n')\n\n return df\n\n def kji0_marker(self, active_only = True):\n \"\"\"Convenience method returning (k0, j0, i0), grid_uuid of first blocked interval.\"\"\"\n\n cells, grids = self.cell_indices_and_grid_list()\n if cells is None or grids is None or len(grids) == 0:\n return None, None, None, None\n return cells[0], grids[0].uuid\n\n def xyz_marker(self, active_only = True):\n \"\"\"Convenience method returning (x, y, z), crs_uuid of perforation in first blocked interval.\n\n notes:\n active_only argument not yet in use;\n returns None, None if no blocked interval found\n \"\"\"\n\n cells, grids = self.cell_indices_and_grid_list()\n if cells is None or grids is None or len(grids) == 0:\n return None, None\n node_index = 0\n while node_index < self.node_count - 1 and self.grid_indices[node_index] == -1:\n node_index += 1\n if node_index >= self.node_count - 1:\n return None, None\n md = 0.5 * (self.node_mds[node_index] + self.node_mds[node_index + 1])\n xyz = self.trajectory.xyz_for_md(md)\n return xyz, rqet.uuid_for_part_root(self.trajectory.crs_root)\n\n def create_feature_and_interpretation(self, shared_interpretation = True):\n \"\"\"Instantiate new empty WellboreFeature and WellboreInterpretation objects.\n\n Uses the Blocked well citation title as the well name\n \"\"\"\n if self.trajectory is not None:\n traj_interp_uuid = self.model.uuid(obj_type = 'WellboreInterpretation', related_uuid = self.trajectory.uuid)\n if traj_interp_uuid is not None:\n if shared_interpretation:\n self.wellbore_interpretation = rqo.WellboreInterpretation(parent_model = self.model,\n uuid = traj_interp_uuid)\n traj_feature_uuid = self.model.uuid(obj_type = 'WellboreFeature', related_uuid = traj_interp_uuid)\n if traj_feature_uuid is not None:\n self.wellbore_feature = rqo.WellboreFeature(parent_model = self.model, uuid = traj_feature_uuid)\n if self.wellbore_feature is None:\n self.wellbore_feature = rqo.WellboreFeature(parent_model = self.model, feature_name = self.trajectory.title)\n self.feature_to_be_written = True\n if self.wellbore_interpretation is None:\n self.wellbore_interpretation = rqo.WellboreInterpretation(parent_model = self.model,\n wellbore_feature = self.wellbore_feature)\n if self.trajectory.wellbore_interpretation is None and shared_interpretation:\n self.trajectory.wellbore_interpretation = self.wellbore_interpretation\n self.interpretation_to_be_written = True\n\n def create_md_datum_and_trajectory(self,\n grid,\n trajectory_mds,\n trajectory_points,\n length_uom,\n well_name,\n set_depth_zero = False,\n set_tangent_vectors = False,\n 
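# (illustrative aside, not resqpy API) The WELLSPEC writer above right-justifies each
# value in a fixed column width, printing floats to three decimal places; a minimal
# standalone sketch of that row formatting with a single-space separator:
def format_wellspec_value(value, width, is_float):
    form = '{0:>' + str(width) + ('.3f}' if is_float else '}')
    return form.format(float(value) if is_float else value)

# example: an IW, JW, L triplet followed by an ANGLV value
row = (format_wellspec_value(12, 4, False) + ' ' + format_wellspec_value(8, 4, False) + ' ' +
       format_wellspec_value(3, 4, False) + ' ' + format_wellspec_value(88.1234, 8, True))
# row == '  12    8    3   88.123'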
create_feature_and_interp = True):\n \"\"\"Creates an Md Datum object and a (simulation) Trajectory object for this blocked well.\n\n note:\n not usually called directly; used by import methods\n \"\"\"\n\n # create md datum node for synthetic trajectory, using crs for grid\n datum_location = trajectory_points[0].copy()\n if set_depth_zero:\n datum_location[2] = 0.0\n datum = MdDatum(self.model,\n crs_uuid = grid.crs_uuid,\n location = datum_location,\n md_reference = 'mean sea level')\n\n # create synthetic trajectory object, using crs for grid\n trajectory_mds_array = np.array(trajectory_mds)\n trajectory_xyz_array = np.array(trajectory_points)\n trajectory_df = pd.DataFrame({\n 'MD': trajectory_mds_array,\n 'X': trajectory_xyz_array[..., 0],\n 'Y': trajectory_xyz_array[..., 1],\n 'Z': trajectory_xyz_array[..., 2]\n })\n self.trajectory = Trajectory(self.model,\n md_datum = datum,\n data_frame = trajectory_df,\n length_uom = length_uom,\n well_name = well_name,\n set_tangent_vectors = set_tangent_vectors)\n self.trajectory_to_be_written = True\n\n if create_feature_and_interp:\n self.create_feature_and_interpretation()\n\n def create_xml(self,\n ext_uuid = None,\n create_for_trajectory_if_needed = True,\n add_as_part = True,\n add_relationships = True,\n title = None,\n originator = None):\n \"\"\"Create a blocked wellbore representation node from this BlockedWell object, optionally add as part.\n\n note:\n trajectory xml node must be in place before calling this function;\n witsml log reference, interval stratigraphic units, and cell fluid phase units not yet supported\n\n :meta common:\n \"\"\"\n\n assert self.trajectory is not None, 'trajectory object missing'\n\n if ext_uuid is None:\n ext_uuid = self.model.h5_uuid()\n\n if title:\n self.title = title\n if not self.title:\n self.title = 'blocked well'\n\n if self.feature_to_be_written:\n if self.wellbore_feature is None:\n self.create_feature_and_interpretation()\n self.wellbore_feature.create_xml(add_as_part = add_as_part, originator = originator)\n if self.interpretation_to_be_written:\n if self.wellbore_interpretation is None:\n self.create_feature_and_interpretation()\n self.wellbore_interpretation.create_xml(add_as_part = add_as_part,\n title_suffix = None,\n add_relationships = add_relationships,\n originator = originator)\n\n if create_for_trajectory_if_needed and self.trajectory_to_be_written and self.trajectory.root is None:\n md_datum_root = self.trajectory.md_datum.create_xml(add_as_part = add_as_part,\n add_relationships = add_relationships,\n title = str(self.title),\n originator = originator)\n self.trajectory.create_xml(ext_uuid,\n md_datum_root = md_datum_root,\n add_as_part = add_as_part,\n add_relationships = add_relationships,\n title = title,\n originator = originator)\n\n assert self.trajectory.root is not None, 'trajectory xml not established'\n\n bw_node = super().create_xml(title = title, originator = originator, add_as_part = False)\n\n # wellbore frame elements\n\n nc_node = rqet.SubElement(bw_node, ns['resqml2'] + 'NodeCount')\n nc_node.set(ns['xsi'] + 'type', ns['xsd'] + 'positiveInteger')\n nc_node.text = str(self.node_count)\n\n mds_node = rqet.SubElement(bw_node, ns['resqml2'] + 'NodeMd')\n mds_node.set(ns['xsi'] + 'type', ns['resqml2'] + 'DoubleHdf5Array')\n mds_node.text = rqet.null_xml_text\n\n mds_values_node = rqet.SubElement(mds_node, ns['resqml2'] + 'Values')\n mds_values_node.set(ns['xsi'] + 'type', ns['eml'] + 'Hdf5Dataset')\n mds_values_node.text = rqet.null_xml_text\n\n 
self.model.create_hdf5_dataset_ref(ext_uuid, self.uuid, 'NodeMd', root = mds_values_node)\n\n traj_root = self.trajectory.root\n self.model.create_ref_node('Trajectory',\n rqet.find_nested_tags_text(traj_root, ['Citation', 'Title']),\n bu.uuid_from_string(traj_root.attrib['uuid']),\n content_type = 'obj_WellboreTrajectoryRepresentation',\n root = bw_node)\n\n # remaining blocked wellbore elements\n\n cc_node = rqet.SubElement(bw_node, ns['resqml2'] + 'CellCount')\n cc_node.set(ns['xsi'] + 'type', ns['xsd'] + 'nonNegativeInteger')\n cc_node.text = str(self.cell_count)\n\n cis_node = rqet.SubElement(bw_node, ns['resqml2'] + 'CellIndices')\n cis_node.set(ns['xsi'] + 'type', ns['resqml2'] + 'IntegerHdf5Array')\n cis_node.text = rqet.null_xml_text\n\n cnull_node = rqet.SubElement(cis_node, ns['resqml2'] + 'NullValue')\n cnull_node.set(ns['xsi'] + 'type', ns['xsd'] + 'integer')\n cnull_node.text = str(self.cellind_null)\n\n cis_values_node = rqet.SubElement(cis_node, ns['resqml2'] + 'Values')\n cis_values_node.set(ns['xsi'] + 'type', ns['eml'] + 'Hdf5Dataset')\n cis_values_node.text = rqet.null_xml_text\n\n self.model.create_hdf5_dataset_ref(ext_uuid, self.uuid, 'CellIndices', root = cis_values_node)\n\n gis_node = rqet.SubElement(bw_node, ns['resqml2'] + 'GridIndices')\n gis_node.set(ns['xsi'] + 'type', ns['resqml2'] + 'IntegerHdf5Array')\n gis_node.text = rqet.null_xml_text\n\n gnull_node = rqet.SubElement(gis_node, ns['resqml2'] + 'NullValue')\n gnull_node.set(ns['xsi'] + 'type', ns['xsd'] + 'integer')\n gnull_node.text = str(self.gridind_null)\n\n gis_values_node = rqet.SubElement(gis_node, ns['resqml2'] + 'Values')\n gis_values_node.set(ns['xsi'] + 'type', ns['eml'] + 'Hdf5Dataset')\n gis_values_node.text = rqet.null_xml_text\n\n self.model.create_hdf5_dataset_ref(ext_uuid, self.uuid, 'GridIndices', root = gis_values_node)\n\n fis_node = rqet.SubElement(bw_node, ns['resqml2'] + 'LocalFacePairPerCellIndices')\n fis_node.set(ns['xsi'] + 'type', ns['resqml2'] + 'IntegerHdf5Array')\n fis_node.text = rqet.null_xml_text\n\n fnull_node = rqet.SubElement(fis_node, ns['resqml2'] + 'NullValue')\n fnull_node.set(ns['xsi'] + 'type', ns['xsd'] + 'integer')\n fnull_node.text = str(self.facepair_null)\n\n fis_values_node = rqet.SubElement(fis_node, ns['resqml2'] + 'Values')\n fis_values_node.set(ns['xsi'] + 'type', ns['eml'] + 'Hdf5Dataset')\n fis_values_node.text = rqet.null_xml_text\n\n self.model.create_hdf5_dataset_ref(ext_uuid, self.uuid, 'LocalFacePairPerCellIndices', root = fis_values_node)\n\n for grid in self.grid_list:\n\n grid_root = grid.root\n self.model.create_ref_node('Grid',\n rqet.find_nested_tags_text(grid_root, ['Citation', 'Title']),\n bu.uuid_from_string(grid_root.attrib['uuid']),\n content_type = 'obj_IjkGridRepresentation',\n root = bw_node)\n\n interp_root = None\n if self.wellbore_interpretation is not None:\n interp_root = self.wellbore_interpretation.root\n self.model.create_ref_node('RepresentedInterpretation',\n rqet.find_nested_tags_text(interp_root, ['Citation', 'Title']),\n bu.uuid_from_string(interp_root.attrib['uuid']),\n content_type = 'obj_WellboreInterpretation',\n root = bw_node)\n\n if add_as_part:\n self.model.add_part('obj_BlockedWellboreRepresentation', self.uuid, bw_node)\n if add_relationships:\n self.model.create_reciprocal_relationship(bw_node, 'destinationObject', self.trajectory.root,\n 'sourceObject')\n\n for grid in self.grid_list:\n self.model.create_reciprocal_relationship(bw_node, 'destinationObject', grid.root, 'sourceObject')\n if interp_root is not 
None:\n self.model.create_reciprocal_relationship(bw_node, 'destinationObject', interp_root, 'sourceObject')\n ext_part = rqet.part_name_for_object('obj_EpcExternalPartReference', ext_uuid, prefixed = False)\n ext_node = self.model.root_for_part(ext_part)\n self.model.create_reciprocal_relationship(bw_node, 'mlToExternalPartProxy', ext_node,\n 'externalPartProxyToMl')\n\n return bw_node\n\n def write_hdf5(self, file_name = None, mode = 'a', create_for_trajectory_if_needed = True):\n \"\"\"Create or append to an hdf5 file, writing datasets for the measured depths, grid, cell & face indices.\n\n :meta common:\n \"\"\"\n\n # NB: array data must all have been set up prior to calling this function\n\n if self.uuid is None:\n self.uuid = bu.new_uuid()\n\n h5_reg = rwh5.H5Register(self.model)\n\n if create_for_trajectory_if_needed and self.trajectory_to_be_written:\n self.trajectory.write_hdf5(file_name, mode = mode)\n mode = 'a'\n\n h5_reg.register_dataset(self.uuid, 'NodeMd', self.node_mds)\n h5_reg.register_dataset(self.uuid, 'CellIndices', self.cell_indices) # could use int32?\n h5_reg.register_dataset(self.uuid, 'GridIndices', self.grid_indices) # could use int32?\n # convert face index pairs from [axis, polarity] back to strange local face numbering\n mask = (self.face_pair_indices.flatten() == -1).reshape((-1, 2)) # 2nd axis is (axis, polarity)\n masked_face_indices = np.where(mask, 0, self.face_pair_indices.reshape((-1, 2))) # 2nd axis is (axis, polarity)\n # using flat array for raw_face_indices array\n # other resqml writing code might use an array with one int per entry point and one per exit point, with 2nd axis as (entry, exit)\n raw_face_indices = np.where(mask[:, 0], -1, self.face_index_map[masked_face_indices[:, 0],\n masked_face_indices[:,\n 1]].flatten()).reshape(-1)\n\n h5_reg.register_dataset(self.uuid, 'LocalFacePairPerCellIndices', raw_face_indices) # could use uint8?\n\n h5_reg.write(file = file_name, mode = mode)\n\n\nclass WellboreMarkerFrame(BaseResqpy):\n \"\"\"Class to handle RESQML WellBoreMarkerFrameRepresentation objects.\n\n note:\n measured depth data must be in same crs as those for the related trajectory\n \"\"\"\n\n resqml_type = 'WellboreMarkerFrameRepresentation'\n\n def __init__(self,\n parent_model,\n wellbore_marker_frame_root = None,\n uuid = None,\n trajectory = None,\n title = None,\n originator = None,\n extra_metadata = None):\n \"\"\"Creates a new wellbore marker object and optionally loads it from xml, or trajectory, or Nexus wellspec file.\n\n arguments:\n parent_model (model.Model object): the model which the new blocked well belongs to\n wellbore_marker_root (DEPRECATED): the root node of an xml tree representing the wellbore marker;\n trajectory (optional, Trajectory object): the trajectory of the well, to be intersected with the grid;\n not used if wellbore_marker_root is not None;\n title (str, optional): the citation title to use for a new wellbore marker frame;\n ignored if uuid or wellbore_marker_frame_root is not None\n originator (str, optional): the name of the person creating the wellbore marker frame, defaults to login id;\n ignored if uuid or wellbore_marker_frame_root is not None\n extra_metadata (dict, optional): string key, value pairs to add as extra metadata for the wellbore marker frame;\n ignored if uuid or wellbore_marker_frame_root is not None\n\n returns:\n the newly created wellbore framework marker object\n \"\"\"\n\n self.trajectory = None\n self.node_count = None # number of measured depth nodes, each being for a 
marker\n self.node_mds = None # node_count measured depths (in same units and datum as trajectory) of markers\n self.wellbore_marker_list = [\n ] # list of markers, each: (marker UUID, geologic boundary, marker citation title, interp. object)\n if self.trajectory is not None:\n self.trajectory = trajectory\n\n super().__init__(model = parent_model,\n uuid = uuid,\n title = title,\n originator = originator,\n extra_metadata = extra_metadata,\n root_node = wellbore_marker_frame_root)\n\n def get_trajectory_obj(self, trajectory_uuid):\n \"\"\"Returns a trajectory object.\n\n arguments:\n trajectory_uuid (string or uuid.UUID): the uuid of the trajectory for which a Trajectory object is required\n\n returns:\n well.Trajectory object\n\n note:\n this method is not usually called directly\n \"\"\"\n\n if trajectory_uuid is None:\n log.error('no trajectory was found')\n return None\n else:\n # create new trajectory object\n trajectory_root_node = self.model.root_for_uuid(trajectory_uuid)\n assert trajectory_root_node is not None, 'referenced wellbore trajectory missing from model'\n return Trajectory(self.model, trajectory_root = trajectory_root_node)\n\n def get_interpretation_obj(self, interpretation_uuid, interp_type = None):\n \"\"\"Creates an interpretation object; returns a horizon or fault interpretation object.\n\n arguments:\n interpretation_uiud (string or uuid.UUID): the uuid of the required interpretation object\n interp_type (string, optional): 'HorizonInterpretation' or 'FaultInterpretation' (optionally\n prefixed with `obj_`); if None, the type is inferred from the xml for the given uuid\n\n returns:\n organization.HorizonInterpretation or organization.FaultInterpretation object\n\n note:\n this method is not usually called directly\n \"\"\"\n\n assert interpretation_uuid is not None, 'interpretation uuid argument missing'\n\n interpretation_root_node = self.model.root_for_uuid(interpretation_uuid)\n\n if not interp_type:\n interp_type = rqet.node_type(interpretation_root_node)\n\n if not interp_type.startswith('obj_'):\n interp_type = 'obj_' + interp_type\n\n if interp_type == 'obj_HorizonInterpretation':\n # create new horizon interpretation object\n return rqo.HorizonInterpretation(self.model, root_node = interpretation_root_node)\n\n elif interp_type == 'obj_FaultInterpretation':\n # create new fault interpretation object\n return rqo.FaultInterpretation(self.model, root_node = interpretation_root_node)\n\n elif interp_type == 'obj_GeobodyInterpretation':\n # create new geobody interpretation object\n return rqo.GeobodyInterpretation(self.model, root_node = interpretation_root_node)\n else:\n # No interpretation for the marker\n return None\n # log.error('interpretation type not recognized: ' + str(interp_type))\n\n def _load_from_xml(self):\n \"\"\"Loads the wellbore marker frame object from an xml node (and associated hdf5 data).\n\n note:\n this method is not usually called directly\n \"\"\"\n\n wellbore_marker_frame_root = self.root\n assert wellbore_marker_frame_root is not None\n\n if self.trajectory is None:\n self.trajectory = self.get_trajectory_obj(\n rqet.find_nested_tags_text(wellbore_marker_frame_root, ['Trajectory', 'UUID']))\n\n # list of Wellbore markers, each: (marker UUID, geologic boundary, marker citation title, interp. 
object)\n self.wellbore_marker_list = []\n for tag in rqet.list_of_tag(wellbore_marker_frame_root, 'WellboreMarker'):\n interp_tag = rqet.content_type(rqet.find_nested_tags_text(tag, ['Interpretation', 'ContentType']))\n if interp_tag is not None:\n interp_obj = self.get_interpretation_obj(rqet.find_nested_tags_text(tag, ['Interpretation', 'UUID']),\n interp_tag)\n else:\n interp_obj = None\n self.wellbore_marker_list.append(\n (str(rqet.uuid_for_part_root(tag)), rqet.find_tag_text(tag, 'GeologicBoundaryKind'),\n rqet.find_nested_tags_text(tag, ['Citation', 'Title']), interp_obj))\n\n self.node_count = rqet.find_tag_int(wellbore_marker_frame_root, 'NodeCount')\n load_hdf5_array(self, rqet.find_tag(wellbore_marker_frame_root, 'NodeMd'), \"node_mds\", tag = 'Values')\n if self.node_count != len(self.node_mds):\n log.error('node count does not match hdf5 array')\n\n if len(self.wellbore_marker_list) != self.node_count:\n log.error('wellbore marker list does not contain correct node count')\n\n def dataframe(self):\n \"\"\"Returns a pandas dataframe with columns X, Y, Z, MD, Type, Surface, Well.\"\"\"\n\n # todo: handle fractures and geobody boundaries as well as horizons and faults\n\n xyz = np.empty((self.node_count, 3))\n type_list = []\n surface_list = []\n well_list = []\n\n for i in range(self.node_count):\n _, boundary_kind, title, interp = self.wellbore_marker_list[i]\n if interp:\n if boundary_kind == 'horizon':\n feature_name = rqo.GeneticBoundaryFeature(self.model, root_node = interp.feature_root).feature_name\n elif boundary_kind == 'fault':\n feature_name = rqo.TectonicBoundaryFeature(self.model, root_node = interp.feature_root).feature_name\n elif boundary_kind == 'geobody':\n feature_name = rqo.GeneticBoundaryFeature(self.model, root_node = interp.feature_root).feature_name\n else:\n assert False, 'unexpected boundary kind'\n else:\n feature_name = title\n boundary_kind = boundary_kind[0].upper() + boundary_kind[1:]\n feature_name = '\"' + feature_name + '\"'\n xyz[i] = self.trajectory.xyz_for_md(self.node_mds[i])\n type_list.append(boundary_kind)\n surface_list.append(feature_name)\n if self.trajectory.wellbore_interpretation is None:\n well_name = '\"' + self.trajectory.title + '\"' # todo: trace through wellbore interp to wellbore feature name\n else:\n well_name = '\"' + self.trajectory.wellbore_interpretation.title + '\"' # use wellbore_interpretation title instead, RMS exports have feature_name as \"Wellbore feature\"\n # well_name = '\"' + self.trajectory.wellbore_interpretation.wellbore_feature.feature_name + '\"'\n well_list.append(well_name)\n\n return pd.DataFrame({\n 'X': xyz[:, 0],\n 'Y': xyz[:, 1],\n 'Z': xyz[:, 2],\n 'MD': self.node_mds,\n 'Type': type_list,\n 'Surface': surface_list,\n 'Well': well_list\n })\n\n def create_xml(self,\n ext_uuid = None,\n add_as_part = True,\n add_relationships = True,\n wellbore_marker_list = None,\n title = 'wellbore marker framework',\n originator = None):\n\n assert type(add_as_part) is bool\n\n if ext_uuid is None:\n ext_uuid = self.model.h5_uuid()\n\n wbm_node = super().create_xml(originator = originator, add_as_part = False)\n\n nodeCount = rqet.SubElement(wbm_node, ns['resqml2'] + 'NodeCount')\n nodeCount.set(ns['xsi'] + 'type', ns['xsd'] + 'positiveInteger')\n nodeCount.text = str(self.node_count)\n\n nodeMd = rqet.SubElement(wbm_node, ns['resqml2'] + 'NodeMd')\n nodeMd.set(ns['xsi'] + 'type', ns['resqml2'] + 'DoubleHdf5Array')\n nodeMd.text = rqet.null_xml_text\n\n md_values_node = rqet.SubElement(nodeMd, ns['resqml2'] + 
'Values')\n md_values_node.set(ns['xsi'] + 'type', ns['resqml2'] + 'Hdf5Dataset')\n md_values_node.text = rqet.null_xml_text\n\n self.model.create_hdf5_dataset_ref(ext_uuid, self.uuid, 'mds', root = md_values_node)\n\n if self.trajectory is not None:\n traj_root = self.trajectory.root\n self.model.create_ref_node('Trajectory',\n rqet.find_tag(rqet.find_tag(traj_root, 'Citation'), 'Title').text,\n bu.uuid_from_string(traj_root.attrib['uuid']),\n content_type = 'obj_WellboreTrajectoryRepresentation',\n root = wbm_node)\n else:\n log.error('trajectory object is missing and must be included')\n\n # fill wellbore marker\n for marker in self.wellbore_marker_list:\n\n wbm_node_obj = self.model.new_obj_node('WellboreMarker', is_top_lvl_obj = False)\n wbm_node_obj.set('uuid', marker[0])\n wbm_node.append(wbm_node_obj)\n wbm_gb_node = rqet.SubElement(wbm_node_obj, ns['resqml2'] + 'GeologicBoundaryKind')\n wbm_gb_node.set(ns['xsi'] + 'type', ns['xsd'] + 'string')\n wbm_gb_node.text = str(marker[1])\n\n interp = marker[3]\n if interp is not None:\n interp_root = marker[3].root\n if 'HorizonInterpretation' in str(type(marker[3])):\n self.model.create_ref_node('Interpretation',\n rqet.find_tag(rqet.find_tag(interp_root, 'Citation'), 'Title').text,\n bu.uuid_from_string(interp_root.attrib['uuid']),\n content_type = 'obj_HorizonInterpretation',\n root = wbm_node_obj)\n\n elif 'FaultInterpretation' in str(type(marker[3])):\n self.model.create_ref_node('Interpretation',\n rqet.find_tag(rqet.find_tag(interp_root, 'Citation'), 'Title').text,\n bu.uuid_from_string(interp_root.attrib['uuid']),\n content_type = 'obj_FaultInterpretation',\n root = wbm_node_obj)\n\n # add as part\n if add_as_part:\n self.model.add_part('obj_WellboreMarkerFrameRepresentation', self.uuid, wbm_node)\n\n if add_relationships:\n self.model.create_reciprocal_relationship(wbm_node, 'destinationObject', self.trajectory.root,\n 'sourceObject')\n ext_part = rqet.part_name_for_object('obj_EpcExternalPartReference', ext_uuid, prefixed = False)\n ext_node = self.model.root_for_part(ext_part)\n self.model.create_reciprocal_relationship(wbm_node, 'mlToExternalPartProxy', ext_node,\n 'externalPartProxyToMl')\n\n for marker in self.wellbore_marker_list:\n self.model.create_reciprocal_relationship(wbm_node, 'destinationObject', marker[3].root,\n 'sourceObject')\n\n return wbm_node\n\n def write_hdf5(self, file_name = None, mode = 'a'):\n \"\"\"Writes the hdf5 array associated with this object (the measured depth data).\n\n arguments:\n file_name (string): the name of the hdf5 file, or None, in which case the model's default will be used\n mode (string, default 'a'): the write mode for the hdf5, either 'w' or 'a'\n \"\"\"\n\n h5_reg = rwh5.H5Register(self.model)\n h5_reg.register_dataset(self.uuid, 'Mds', self.node_mds)\n h5_reg.write(file = file_name, mode = mode)\n\n def find_marker_from_interp(self, interpetation_obj = None, uuid = None):\n \"\"\"Find wellbore marker by interpretation; can pass object or uuid.\n\n arguments:\n interpretation_obj (organize.HorizonInterpretation or organize.FaultInterpretation object, optional):\n if present, the first (smallest md) marker relating to this interpretation object is returned\n uuid (string or uuid.UUID): if present, the uuid of the interpretation object of interest; ignored if\n interpretation_obj is not None\n\n returns:\n tuple, list of tuples or None; tuple is (marker UUID, geologic boundary, marker citation title, interp. 
object)\n\n note:\n if no arguments are passed, then a list of wellbore markers is returned;\n if no marker is found for the interpretation object, None is returned\n \"\"\"\n\n if interpetation_obj is None and uuid is None:\n return self.wellbore_marker_list\n\n if interpetation_obj is not None:\n uuid = interpetation_obj.uuid\n\n for marker in self.wellbore_marker_list:\n if bu.matching_uuids(marker[3].uuid, uuid):\n return marker\n\n return None\n\n def get_marker_count(self):\n \"\"\"Retruns number of wellbore markers.\"\"\"\n\n return len(self.wellbore_marker_list)\n\n def find_marker_from_index(self, idx):\n \"\"\"Returns wellbore marker by index.\"\"\"\n\n return self.wellbore_marker_list[idx - 1]\n\n\ndef add_las_to_trajectory(las: lasio.LASFile, trajectory, realization = None, check_well_name = False):\n \"\"\"Creates a WellLogCollection and WellboreFrame from a LAS file.\n\n Note:\n In this current implementation, the first curve in the las object must be\n Measured Depths, not e.g. TVDSS.\n\n Arguments:\n las: an lasio.LASFile object\n trajectory: an instance of :class:`resqpy.well.Trajectory` .\n realization (integer): if present, the single realisation (within an ensemble)\n that this collection is for\n check_well_name (bool): if True, raise warning if LAS well name does not match\n existing wellborefeature citation title\n\n Returns:\n collection, well_frame: instances of :class:`resqpy.property.WellLogCollection`\n and :class:`resqpy.well.WellboreFrame`\n \"\"\"\n\n # Lookup relevant related resqml parts\n model = trajectory.model\n well_interp = trajectory.wellbore_interpretation\n well_title = well_interp.title\n\n if check_well_name and well_title != las.well.WELL.value:\n warnings.warn(f'LAS well title {las.well.WELL.value} does not match resqml tite {well_title}')\n\n # Create a new wellbore frame, using depth data from first curve in las file\n depth_values = np.array(las.index).copy()\n assert isinstance(depth_values, np.ndarray)\n las_depth_uom = bwam.rq_length_unit(las.curves[0].unit)\n\n # Ensure depth units are correct\n bwam.convert_lengths(depth_values, from_units = las_depth_uom, to_units = trajectory.md_uom)\n assert len(depth_values) > 0\n\n well_frame = WellboreFrame(\n parent_model = model,\n trajectory = trajectory,\n mds = depth_values,\n represented_interp = well_interp,\n )\n well_frame.write_hdf5()\n well_frame.create_xml()\n\n # Create a WellLogCollection in which to put logs\n collection = rqp.WellLogCollection(frame = well_frame, realization = realization)\n\n # Read in data from each curve in turn (skipping first curve which has depths)\n for curve in las.curves[1:]:\n\n collection.add_log(\n title = curve.mnemonic,\n data = curve.data,\n unit = curve.unit,\n realization = realization,\n write = False,\n )\n collection.write_hdf5_for_imported_list()\n collection.create_xml_for_imported_list_and_add_parts_to_model()\n\n return collection, well_frame\n\n\ndef add_logs_from_cellio(blockedwell, cellio):\n \"\"\"Creates a WellIntervalPropertyCollection for a given BlockedWell, using a given cell I/O file.\n\n Arguments:\n blockedwell: a resqml blockedwell object\n cellio: an ascii file exported from RMS containing blocked well geometry and logs. 
Must contain columns i_index, j_index and k_index, plus additional columns for logs to be imported.\n \"\"\"\n # Get the initial variables from the blocked well\n assert isinstance(blockedwell, BlockedWell), 'Not a blocked wellbore object'\n collection = rqp.WellIntervalPropertyCollection(frame = blockedwell)\n well_name = blockedwell.trajectory.title.split(\" \")[0]\n grid = blockedwell.model.grid()\n\n # Read the cell I/O file to get the available columns (cols) and the data (data), and write into a dataframe\n with open(cellio, 'r') as fp:\n wellfound = False\n cols, data = [], []\n for line in fp.readlines():\n if line == \"\\n\":\n wellfound = False # Blankline signifies end of well data\n words = line.split()\n if wellfound:\n if len(words) > 2 and not words[0].isdigit():\n cols.append(line)\n else:\n if len(words) > 9:\n assert len(cols) == len(words), 'Number of columns found should match header of file'\n data.append(words)\n if len(words) == 3:\n if words[0].upper() == well_name.upper():\n wellfound = True\n assert len(data) > 0 and len(cols) > 3, f\"No data for well {well_name} found in file\"\n df = pd.DataFrame(data = data, columns = [x.split()[0] for x in cols])\n df = df.apply(pd.to_numeric)\n # Get the cell_indices from the grid for the given i/j/k\n df['cell_indices'] = grid.natural_cell_indices(\n np.array((df['k_index'] - 1, df['j_index'] - 1, df['i_index'] - 1), dtype = int).T)\n df = df.drop(['i_index', 'j_index', 'k_index', 'x_in', 'y_in', 'z_in', 'x_out', 'y_out', 'z_out'], axis = 1)\n assert (df['cell_indices'] == blockedwell.cell_indices\n ).all(), 'Cell indices do not match between blocked well and log inputs'\n\n # Work out if the data columns are continuous, categorical or discrete\n type_dict = {}\n lookup_dict = {}\n for col in cols:\n words = col.split()\n if words[0] not in ['i_index', 'j_index', 'k_index', 'x_in', 'y_in', 'z_in', 'x_out', 'y_out', 'z_out']:\n if words[1] == 'unit1':\n type_dict[words[0]] = 'continuous'\n elif words[1] == 'DISC' and not words[0] == 'ZONES':\n type_dict[words[0]] = 'categorical'\n lookup_dict[words[0]] = lookup_from_cellio(col, blockedwell.model)\n elif words[1] == 'param' or words[0] == 'ZONES':\n type_dict[words[0]] = 'discrete'\n else:\n raise TypeError(f'unrecognised data type for {col}')\n\n # Loop over the columns, adding them to the blockedwell property collection\n for log in df.columns:\n if log not in ['cell_indices']:\n data_type = type_dict[log]\n if log == 'ZONES':\n data_type, dtype, null, discrete = 'discrete', int, -1, True\n elif data_type == 'continuous':\n dtype, null, discrete = float, np.nan, False\n else:\n dtype, null, discrete = int, -1, True\n if data_type == 'categorical':\n lookup_uuid = lookup_dict[log] # For categorical data, find or generate a StringLookupTable\n else:\n lookup_uuid = None\n array_list = np.zeros((np.shape(blockedwell.grid_indices)), dtype = dtype)\n vals = list(df[log])\n for i, index in enumerate(blockedwell.cell_grid_link):\n if index == -1:\n assert blockedwell.grid_indices[i] == -1\n array_list[i] = null\n else:\n if blockedwell.cell_indices[index] == list(df['cell_indices'])[index]:\n array_list[i] = vals[index]\n collection.add_cached_array_to_imported_list(\n cached_array = array_list,\n source_info = '',\n keyword = f\"{os.path.basename(cellio).split('.')[0]}.{blockedwell.trajectory.title}.{log}\",\n discrete = discrete,\n uom = None,\n property_kind = None,\n facet = None,\n null_value = null,\n facet_type = None,\n realization = None)\n 
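# flush the cached log arrays to hdf5 and create xml parts for the imported interval properties\n    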
collection.write_hdf5_for_imported_list()\n collection.create_xml_for_imported_list_and_add_parts_to_model(string_lookup_uuid = lookup_uuid)\n\n\ndef lookup_from_cellio(line, model):\n \"\"\"Create a StringLookup Object from a cell I/O row containing a categorical column name and details.\n\n Arguments:\n line: a string from a cell I/O file, containing the column (log) name, type and categorical information\n model: the model to add the StringTableLookup to\n Returns:\n uuid: the uuid of a StringTableLookup, either for a newly created table, or for an existing table if an identical one exists\n \"\"\"\n lookup_dict = {}\n value, string = None, None\n # Generate a dictionary of values and strings\n for i, word in enumerate(line.split()):\n if i == 0:\n title = word\n elif not i < 2:\n if value is not None and string is not None:\n lookup_dict[value] = string\n value, string = None, None\n if value is None:\n value = int(word)\n else:\n if i == len(line.split()) - 1:\n lookup_dict[value] = word\n else:\n string = word\n\n # Check if a StringLookupTable already exists in the model, with the same name and values\n for existing in model.parts_list_of_type('obj_StringTableLookup'):\n table = rqp.StringLookup(parent_model = model, root_node = model.root_for_part(existing))\n if table.title == title:\n if table.str_dict == lookup_dict:\n return table.uuid # If the exact table exists, reuse it by returning the uuid\n\n # If no matching StringLookupTable exists, make a new one and return the uuid\n lookup = rqp.StringLookup(parent_model = model, int_to_str_dict = lookup_dict, title = title)\n lookup.create_xml(add_as_part = True)\n return lookup.uuid\n\n\ndef add_wells_from_ascii_file(model,\n crs_uuid,\n trajectory_file,\n comment_character = '#',\n space_separated_instead_of_csv = False,\n well_col = 'WELL',\n md_col = 'MD',\n x_col = 'X',\n y_col = 'Y',\n z_col = 'Z',\n length_uom = 'm',\n md_domain = None,\n drilled = False):\n \"\"\"Creates new md datum, trajectory, interpretation and feature objects for each well in an ascii file.\n\n arguments:\n crs_uuid (uuid.UUID): the unique identifier of the coordinate reference system applicable to the x,y,z data;\n if None, a default crs will be created, making use of the length_uom and z_inc_down arguments\n trajectory_file (string): the path of the ascii file holding the well trajectory data to be loaded\n comment_character (string, default '#'): character deemed to introduce a comment in the trajectory file\n space_separated_instead_of_csv (boolean, default False): if True, the columns in the trajectory file are space\n separated; if False, comma separated\n well_col (string, default 'WELL'): the heading for the column containing well names\n md_col (string, default 'MD'): the heading for the column containing measured depths\n x_col (string, default 'X'): the heading for the column containing X (usually easting) data\n y_col (string, default 'Y'): the heading for the column containing Y (usually northing) data\n z_col (string, default 'Z'): the heading for the column containing Z (depth or elevation) data\n length_uom (string, default 'm'): the units of measure for the measured depths; should be 'm' or 'ft'\n md_domain (string, optional): the source of the original deviation data; may be 'logger' or 'driller'\n drilled (boolean, default False): True should be used for wells that have been drilled; False otherwise (planned,\n proposed, or a location being studied)\n z_inc_down (boolean, default True): indicates whether z values increase with depth; 
only used in the creation\n of a default coordinate reference system; ignored if crs_uuid is not None\n\n returns:\n tuple of lists of objects: (feature_list, interpretation_list, trajectory_list, md_datum_list),\n\n notes:\n ascii file must be table with first line being column headers, with columns for WELL, MD, X, Y & Z;\n actual column names can be set with optional arguments;\n all the objects are added to the model, with array data being written to the hdf5 file for the trajectories;\n the md_domain and drilled values are stored in the RESQML metadata but are only for human information and do not\n generally affect computations\n \"\"\"\n\n assert md_col and x_col and y_col and z_col\n md_col = str(md_col)\n x_col = str(x_col)\n y_col = str(y_col)\n z_col = str(z_col)\n if crs_uuid is None:\n crs_uuid = model.crs_uuid\n assert crs_uuid is not None, 'coordinate reference system not found when trying to add wells'\n\n try:\n df = pd.read_csv(trajectory_file,\n comment = comment_character,\n delim_whitespace = space_separated_instead_of_csv)\n if df is None:\n raise Exception\n except Exception:\n log.error('failed to read ascii deviation survey file: ' + str(trajectory_file))\n raise\n if well_col and well_col not in df.columns:\n log.warning('well column ' + str(well_col) + ' not found in ascii trajectory file: ' + str(trajectory_file))\n well_col = None\n if well_col is None:\n for col in df.columns:\n if str(col).upper().startswith('WELL'):\n well_col = str(col)\n break\n else:\n well_col = str(well_col)\n assert well_col\n unique_wells = set(df[well_col])\n if len(unique_wells) == 0:\n log.warning('no well data found in ascii trajectory file: ' + str(trajectory_file))\n # note: empty lists will be returned, below\n\n feature_list = []\n interpretation_list = []\n trajectory_list = []\n md_datum_list = []\n\n for well_name in unique_wells:\n\n log.debug('importing well: ' + str(well_name))\n # create single well data frame (assumes measured depths increasing)\n well_df = df[df[well_col] == well_name]\n # create a measured depth datum for the well and add as part\n first_row = well_df.iloc[0]\n if first_row[md_col] == 0.0:\n md_datum = MdDatum(model,\n crs_uuid = crs_uuid,\n location = (first_row[x_col], first_row[y_col], first_row[z_col]))\n else:\n md_datum = MdDatum(model, crs_uuid = crs_uuid,\n location = (first_row[x_col], first_row[y_col], 0.0)) # sea level datum\n md_datum.create_xml(title = str(well_name))\n md_datum_list.append(md_datum)\n\n # create a well feature and add as part\n feature = rqo.WellboreFeature(model, feature_name = well_name)\n feature.create_xml()\n feature_list.append(feature)\n\n # create interpretation and add as part\n interpretation = rqo.WellboreInterpretation(model, is_drilled = drilled, wellbore_feature = feature)\n interpretation.create_xml(title_suffix = None)\n interpretation_list.append(interpretation)\n\n # create trajectory, write arrays to hdf5 and add as part\n trajectory = Trajectory(model,\n md_datum = md_datum,\n data_frame = well_df,\n length_uom = length_uom,\n md_domain = md_domain,\n represented_interp = interpretation,\n well_name = well_name)\n trajectory.write_hdf5()\n trajectory.create_xml(title = well_name)\n trajectory_list.append(trajectory)\n\n return (feature_list, interpretation_list, trajectory_list, md_datum_list)\n\n\ndef well_name(well_object, model = None):\n \"\"\"Returns the 'best' citation title from the object or related well objects.\n\n arguments:\n well_object (object, uuid or root): Object for which a well 
name is required. Can be a\n Trajectory, WellboreInterpretation, WellboreFeature, BlockedWell, WellboreMarkerFrame,\n WellboreFrame, DeviationSurvey or MdDatum object\n model (model.Model, optional): required if passing a uuid or root; not recommended otherwise\n\n returns:\n string being the 'best' citation title to serve as a well name, form the object or some related objects\n\n note:\n xml and relationships must be established for this function to work\n \"\"\"\n\n def better_root(model, root_a, root_b):\n a = rqet.citation_title_for_node(root_a)\n b = rqet.citation_title_for_node(root_b)\n if a is None or len(a) == 0:\n return root_b\n if b is None or len(b) == 0:\n return root_a\n parts_like_a = model.parts(title = a)\n parts_like_b = model.parts(title = b)\n if len(parts_like_a) > 1 and len(parts_like_b) == 1:\n return root_b\n elif len(parts_like_b) > 1 and len(parts_like_a) == 1:\n return root_a\n a_digits = 0\n for c in a:\n if c.isdigit():\n a_digits += 1\n b_digits = 0\n for c in b:\n if c.isdigit():\n b_digits += 1\n if a_digits < b_digits:\n return root_b\n return root_a\n\n def best_root(model, roots_list):\n if len(roots_list) == 0:\n return None\n if len(roots_list) == 1:\n return roots_list[0]\n if len(roots_list) == 2:\n return better_root(model, roots_list[0], roots_list[1])\n return better_root(model, roots_list[0], best_root(model, roots_list[1:]))\n\n def best_root_for_object(well_object, model = None):\n\n if well_object is None:\n return None\n if model is None:\n model = well_object.model\n root_list = []\n obj_root = None\n obj_uuid = None\n obj_type = None\n traj_root = None\n\n if isinstance(well_object, str):\n obj_uuid = bu.uuid_from_string(well_object)\n assert obj_uuid is not None, 'well_name string argument could not be interpreted as uuid'\n well_object = obj_uuid\n if isinstance(well_object, bu.uuid.UUID):\n obj_uuid = well_object\n obj_root = model.root_for_uuid(obj_uuid)\n assert obj_root is not None, 'uuid not found in model when looking for well name'\n obj_type = rqet.node_type(obj_root)\n elif rqet.is_node(well_object):\n obj_root = well_object\n obj_type = rqet.node_type(obj_root)\n obj_uuid = rqet.uuid_for_part_root(obj_root)\n elif isinstance(well_object, Trajectory):\n obj_type = 'WellboreTrajectoryRepresentation'\n traj_root = well_object.root\n elif isinstance(well_object, rqo.WellboreFeature):\n obj_type = 'WellboreFeature'\n elif isinstance(well_object, rqo.WellboreInterpretation):\n obj_type = 'WellboreInterpretation'\n elif isinstance(well_object, BlockedWell):\n obj_type = 'BlockedWellboreRepresentation'\n if well_object.trajectory is not None:\n traj_root = well_object.trajectory.root\n elif isinstance(well_object, WellboreMarkerFrame): # note: trajectory might be None\n obj_type = 'WellboreMarkerFrameRepresentation'\n if well_object.trajectory is not None:\n traj_root = well_object.trajectory.root\n elif isinstance(well_object, WellboreFrame): # note: trajectory might be None\n obj_type = 'WellboreFrameRepresentation'\n if well_object.trajectory is not None:\n traj_root = well_object.trajectory.root\n elif isinstance(well_object, DeviationSurvey):\n obj_type = 'DeviationSurveyRepresentation'\n elif isinstance(well_object, MdDatum):\n obj_type = 'MdDatum'\n\n assert obj_type is not None, 'argument type not recognized for well_name'\n if obj_type.startswith('obj_'):\n obj_type = obj_type[4:]\n if obj_uuid is None:\n obj_uuid = well_object.uuid\n obj_root = model.root_for_uuid(obj_uuid)\n\n if obj_type == 'WellboreFeature':\n interp_parts 
= model.parts(obj_type = 'WellboreInterpretation')\n interp_parts = model.parts_list_filtered_by_related_uuid(interp_parts, obj_uuid)\n all_parts = interp_parts\n all_traj_parts = model.parts(obj_type = 'WellboreTrajectoryRepresentation')\n if interp_parts is not None:\n for part in interp_parts:\n traj_parts = model.parts_list_filtered_by_related_uuid(all_traj_parts, model.uuid_for_part(part))\n all_parts += traj_parts\n if all_parts is not None:\n root_list = [model.root_for_part(part) for part in all_parts]\n elif obj_type == 'WellboreInterpretation':\n feat_roots = model.roots(obj_type = 'WellboreFeature', related_uuid = obj_uuid) # should return one root\n traj_roots = model.roots(obj_type = 'WellboreTrajectoryRepresentation', related_uuid = obj_uuid)\n root_list = feat_roots + traj_roots\n elif obj_type == 'WellboreTrajectoryRepresentation':\n interp_parts = model.parts(obj_type = 'WellboreInterpretation')\n interp_parts = model.parts_list_filtered_by_related_uuid(interp_parts, obj_uuid)\n all_parts = interp_parts\n all_feat_parts = model.parts(obj_type = 'WellboreFeature')\n if interp_parts is not None:\n for part in interp_parts:\n feat_parts = model.parts_list_filtered_by_related_uuid(all_feat_parts, model.uuid_for_part(part))\n all_parts += feat_parts\n if all_parts is not None:\n root_list = [model.root_for_part(part) for part in all_parts]\n elif obj_type in [\n 'BlockedWellboreRepresentation', 'WellboreMarkerFrameRepresentation', 'WellboreFrameRepresentation'\n ]:\n if traj_root is None:\n traj_root = model.root(obj_type = 'WellboreTrajectoryRepresentation', related_uuid = obj_uuid)\n root_list = [best_root_for_object(traj_root, model = model)]\n elif obj_type == 'DeviationSurveyRepresentation':\n root_list = [best_root_for_object(model.root(obj_type = 'MdDatum', related_uuid = obj_uuid), model = model)]\n elif obj_type == 'MdDatum':\n pass\n\n root_list.append(obj_root)\n\n return best_root(model, root_list)\n\n return rqet.citation_title_for_node(best_root_for_object(well_object, model = model))\n\n\ndef add_blocked_wells_from_wellspec(model, grid, wellspec_file):\n \"\"\"Add a blocked well for each well in a Nexus WELLSPEC file.\n\n arguments:\n model (model.Model object): model to which blocked wells are added\n grid (grid.Grid object): grid against which wellspec data will be interpreted\n wellspec_file (string): path of ascii file holding Nexus WELLSPEC keyword and data\n\n returns:\n int: count of number of blocked wells created\n\n notes:\n this function appends to the hdf5 file and creates xml for the blocked wells (but does not store epc);\n 'simulation' trajectory and measured depth datum objects will also be created\n \"\"\"\n\n well_list_dict = wsk.load_wellspecs(wellspec_file, column_list = None)\n\n count = 0\n for well in well_list_dict:\n log.info('processing well: ' + str(well))\n bw = BlockedWell(model,\n grid = grid,\n wellspec_file = wellspec_file,\n well_name = well,\n check_grid_name = True,\n use_face_centres = True)\n if not bw.node_count: # failed to load from wellspec, eg. 
because of no perforations in grid\n log.warning('no wellspec data loaded for well: ' + str(well))\n continue\n bw.write_hdf5(model.h5_file_name(), mode = 'a', create_for_trajectory_if_needed = True)\n bw.create_xml(model.h5_uuid(), title = well)\n count += 1\n\n log.info(f'{count} blocked wells created based on wellspec file: {wellspec_file}')\n\n\ndef extract_xyz(xyz_node):\n \"\"\"Extracts an x,y,z coordinate from a solitary point xml node.\n\n argument:\n xyz_node: the xml node representing the solitary point (in 3D space)\n\n returns:\n triple float: (x, y, z) coordinates as a tuple\n \"\"\"\n\n if xyz_node is None:\n return None\n xyz = np.zeros(3)\n for axis in range(3):\n xyz[axis] = rqet.find_tag_float(xyz_node, 'Coordinate' + str(axis + 1), must_exist = True)\n return tuple(xyz)\n\n\ndef well_names_in_cellio_file(cellio_file):\n \"\"\"Returns a list of well names as found in the RMS blocked well export cell I/O file.\"\"\"\n\n well_list = []\n with open(cellio_file, 'r') as fp:\n while True:\n kf.skip_blank_lines_and_comments(fp)\n line = fp.readline() # file format version number?\n if line == '':\n break # end of file\n fp.readline() # 'Undefined'\n words = fp.readline().split()\n assert len(words), 'missing header info (well name) in cell I/O file'\n well_list.append(words[0])\n while not kf.blank_line(fp):\n fp.readline() # skip to block of data for next well\n return well_list\n\n\n# 'private' functions\n\n\ndef load_hdf5_array(object, node, array_attribute, tag = 'Values', dtype = 'float', model = None):\n \"\"\"Loads the property array data as an attribute of object, from the hdf5 referenced in xml node.\n\n :meta private:\n \"\"\"\n\n assert (rqet.node_type(node) in ['DoubleHdf5Array', 'IntegerHdf5Array', 'Point3dHdf5Array'])\n if model is None:\n model = object.model\n h5_key_pair = model.h5_uuid_and_path_for_node(node, tag = tag)\n if h5_key_pair is None:\n return None\n return model.h5_array_element(h5_key_pair,\n index = None,\n cache_array = True,\n dtype = dtype,\n object = object,\n array_attribute = array_attribute)\n\n\ndef find_entry_and_exit(cp, entry_vector, exit_vector, well_name):\n \"\"\"Returns (entry_axis, entry_polarity, entry_xyz, exit_axis, exit_polarity, exit_xyz).\n\n :meta private:\n \"\"\"\n\n cell_centre = np.mean(cp, axis = (0, 1, 2))\n face_triangles = gf.triangles_for_cell_faces(cp).reshape(-1, 3, 3) # flattened first index 4 values per face\n entry_points = intersect.line_triangles_intersects(cell_centre, entry_vector, face_triangles, line_segment = True)\n entry_axis = entry_polarity = entry_xyz = exit_xyz = None\n for t in range(24):\n if not np.any(np.isnan(entry_points[t])):\n entry_xyz = entry_points[t]\n entry_axis = t // 8\n entry_polarity = (t - 8 * entry_axis) // 4\n break\n assert entry_axis is not None, 'failed to find entry face for a perforation in well ' + str(well_name)\n exit_points = intersect.line_triangles_intersects(cell_centre, exit_vector, face_triangles, line_segment = True)\n exit_axis = exit_polarity = None\n for t in range(24):\n if not np.any(np.isnan(exit_points[t])):\n exit_xyz = entry_points[t]\n exit_axis = t // 8\n exit_polarity = (t - 8 * exit_axis) // 4\n break\n assert exit_axis is not None, 'failed to find exit face for a perforation in well ' + str(well_name)\n\n return (entry_axis, entry_polarity, entry_xyz, exit_axis, exit_polarity, exit_xyz)\n\n\ndef _as_optional_array(arr):\n \"\"\"If not None, cast as numpy array.\n\n Casting directly to an array can be problematic: np.array(None) creates an unsized 
array, which is potentially\n confusing.\n \"\"\"\n if arr is None:\n return None\n else:\n return np.array(arr)\n\n\ndef _pl(i, e = False):\n return '' if i == 1 else 'es' if e else 's'\n",
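The two wellbore import routes above (ascii deviation surveys and Nexus WELLSPEC data) are typically combined into a small workflow. The sketch below is illustrative only and not part of the library source: the module alias resqpy.well, the input file names 'wells.csv' and 'wellspec.dat', and the pre-existing 'example.epc' dataset (assumed to already contain an IJK grid and a crs) are all assumptions.

import resqpy.model as rq
import resqpy.well as rqw  # assumed import path for the well module shown above

# open an existing model which already holds an IJK grid and a coordinate reference system
model = rq.Model('example.epc')

# create md datum, wellbore feature, interpretation and trajectory objects for each well
# found in the ascii deviation file (columns WELL, MD, X, Y, Z by default)
features, interps, trajectories, datums = rqw.add_wells_from_ascii_file(
    model, model.crs_uuid, 'wells.csv', length_uom = 'm', drilled = True)

# block each well listed in the Nexus WELLSPEC file against the model's grid;
# this appends array data to the model's hdf5 file and creates the blocked well xml
rqw.add_blocked_wells_from_wellspec(model, model.grid(), 'wellspec.dat')

# the functions above do not store the epc, so persist the new parts explicitly
model.store_epc('example.epc')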
"\"\"\"rq_import.py: Module to import a nexus corp grid & properties, or vdb, or vdb ensemble into resqml format.\"\"\"\n\nversion = '22nd October 2021'\n\n# Nexus is a registered trademark of the Halliburton Company\n# RMS and GOCAD are trademarks of Emerson\n\nimport logging\n\nlog = logging.getLogger(__name__)\nlog.debug('rq_import.py version ' + version)\n\nimport glob\nimport os\n\nimport numpy as np\nimport numpy.ma as ma\n\nimport resqpy.crs as rqc\nimport resqpy.grid as grr\nimport resqpy.model as rq\nimport resqpy.olio.ab_toolbox as abt\nimport resqpy.olio.load_data as ld\nimport resqpy.olio.trademark as tm\nimport resqpy.olio.uuid as bu\nimport resqpy.olio.vdb as vdb\nimport resqpy.olio.vector_utilities as vec\n# import resqpy.olio.grid_functions as gf\nimport resqpy.olio.write_data as wd\nimport resqpy.olio.xml_et as rqet\nimport resqpy.organize as rqo\nimport resqpy.property as rp\nimport resqpy.surface as rqs\nimport resqpy.time_series as rts\nimport resqpy.weights_and_measures as bwam\n\n\ndef import_nexus(\n resqml_file_root, # output path and file name without .epc or .h5 extension\n extent_ijk = None, # 3 element numpy vector\n vdb_file = None, # vdb input file: either this or corp_file should be not None\n vdb_case = None, # if None, first case in vdb is used (usually a vdb only holds one case)\n corp_file = None, # corp ascii input file: nexus corp data without keyword\n corp_bin_file = None, # corp binary file: nexus corp data in bespoke binary format\n corp_xy_units = 'm',\n corp_z_units = 'm',\n corp_z_inc_down = True,\n ijk_handedness = 'right',\n corp_eight_mode = False,\n geometry_defined_everywhere = True,\n treat_as_nan = None,\n active_mask_file = None,\n use_binary = False, # this refers to pure binary arrays, not corp bin format\n resqml_xy_units = 'm',\n resqml_z_units = 'm',\n resqml_z_inc_down = True,\n shift_to_local = False,\n local_origin_place = 'centre', # 'centre' or 'minimum'\n max_z_void = 0.1, # vertical gaps greater than this will introduce k gaps intp resqml grid\n split_pillars = True,\n split_tolerance = 0.01, # applies to each of x, y, z differences\n property_array_files = None, # actually, list of (filename, keyword, uom, time_index, null_value, discrete)\n summary_file = None, # used to extract timestep dates when loading recurrent data from vdb\n vdb_static_properties = True, # if True, static vdb properties are imported (only relevant if vdb_file is not None)\n vdb_recurrent_properties = False,\n timestep_selection = 'all', # 'first', 'last', 'first and last', 'all', or list of ints being reporting timestep numbers\n use_compressed_time_series = True,\n decoarsen = True, # where ICOARSE is present, redistribute data to uncoarse cells\n ab_property_list = None, # list of (file_name, keyword, property_kind, facet_type, facet, uom, time_index, null_value, discrete)\n create_property_set = False,\n ensemble_case_dirs_root = None, # path upto but excluding realisation number\n ensemble_property_dictionary = None, # dictionary mapping title (or keyword) to (filename, property_kind, facet_type, facet,\n # uom, time_index, null_value, discrete)\n ensemble_size_limit = None,\n grid_title = 'ROOT',\n mode = 'w',\n progress_fn = None):\n \"\"\"Read a simulation grid geometry and optionally grid properties and return a resqml model in memory & written to\n disc.\n\n Input may be from nexus ascii input files, or nexus vdb output.\n \"\"\"\n\n if resqml_file_root.endswith('.epc'):\n resqml_file_root = resqml_file_root[:-4]\n assert mode in ['w', 
'a']\n\n if vdb_file:\n using_vdb = True\n corp_file = corp_bin_file = None\n grid_title = grid_title.upper()\n log.info('starting import of Nexus ' + str(grid_title) + ' corp from vdb ' + str(vdb_file))\n tm.log_nexus_tm('info')\n vdbase = vdb.VDB(vdb_file)\n case_list = vdbase.cases()\n assert len(case_list) > 0, 'no cases found in vdb'\n if vdb_case is None:\n vdb_case = case_list[0]\n else:\n assert vdb_case in case_list, 'case ' + vdb_case + ' not found in vdb: ' + vdb_file\n vdbase.set_use_case(vdb_case)\n assert grid_title in vdbase.list_of_grids(), 'grid ' + str(grid_title) + ' not found in vdb'\n if extent_ijk is not None:\n vdbase.set_extent_kji(tuple(reversed(extent_ijk)))\n log.debug('using case ' + vdb_case + ' and grid ' + grid_title + ' from vdb')\n if vdb_recurrent_properties and not summary_file:\n if vdb_file.endswith('.vdb.zip'):\n summary_file = vdb_file[:-8] + '.sum'\n elif vdb_file.endswith('.vdb') or vdb_file.endswith('.zip'):\n summary_file = vdb_file[:-4] + '.sum'\n else:\n sep = vdb_file.rfind(os.sep)\n dot = vdb_file[sep + 1:].find('.')\n if dot > 0:\n summary_file = vdb_file[:sep + 1 + dot] + ',sum'\n else:\n summary_file = vdb_file + '.sum'\n cp_array = vdbase.grid_corp(grid_title)\n cp_extent_kji = cp_array.shape[:3]\n if cp_extent_kji[:2] == (1, 1): # auto determination of extent failed\n assert extent_ijk is not None, 'failed to determine extent of grid from corp data'\n (ni, nj, nk) = extent_ijk\n assert cp_extent_kji[2] == ni * nj * nk, 'number of cells in grid corp does not match extent'\n cp_extent = (nk, nj, ni, 2, 2, 2, 3) # (nk, nj, ni, kp, jp, ip, xyz)\n cp_array = cp_array.reshape(cp_extent)\n elif extent_ijk is not None:\n for axis in range(3):\n assert cp_extent_kji[axis] == extent_ijk[\n 2 - axis], 'extent of grid corp data from vdb does not match that supplied'\n\n elif corp_file or corp_bin_file:\n if corp_bin_file:\n corp_file = None\n using_vdb = False\n # geometry_defined_everywhere = (active_mask_file is None)\n log.info('starting import of Nexus corp file ' + str(corp_file if corp_file else corp_bin_file))\n tm.log_nexus_tm('info')\n if extent_ijk is None: # auto detect extent\n extent_kji = None\n cp_extent = None\n else:\n (ni, nj, nk) = extent_ijk\n extent_kji = np.array((nk, nj, ni), dtype = 'int')\n cp_extent = (nk, nj, ni, 2, 2, 2, 3) # (nk, nj, ni, kp, jp, ip, xyz)\n log.debug('reading and resequencing corp data')\n if corp_bin_file: # bespoke nexus corp bin format, not to be confused with pure binary files used below\n cp_array = ld.load_corp_array_from_file(\n corp_bin_file,\n extent_kji,\n corp_bin = True,\n comment_char = None, # comment char will be detected automatically\n data_free_of_comments = False,\n use_binary = use_binary)\n else:\n cp_binary_file = abt.cp_binary_filename(\n corp_file, nexus_ordering = False) # pure binary, not bespoke corp bin used above\n recent_binary_exists = ld.file_exists(cp_binary_file, must_be_more_recent_than_file = corp_file)\n cp_array = None\n if use_binary and (extent_ijk is not None) and recent_binary_exists:\n try:\n cp_array = ld.load_array_from_file(cp_binary_file, cp_extent, use_binary = True)\n except Exception:\n cp_array = None\n if cp_array is None:\n cp_array = ld.load_corp_array_from_file(\n corp_file,\n extent_kji,\n corp_bin = False,\n comment_char = None, # comment char will be detected automatically\n data_free_of_comments = False,\n use_binary = use_binary)\n if use_binary:\n wd.write_pure_binary_data(cp_binary_file,\n cp_array) # NB: this binary file is resequenced, not in 
nexus ordering!\n\n else:\n raise ValueError('vdb_file and corp_file are both None in import_nexus() call')\n\n if cp_array is None:\n log.error('failed to create corner point array')\n return None\n\n if extent_ijk is None:\n cp_extent = cp_array.shape\n extent_kji = cp_extent[:3]\n (nk, nj, ni) = extent_kji\n extent_ijk = (ni, nj, nk)\n else:\n ni, nj, nk = extent_ijk\n\n # convert units\n log.debug('Converting units')\n if corp_xy_units == corp_z_units and resqml_xy_units == resqml_z_units:\n bwam.convert_lengths(cp_array, corp_xy_units, resqml_xy_units)\n else:\n bwam.convert_lengths(cp_array[:, :, :, :, :, :, 0:1], corp_xy_units, resqml_xy_units)\n bwam.convert_lengths(cp_array[:, :, :, :, :, :, 2], corp_z_units, resqml_z_units)\n\n # invert z if required\n if resqml_z_inc_down != corp_z_inc_down:\n log.debug('Inverting z values')\n inversion = np.negative(cp_array[:, :, :, :, :, :, 2])\n cp_array[:, :, :, :, :, :, 2] = inversion\n\n # read active cell mask\n log.debug('Setting up active cell mask')\n active_mask = inactive_mask = None\n if vdb_file:\n assert vdbase is not None, 'problem with vdb object'\n inactive_mask = vdbase.grid_kid_inactive_mask(grid_title) # TODO: check conversion of KID to boolean for LGRs\n if inactive_mask is not None:\n log.debug('using kid array as inactive cell mask')\n active_mask = np.logical_not(inactive_mask)\n else:\n log.warning('kid array not found, using unpack array as active cell indicator')\n unp = vdbase.grid_unpack(grid_title)\n assert unp is not None, 'failed to load active cell indicator mask from vdb kid or unpack arrays'\n active_mask = np.empty((nk, nj, ni), dtype = 'bool')\n active_mask[:] = (unp > 0)\n inactive_mask = np.logical_not(active_mask)\n elif active_mask_file:\n active_mask = ld.load_array_from_file(active_mask_file, extent_kji, data_type = 'bool', use_binary = use_binary)\n if active_mask is None:\n log.error('failed to load active cell indicator array from file: ' + active_mask_file)\n else:\n inactive_mask = np.logical_not(active_mask) # will crash if active mask load failed\n\n # shift grid geometry to local crs\n local_origin = np.zeros(3)\n if shift_to_local:\n log.debug('shifting to local origin at ' + local_origin_place)\n if local_origin_place == 'centre':\n local_origin = np.nanmean(cp_array, axis = (0, 1, 2, 3, 4, 5))\n elif local_origin_place == 'minimum':\n local_origin = np.nanmin(cp_array, axis = (0, 1, 2, 3, 4, 5)) - 1.0 # The -1 ensures all coords are >0\n else:\n assert (False)\n cp_array -= local_origin\n\n # create empty resqml model\n log.debug('creating an empty resqml model')\n if mode == 'w':\n model = rq.Model(resqml_file_root, new_epc = True, create_basics = True, create_hdf5_ext = True)\n else:\n model = rq.Model(resqml_file_root)\n assert model is not None\n ext_uuid = model.h5_uuid()\n assert ext_uuid is not None\n\n # create coodinate reference system (crs) in model and set references in grid object\n log.debug('creating coordinate reference system')\n crs_uuids = model.uuids(obj_type = 'LocalDepth3dCrs')\n new_crs = rqc.Crs(model,\n x_offset = local_origin[0],\n y_offset = local_origin[1],\n z_offset = local_origin[2],\n xy_units = resqml_xy_units,\n z_units = resqml_z_units,\n z_inc_down = resqml_z_inc_down)\n new_crs.create_xml(reuse = True)\n crs_uuid = new_crs.uuid\n\n grid = grid_from_cp(model,\n cp_array,\n crs_uuid,\n active_mask = active_mask,\n geometry_defined_everywhere = geometry_defined_everywhere,\n treat_as_nan = treat_as_nan,\n max_z_void = max_z_void,\n split_pillars = 
split_pillars,\n split_tolerance = split_tolerance,\n ijk_handedness = ijk_handedness,\n known_to_be_straight = False)\n\n # create hdf5 file using arrays cached in grid above\n log.info('writing grid geometry to hdf5 file ' + resqml_file_root + '.h5')\n grid.write_hdf5_from_caches(resqml_file_root + '.h5', mode = mode, write_active = False)\n\n # build xml for grid geometry\n log.debug('building xml for grid')\n ijk_node = grid.create_xml(ext_uuid = None, title = grid_title, add_as_part = True, add_relationships = True)\n assert ijk_node is not None, 'failed to create IjkGrid node in xml tree'\n\n # impprt property arrays into a collection\n prop_import_collection = None\n decoarsen_array = None\n ts_node = None\n ts_uuid = None\n\n if active_mask is None and grid.inactive is not None:\n active_mask = np.logical_not(grid.inactive)\n\n if using_vdb:\n prop_import_collection = rp.GridPropertyCollection()\n if vdb_static_properties:\n props = vdbase.grid_list_of_static_properties(grid_title)\n if len(props) > 0:\n prop_import_collection = rp.GridPropertyCollection()\n prop_import_collection.set_grid(grid)\n for keyword in props:\n prop_import_collection.import_vdb_static_property_to_cache(vdbase, keyword, grid_name = grid_title)\n# if active_mask is not None:\n# prop_import_collection.add_cached_array_to_imported_list(active_mask, active_mask_file, 'ACTIVE', property_kind = 'active',\n# discrete = True, uom = None, time_index = None, null_value = None)\n\n elif property_array_files is not None and len(property_array_files) > 0:\n prop_import_collection = rp.GridPropertyCollection()\n prop_import_collection.set_grid(grid)\n for (p_filename, p_keyword, p_uom, p_time_index, p_null_value, p_discrete) in property_array_files:\n prop_import_collection.import_nexus_property_to_cache(p_filename,\n p_keyword,\n grid.extent_kji,\n discrete = p_discrete,\n uom = p_uom,\n time_index = p_time_index,\n null_value = p_null_value,\n use_binary = use_binary)\n# if active_mask is not None:\n# prop_import_collection.add_cached_array_to_imported_list(active_mask, active_mask_file, 'ACTIVE', property_kind = 'active',\n# discrete = True, uom = None, time_index = None, null_value = None)\n\n# ab_property_list: list of (filename, keyword, property_kind, facet_type, facet, uom, time_index, null_value, discrete)\n elif ab_property_list is not None and len(ab_property_list) > 0:\n prop_import_collection = rp.GridPropertyCollection()\n prop_import_collection.set_grid(grid)\n for (p_filename, p_keyword, p_property_kind, p_facet_type, p_facet, p_uom, p_time_index, p_null_value,\n p_discrete) in ab_property_list:\n prop_import_collection.import_ab_property_to_cache(p_filename,\n p_keyword,\n grid.extent_kji,\n discrete = p_discrete,\n property_kind = p_property_kind,\n facet_type = p_facet_type,\n facet = p_facet,\n uom = p_uom,\n time_index = p_time_index,\n null_value = p_null_value)\n# if active_mask is not None:\n# prop_import_collection.add_cached_array_to_imported_list(active_mask, active_mask_file, 'ACTIVE', property_kind = 'active',\n# discrete = True, uom = None, time_index = None, null_value = None)\n\n# ensemble_property_dictionary: mapping title (or keyword) to\n# (filename, property_kind, facet_type, facet, uom, time_index, null_value, discrete)\n elif ensemble_case_dirs_root and ensemble_property_dictionary:\n case_path_list = glob.glob(ensemble_case_dirs_root + '*')\n assert len(case_path_list) > 0, 'no case directories found with path starting: ' + str(ensemble_case_dirs_root)\n case_number_place = 
len(ensemble_case_dirs_root)\n case_zero_used = False\n case_count = 0\n for case_path in case_path_list:\n if ensemble_size_limit is not None and case_count >= ensemble_size_limit:\n log.warning('stopping after reaching ensemble size limit')\n break\n # NB. import each case individually rather than holding property arrays for whole ensemble in memory at once\n prop_import_collection = rp.GridPropertyCollection()\n prop_import_collection.set_grid(grid)\n tail = case_path[case_number_place:]\n try:\n case_number = int(tail)\n assert case_number >= 0, 'negative case number encountered'\n if case_number == 0:\n assert not case_zero_used, 'more than one case number evaluated to zero'\n case_zero_used = True\n except Exception:\n log.error('failed to determine case number for tail: ' + str(tail))\n continue\n for keyword in ensemble_property_dictionary.keys():\n (filename, p_property_kind, p_facet_type, p_facet, p_uom, p_time_index, p_null_value,\n p_discrete) = ensemble_property_dictionary[keyword]\n p_filename = os.path.join(case_path, filename)\n if not os.path.exists(p_filename):\n log.error('missing property file: ' + p_filename)\n continue\n prop_import_collection.import_nexus_property_to_cache(p_filename,\n keyword,\n grid.extent_kji,\n discrete = p_discrete,\n uom = p_uom,\n time_index = p_time_index,\n null_value = p_null_value,\n property_kind = p_property_kind,\n facet_type = p_facet_type,\n facet = p_facet,\n realization = case_number,\n use_binary = False)\n if len(prop_import_collection.imported_list) > 0:\n # create hdf5 file using arrays cached in grid above\n log.info('writing properties to hdf5 file ' + str(resqml_file_root) + '.h5 for case: ' +\n str(case_number))\n grid.write_hdf5_from_caches(resqml_file_root + '.h5',\n geometry = False,\n imported_properties = prop_import_collection,\n write_active = False)\n # add imported properties parts to model, building property parts list\n prop_import_collection.create_xml_for_imported_list_and_add_parts_to_model(ext_uuid,\n time_series_uuid = ts_uuid)\n if create_property_set:\n prop_import_collection.create_property_set_xml('realisation ' + str(case_number))\n case_count += 1\n # remove cached static property arrays from memory\n\n\n# prop_import_collection.remove_all_cached_arrays()\n del prop_import_collection\n prop_import_collection = None\n log.info(f'Nexus ascii ensemble input processed {case_count} cases')\n tm.log_nexus_tm('info')\n\n # create hdf5 file using arrays cached in grid above\n if prop_import_collection is not None and len(prop_import_collection.imported_list) > 0:\n if decoarsen:\n decoarsen_array = prop_import_collection.decoarsen_imported_list()\n if decoarsen_array is not None:\n log.info('static properties decoarsened')\n prop_import_collection.add_cached_array_to_imported_list(decoarsen_array,\n 'decoarsen',\n 'DECOARSEN',\n discrete = True,\n uom = None,\n time_index = None,\n null_value = -1,\n property_kind = 'discrete')\n log.info('writing ' + str(len(prop_import_collection.imported_list)) + ' properties to hdf5 file ' +\n resqml_file_root + '.h5')\n elif not ensemble_case_dirs_root:\n log.info('no static grid properties to import')\n prop_import_collection = None\n grid.write_hdf5_from_caches(resqml_file_root + '.h5',\n geometry = False,\n imported_properties = prop_import_collection,\n write_active = True)\n # remove cached static property arrays from memory\n if prop_import_collection is not None:\n prop_import_collection.remove_all_cached_arrays()\n\n ts_selection = None\n if using_vdb and 
vdb_recurrent_properties and timestep_selection is not None and str(timestep_selection) != 'none':\n if prop_import_collection is None:\n prop_import_collection = rp.GridPropertyCollection()\n prop_import_collection.set_grid(grid)\n # extract timestep dates from summary file (this info might be hidden in the recurrent binary files but I couldn't find it\n # todo: create cut down time series from recurrent files and differentiate between reporting time index and mapped time step number\n full_time_series = rts.time_series_from_nexus_summary(summary_file)\n if full_time_series is None:\n log.error('failed to fetch time series from Nexus summary file; recurrent data excluded')\n tm.log_nexus_tm('error')\n else:\n full_time_series.set_model(model)\n timestep_list = vdbase.grid_list_of_timesteps(\n grid_title) # get list of timesteps for which recurrent files exist\n recur_time_series = None\n for timestep_number in timestep_list:\n if isinstance(timestep_selection, list):\n if timestep_number not in timestep_selection:\n continue\n else:\n if timestep_selection == 'first':\n if timestep_number != timestep_list[0]:\n break\n elif timestep_selection == 'last':\n if timestep_number != timestep_list[-1]:\n continue\n elif timestep_selection == 'first and last':\n if timestep_number != timestep_list[0] and timestep_number != timestep_list[-1]:\n continue\n # default to importing all timesteps\n stamp = full_time_series.timestamp(timestep_number)\n if stamp is None:\n log.error('timestamp number for which recurrent data exists was not found in summary file: ' +\n str(timestep_number))\n continue\n recur_prop_list = vdbase.grid_list_of_recurrent_properties(grid_title, timestep_number)\n common_recur_prop_set = set()\n if recur_time_series is None:\n recur_time_series = rts.TimeSeries(model, first_timestamp = stamp)\n if recur_prop_list is not None:\n common_recur_prop_set = set(recur_prop_list)\n else:\n recur_time_series.add_timestamp(stamp)\n if recur_prop_list is not None:\n common_recur_prop_set = common_recur_prop_set.intersection(set(recur_prop_list))\n step_import_collection = rp.GridPropertyCollection()\n step_import_collection.set_grid(grid)\n # for each property for this timestep, cache array and add to recur prop import collection for this time step\n if recur_prop_list:\n for keyword in recur_prop_list:\n if not keyword or not keyword.isalnum():\n continue\n step_import_collection.import_vdb_recurrent_property_to_cache(vdbase,\n timestep_number,\n keyword,\n grid_name = grid_title)\n # extend hdf5 with cached arrays for this timestep\n log.info('number of recurrent grid property arrays for timestep: ' + str(timestep_number) + ' is: ' +\n str(step_import_collection.number_of_imports()))\n if decoarsen_array is not None:\n log.info('decoarsening recurrent properties for timestep: ' + str(timestep_number))\n step_import_collection.decoarsen_imported_list(decoarsen_array = decoarsen_array)\n log.info('extending hdf5 file with recurrent properties for timestep: ' + str(timestep_number))\n grid.write_hdf5_from_caches(resqml_file_root + '.h5',\n mode = 'a',\n geometry = False,\n imported_properties = step_import_collection,\n write_active = False)\n # add imported list for this timestep to full imported list\n prop_import_collection.inherit_imported_list_from_other_collection(step_import_collection)\n log.debug('total number of property arrays after timestep: ' + str(timestep_number) + ' is: ' +\n str(prop_import_collection.number_of_imports()))\n # remove cached copies of arrays\n 
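# (the hdf5 data for this timestep has already been written above, so the cached arrays are no longer needed)\n                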
step_import_collection.remove_all_cached_arrays()\n\n ts_node = full_time_series.create_xml(title = 'simulator full timestep series')\n model.time_series = ts_node # save as the primary time series for the model\n ts_uuid = rqet.uuid_for_part_root(ts_node)\n # create xml for recur_time_series (as well as for full_time_series) and add as part; not needed?\n if recur_time_series is not None:\n rts_node = recur_time_series.create_xml(title = 'simulator recurrent array timestep series')\n if use_compressed_time_series:\n ts_uuid = rqet.uuid_for_part_root(rts_node)\n ts_selection = timestep_list\n\n # add imported properties parts to model, building property parts list\n if prop_import_collection is not None and prop_import_collection.imported_list is not None:\n prop_import_collection.set_grid(grid) # update to pick up on recently created xml root node for grid\n prop_import_collection.create_xml_for_imported_list_and_add_parts_to_model(\n ext_uuid, time_series_uuid = ts_uuid, selected_time_indices_list = ts_selection)\n if create_property_set:\n prop_import_collection.create_property_set_xml('property set for import for grid ' + str(grid_title))\n\n # mark model as modified (will already have happened anyway)\n model.set_modified()\n\n # create epc file\n log.info('storing model in epc file ' + resqml_file_root + '.epc')\n model.store_epc(resqml_file_root + '.epc')\n\n # return resqml model\n return model\n\n\ndef import_vdb_all_grids(\n resqml_file_root, # output path and file name without .epc or .h5 extension\n extent_ijk = None, # 3 element numpy vector applicable to ROOT\n vdb_file = None,\n vdb_case = None, # if None, first case in vdb is used (usually a vdb only holds one case)\n corp_xy_units = 'm',\n corp_z_units = 'm',\n corp_z_inc_down = True,\n ijk_handedness = 'right',\n geometry_defined_everywhere = True,\n treat_as_nan = None,\n resqml_xy_units = 'm',\n resqml_z_units = 'm',\n resqml_z_inc_down = True,\n shift_to_local = False,\n local_origin_place = 'centre', # 'centre' or 'minimum'\n max_z_void = 0.1, # vertical gaps greater than this will introduce k gaps intp resqml grid\n split_pillars = True,\n split_tolerance = 0.01, # applies to each of x, y, z differences\n vdb_static_properties = True, # if True, static vdb properties are imported (only relevant if vdb_file is not None)\n vdb_recurrent_properties = False,\n decoarsen = True,\n timestep_selection = 'all', # 'first', 'last', 'first and last', 'all', or list of ints being reporting timestep numbers\n create_property_set = False):\n \"\"\"Creates a RESQML dataset containing grids and grid properties, including LGRs, for a single realisation.\"\"\"\n\n vdbase = vdb.VDB(vdb_file)\n case_list = vdbase.cases()\n assert len(case_list) > 0, 'no cases found in vdb'\n if vdb_case is None:\n vdb_case = case_list[0]\n else:\n assert vdb_case in case_list, 'case ' + vdb_case + ' not found in vdb: ' + vdb_file\n vdbase.set_use_case(vdb_case)\n grid_list = vdbase.list_of_grids()\n index = 0\n for grid_name in grid_list:\n if grid_name.upper().startswith('SMALLGRIDS'):\n log.warning('vdb import skipping small grids')\n continue\n log.debug('importing vdb data for grid ' + str(grid_name))\n import_nexus(\n resqml_file_root,\n extent_ijk = extent_ijk if grid_name == 'ROOT' else None, # 3 element numpy vector applicable to ROOT\n vdb_file = vdb_file,\n vdb_case = vdb_case, # if None, first case in vdb is used (usually a vdb only holds one case)\n corp_xy_units = corp_xy_units,\n corp_z_units = corp_z_units,\n corp_z_inc_down = 
corp_z_inc_down,\n ijk_handedness = ijk_handedness,\n geometry_defined_everywhere = geometry_defined_everywhere,\n treat_as_nan = treat_as_nan,\n resqml_xy_units = resqml_xy_units,\n resqml_z_units = resqml_z_units,\n resqml_z_inc_down = resqml_z_inc_down,\n shift_to_local = shift_to_local,\n local_origin_place = local_origin_place, # 'centre' or 'minimum'\n max_z_void = max_z_void, # vertical gaps greater than this will introduce k gaps intp resqml grid\n split_pillars = split_pillars, # NB: some LGRs may be unsplit even if ROOT is split\n split_tolerance = split_tolerance, # applies to each of x, y, z differences\n vdb_static_properties = vdb_static_properties, # if True, static vdb properties are imported\n vdb_recurrent_properties = vdb_recurrent_properties,\n decoarsen = decoarsen,\n timestep_selection = timestep_selection,\n create_property_set = create_property_set,\n grid_title = grid_name,\n mode = 'w' if index == 0 else 'a')\n index += 1\n\n\ndef import_vdb_ensemble(\n epc_file,\n ensemble_run_dir,\n existing_epc = False,\n keyword_list = None,\n property_kind_list = None,\n vdb_static_properties = True, # if True, static vdb properties are imported\n vdb_recurrent_properties = True,\n decoarsen = True,\n timestep_selection = 'all',\n create_property_set_per_realization = True,\n create_property_set_per_timestep = True,\n create_complete_property_set = False,\n # remaining arguments only used if existing_epc is False\n extent_ijk = None, # 3 element numpy vector\n corp_xy_units = 'metres',\n corp_z_units = 'metres',\n corp_z_inc_down = True,\n ijk_handedness = 'right',\n geometry_defined_everywhere = True,\n treat_as_nan = None,\n resqml_xy_units = 'metres',\n resqml_z_units = 'metres',\n resqml_z_inc_down = True,\n shift_to_local = True,\n local_origin_place = 'centre', # 'centre' or 'minimum'\n max_z_void = 0.1, # import will fail if vertical void greater than this is encountered\n split_pillars = True,\n split_tolerance = 0.01, # applies to each of x, y, z differences\n progress_fn = None):\n \"\"\"Adds properties from all vdb's within an ensemble directory tree to a single RESQML dataset, referencing a shared\n grid.\n\n args:\n epc_file (string): filename of epc file to be extended with ensemble properties\n ensemble_run_dir (string): path of main ensemble run directory; vdb's within this directory tree are source of import\n existing_epc (boolean, default False): if True, the epc_file must already exist and contain the compatible grid\n keyword_list (list of strings, optional): if present, only properties for keywords within the list are included\n property_kind_list (list of strings, optional): if present, only properties which are mapped to these resqml property\n kinds are included in the import\n vdb_static_properties (boolean, default True): if False, no static properties are included, regardless of keyword and/or\n property kind matches\n vdb_recurrent_properties (boolean, default True): if False, no recurrent properties are included, regardless of keyword\n and/or property kind matches\n decoarsen (boolean, default True): if True and ICOARSE property exists for a grid in a case, the associated property\n data is decoarsened; if False, the property data is as stored in the vdb\n timestep_selection (string, default 'all'): may be 'first', 'last', 'first and last', or 'all', controlling which\n reporting timesteps are included when loading recurrent data\n create_property_set_per_realization (boolean, default True): if True, a property set object is created for each 
realization\n create_property_set_per_timestep (boolean, default True): if True, a property set object is created for each timestep\n included in the recurrent data import\n create_complete_property_set (boolean, default False): if True, a property set object is created containing all the\n properties imported; only really useful to differentiate from other properties related to the grid\n extent_ijk (triple int, optional): this and remaining arguments are only used if existing_epc is False; the extent\n is only needed in case automatic determination of the extent fails\n corp_xy_units (string, default 'metres'): the units of x & y values in the vdb corp data; should be 'metres' or 'feet'\n corp_z_units (string, default 'metres'): the units of z values in the vdb corp data; should be 'metres' or 'feet'\n corp_z_inc_down (boolean, default True): set to True if corp z values are depth; False if elevation\n ijk_handedness (string, default 'right'): set to the handedness of the IJK axes in the Nexus model; 'right' or 'left'\n geometry_defined_everywhere (boolean, default True): set to False if inactive cells do not have valid geometry;\n deprecated - use treat_as_nan argument instead\n treat_as_nan (string, optional): if not None, one of 'dots', 'ij_dots', 'inactive'; controls which inactive cells\n have their geometry set to undefined\n resqml_xy_units (string, default 'metres'): the units of x & y values to use in the generated resqml grid;\n should be 'metres' or 'feet'\n resqml_z_units (string, default 'metres'): the units of z values to use in the generated resqml grid;\n should be 'metres' or 'feet'\n resqml_z_inc_down (boolean, default True): set to True if resqml z values are to be depth; False for elevations\n shift_to_local (boolean, default True): if True, the resqml coordinate reference system will use a local origin\n local_origin_place (string, default 'centre'): where to place the local origin; 'centre' or 'minimum'; only\n relevant if shift_to_local is True\n max_z_void (float, default 0.1): the tolerance of voids between layers, in z direction; voids greater than this\n will cause the grid import to fail\n split_pillars (boolean, default True): if False, a grid is generated without split pillars\n split_tolerance (float, default 0.01): the tolerance applied to each of x, y, & z values, beyond which a corner\n point (and hence pillar) will be split\n progress_fn (function(float), optional): if present, this function is called at intervals during processing; it\n must accept one floating point argument which will range from 0.0 to 1.0\n\n returns:\n resqpy.Model object containing properties for all the realisations; hdf5 and epc files having been updated\n\n note:\n if existing_epc is True, the epc file must already exist and contain one grid (or one grid named ROOT) which must\n have the correct extent for all realisations within the ensemble; if existing_epc is False, the resqml dataset is\n created afresh with a grid extracted from the first realisation in the ensemble; either way, the single grid is used\n as the representative grid in the ensemble resqml dataset being generated;\n all vdb directories within the directory tree headed by ensemble_run_dir are included in the import; by\n default all properties will be imported; the keyword_list, property_kind_list, vdb_static_properties,\n vdb_recurrent_properties and timestep_selection arguments can be used to filter the required properties;\n if both keyword_list and property_kind_list are provided, a property must match an 
item in both lists in order\n to be included; if recurrent properties are being included then all vdb's should contain the same number of reporting\n steps in their recurrent data and these should relate to the same set of timestamps; timestamp data is extracted from a\n summary file for the first realisation; no check is made to ensure that reporting timesteps in different realisations\n are actually for the same date.\n \"\"\"\n\n assert epc_file.endswith('.epc')\n assert vdb_static_properties or vdb_recurrent_properties, 'no properties selected for ensemble import'\n\n if progress_fn is not None:\n progress_fn(0.0)\n\n # fetch a sorted list of the vdb paths found in the run directory tree\n ensemble_list = vdb.ensemble_vdb_list(ensemble_run_dir)\n if len(ensemble_list) == 0:\n log.error(\"no vdb's found in run directory tree: \" + str(ensemble_run_dir))\n return None\n\n if not existing_epc:\n model = import_nexus(\n epc_file[:-4], # output path and file name without .epc or .h5 extension\n extent_ijk = extent_ijk, # 3 element numpy vector, in case extent is not automatically determined\n vdb_file = ensemble_list[0], # vdb input file\n corp_xy_units = corp_xy_units,\n corp_z_units = corp_z_units,\n corp_z_inc_down = corp_z_inc_down,\n ijk_handedness = ijk_handedness,\n geometry_defined_everywhere = geometry_defined_everywhere,\n treat_as_nan = treat_as_nan,\n resqml_xy_units = resqml_xy_units,\n resqml_z_units = resqml_z_units,\n resqml_z_inc_down = resqml_z_inc_down,\n shift_to_local = shift_to_local,\n local_origin_place = local_origin_place, # 'centre' or 'minimum'\n max_z_void = max_z_void, # import will fail if vertical void greater than this is encountered\n split_pillars = split_pillars,\n split_tolerance = split_tolerance, # applies to each of x, y, z differences\n vdb_static_properties = False,\n vdb_recurrent_properties = False,\n create_property_set = False)\n\n model = rq.Model(\n epc_file = epc_file) # shouldn't be necessary if just created but it feels safer to re-open the model\n assert model is not None, 'failed to instantiate model'\n grid = model.grid()\n assert grid is not None, 'grid not found'\n ext_uuid = model.h5_uuid()\n assert ext_uuid is not None, 'failed to determine uuid for hdf5 file reference'\n hdf5_file = model.h5_file_name(uuid = ext_uuid)\n\n # create reporting timestep time series for recurrent data, if required, based on the first realisation\n recur_time_series = None\n recur_ts_uuid = None\n timestep_list = None\n if vdb_recurrent_properties:\n summary_file = ensemble_list[0][:-4] + '.sum' # TODO: check timestep summary file extension, .tssum?\n full_time_series = rts.time_series_from_nexus_summary(summary_file)\n if full_time_series is None:\n log.error('failed to extract info from timestep summary file; disabling recurrent property import')\n vdb_recurrent_properties = False\n if vdb_recurrent_properties:\n vdbase = vdb.VDB(ensemble_list[0])\n timestep_list = vdbase.list_of_timesteps()\n if len(timestep_list) == 0:\n log.warning(\n 'no ROOT recurrent data found in vdb for first realisation; disabling recurrent property import')\n vdb_recurrent_properties = False\n if vdb_recurrent_properties:\n if timestep_selection == 'all' or ('first' in timestep_selection):\n fs_index = 0\n else:\n fs_index = -1\n first_stamp = full_time_series.timestamp(timestep_list[fs_index])\n if first_stamp is None:\n log.error('first timestamp number selected for import was not found in summary file: ' +\n str(timestep_list[fs_index]))\n log.error('disabling recurrent 
property import')\n vdb_recurrent_properties = False\n if vdb_recurrent_properties:\n recur_time_series = rts.TimeSeries(model, first_timestamp = first_stamp)\n if timestep_selection == 'all':\n remaining_list = timestep_list[1:]\n elif timestep_selection == 'first and last':\n remaining_list = [timestep_list[-1]]\n else:\n remaining_list = []\n for timestep_number in remaining_list:\n stamp = full_time_series.timestamp(timestep_number)\n if stamp is None:\n log.error('timestamp number for which recurrent data exists was not found in summary file: ' +\n str(timestep_number))\n log.error('disabling recurrent property import')\n vdb_recurrent_properties = False\n recur_time_series = None\n break\n recur_time_series.add_timestamp(stamp)\n if recur_time_series is not None:\n recur_ts_node = recur_time_series.create_xml(title = 'simulator recurrent array timestep series')\n recur_ts_uuid = rqet.uuid_for_part_root(recur_ts_node)\n model.time_series = recur_ts_node # save as the primary time series for the model\n\n if create_complete_property_set or create_property_set_per_timestep:\n complete_collection = rp.GridPropertyCollection()\n complete_collection.set_grid(grid)\n else:\n complete_collection = None\n\n # main loop over realisations\n\n for realisation in range(len(ensemble_list)):\n\n if progress_fn is not None:\n progress_fn(float(1 + realisation) / float(1 + len(ensemble_list)))\n\n vdb_file = ensemble_list[realisation]\n log.info('processing realisation ' + str(realisation) + ' from: ' + str(vdb_file))\n vdbase = vdb.VDB(vdb_file)\n # case_list = vdbase.cases()\n # assert len(case_list) > 0, 'no cases found in vdb: ' + str(vdb_file)\n # if len(case_list) > 1: log.warning('more than one case found in vdb (using first): ' + str(vdb_file))\n # vdb_case = case_list[0]\n # vdbase.set_use_case(vdb_case)\n vdbase.set_extent_kji(grid.extent_kji)\n\n prop_import_collection = rp.GridPropertyCollection(realization = realisation)\n prop_import_collection.set_grid(grid)\n\n decoarsen_array = None\n if vdb_static_properties:\n props = vdbase.list_of_static_properties()\n if len(props) > 0:\n for keyword in props:\n if keyword_list is not None and keyword not in keyword_list:\n continue\n if property_kind_list is not None:\n prop_kind, _, _ = rp.property_kind_and_facet_from_keyword(keyword)\n if prop_kind not in property_kind_list and prop_kind not in ['active', 'region initialization']:\n continue\n prop_import_collection.import_vdb_static_property_to_cache(vdbase,\n keyword,\n realization = realisation)\n if decoarsen:\n decoarsen_array = prop_import_collection.decoarsen_imported_list()\n if decoarsen_array is not None:\n log.debug('static properties decoarsened for realisation ' + str(realisation))\n grid.write_hdf5_from_caches(hdf5_file,\n mode = 'a',\n geometry = False,\n imported_properties = prop_import_collection,\n write_active = False)\n prop_import_collection.remove_all_cached_arrays()\n\n if vdb_recurrent_properties:\n\n r_timestep_list = vdbase.list_of_timesteps() # get list of timesteps for which recurrent files exist\n if len(r_timestep_list) < recur_time_series.number_of_timestamps():\n log.error('insufficient number of reporting timesteps; skipping recurrent data for realisation ' +\n str(realisation))\n else:\n common_recur_prop_set = None\n for tni in range(recur_time_series.number_of_timestamps()):\n if timestep_selection in ['all', 'first']:\n timestep_number = timestep_list[tni]\n r_timestep_number = r_timestep_list[tni]\n elif timestep_selection == 'last' or tni > 0:\n 
timestep_number = timestep_list[-1]\n r_timestep_number = r_timestep_list[-1]\n else:\n timestep_number = timestep_list[0]\n r_timestep_number = r_timestep_list[0]\n stamp = full_time_series.timestamp(timestep_number)\n recur_prop_list = vdbase.list_of_recurrent_properties(r_timestep_number)\n if common_recur_prop_set is None:\n common_recur_prop_set = set(recur_prop_list)\n elif recur_prop_list is not None:\n common_recur_prop_set = common_recur_prop_set.intersection(set(recur_prop_list))\n step_import_collection = rp.GridPropertyCollection()\n step_import_collection.set_grid(grid)\n # for each property for this timestep, cache array and add to recur prop import collection for this time step\n if recur_prop_list:\n for keyword in recur_prop_list:\n if not keyword or not keyword.isalnum():\n continue\n if keyword_list is not None and keyword not in keyword_list:\n continue\n if property_kind_list is not None:\n prop_kind, _, _ = rp.property_kind_and_facet_from_keyword(keyword)\n if prop_kind not in property_kind_list:\n continue\n step_import_collection.import_vdb_recurrent_property_to_cache(\n vdbase,\n r_timestep_number,\n keyword,\n time_index = tni, # index into recur_time_series\n realization = realisation)\n if decoarsen_array is not None:\n step_import_collection.decoarsen_imported_list(decoarsen_array = decoarsen_array)\n # extend hdf5 with cached arrays for this timestep\n # log.info('number of recurrent grid property arrays for timestep: ' + str(timestep_number) +\n # ' is: ' + str(step_import_collection.number_of_imports()))\n # log.info('extending hdf5 file with recurrent properties for timestep: ' + str(timestep_number))\n grid.write_hdf5_from_caches(hdf5_file,\n mode = 'a',\n geometry = False,\n imported_properties = step_import_collection,\n write_active = False)\n # add imported list for this timestep to full imported list\n prop_import_collection.inherit_imported_list_from_other_collection(step_import_collection)\n # log.debug('total number of property arrays after timestep: ' + str(timestep_number) +\n # ' is: ' + str(prop_import_collection.number_of_imports()))\n # remove cached copies of arrays\n step_import_collection.remove_all_cached_arrays()\n\n if len(prop_import_collection.imported_list) == 0:\n log.warning('no properties imported for realisation ' + str(realisation))\n continue\n\n prop_import_collection.create_xml_for_imported_list_and_add_parts_to_model(ext_uuid,\n time_series_uuid = recur_ts_uuid)\n\n if create_property_set_per_realization:\n prop_import_collection.create_property_set_xml('property set for realization ' + str(realisation))\n\n if complete_collection is not None:\n complete_collection.inherit_parts_from_other_collection(prop_import_collection)\n\n if complete_collection is not None:\n if create_property_set_per_timestep and recur_time_series is not None:\n for tni in range(recur_time_series.number_of_timestamps()):\n ts_collection = rp.selective_version_of_collection(complete_collection, time_index = tni)\n if ts_collection.number_of_parts() > 0:\n ts_collection.create_property_set_xml('property set for time index ' + str(tni))\n if create_complete_property_set:\n complete_collection.create_property_set_xml('property set for ensemble vdb import')\n\n # mark model as modified (will already have happened anyway)\n model.set_modified()\n\n # rewrite epc file\n log.info('storing updated model in epc file ' + epc_file)\n model.store_epc(epc_file)\n\n if progress_fn is not None:\n progress_fn(1.0)\n\n # return updated resqml model\n return 
model\n\n\ndef add_ab_properties(\n epc_file, # existing resqml model\n grid_uuid = None, # optional grid uuid, required if more than one grid in model; todo: handle list of grids?\n ext_uuid = None, # if None, hdf5 file holding grid geometry will be used\n ab_property_list = None\n): # list of (file_name, keyword, property_kind, facet_type, facet, uom, time_index, null_value,\n # discrete, realization)\n \"\"\"Process a list of pure binary property array files, adding as parts of model, related to grid (hdf5 file is\n appended to).\"\"\"\n\n assert ab_property_list, 'property list is empty or missing'\n\n model = rq.Model(epc_file = epc_file)\n if grid_uuid is None:\n grid_node = model.root_for_ijk_grid() # will raise an exception if Model has more than 1 grid\n assert grid_node is not None, 'grid not found in model'\n grid_uuid = rqet.uuid_for_part_root(grid_node)\n grid = grr.any_grid(parent_model = model, uuid = grid_uuid, find_properties = False)\n\n if ext_uuid is None:\n ext_node = rqet.find_nested_tags(grid.geometry_root, ['Points', 'Coordinates', 'HdfProxy', 'UUID'])\n if ext_node is not None:\n ext_uuid = bu.uuid_from_string(ext_node.text.strip())\n\n # ab_property_list: list of (filename, keyword, property_kind, facet_type, facet, uom, time_index, null_value, discrete, realization)\n prop_import_collection = rp.GridPropertyCollection()\n prop_import_collection.set_grid(grid)\n for (p_filename, p_keyword, p_property_kind, p_facet_type, p_facet, p_uom, p_time_index, p_null_value, p_discrete,\n p_realization) in ab_property_list:\n prop_import_collection.import_ab_property_to_cache(p_filename,\n p_keyword,\n grid.extent_kji,\n discrete = p_discrete,\n uom = p_uom,\n time_index = p_time_index,\n null_value = p_null_value,\n property_kind = p_property_kind,\n facet_type = p_facet_type,\n facet = p_facet,\n realization = p_realization)\n # todo: property_kind, facet_type & facet are not currently getting passed through the imported_list tuple in resqml_property\n\n if prop_import_collection is None:\n log.warning('no pure binary grid properties to import')\n else:\n log.info('number of pure binary grid property arrays: ' + str(prop_import_collection.number_of_imports()))\n\n # append to hdf5 file using arrays cached in grid property collection above\n hdf5_file = model.h5_file_name()\n log.debug('appending to hdf5 file: ' + hdf5_file)\n grid.write_hdf5_from_caches(hdf5_file,\n mode = 'a',\n geometry = False,\n imported_properties = prop_import_collection,\n write_active = False)\n # remove cached static property arrays from memory\n if prop_import_collection is not None:\n prop_import_collection.remove_all_cached_arrays()\n\n # add imported properties parts to model, building property parts list\n if prop_import_collection is not None and prop_import_collection.imported_list is not None:\n prop_import_collection.create_xml_for_imported_list_and_add_parts_to_model(ext_uuid)\n\n # mark model as modified\n model.set_modified()\n\n # store new version of model\n log.info('storing model with additional properties in epc file: ' + epc_file)\n model.store_epc(epc_file)\n\n return model\n\n\ndef add_surfaces(\n epc_file, # existing resqml model\n crs_uuid = None, # optional crs uuid, defaults to crs associated with model (usually main grid crs)\n ext_uuid = None, # if None, uuid for hdf5 file holding main grid geometry will be used\n surface_file_format = 'zmap', # zmap, rms (roxar) or GOCAD-Tsurf only formats currently supported\n rq_class = 'surface', # 'surface' or 'mesh': the class of 
object to be created\n surface_role = 'map', # 'map' or 'pick'\n quad_triangles = False, # if True, 4 triangles per quadrangle will be used for mesh formats, otherwise 2\n surface_file_list = None, # list of full file names (paths), each holding one surface\n make_horizon_interpretations_and_features = True): # if True, feature and interpretation objects are created\n \"\"\"Process a list of surface files, adding each surface as a new part in the resqml model.\"\"\"\n\n assert surface_file_list, 'surface file list is empty or missing'\n assert surface_file_format in ['zmap', 'rms', 'roxar',\n 'GOCAD-Tsurf'], 'unsupported surface file format: ' + str(surface_file_format)\n if 'TriangulatedSet' in rq_class:\n rq_class = 'surface'\n elif 'Grid2d' in rq_class:\n rq_class = 'mesh'\n assert rq_class in ['surface', 'mesh']\n\n log.info('accessing existing resqml model from: ' + epc_file)\n model = rq.Model(epc_file = epc_file)\n assert model, 'failed to read existing resqml model from file: ' + epc_file\n\n if crs_uuid is None:\n assert model.crs_root is not None, 'no crs uuid given and no default in model'\n crs_uuid = rqet.uuid_for_part_root(model.crs_root)\n assert crs_uuid is not None\n crs_root = model.root_for_uuid(crs_uuid)\n\n if ext_uuid is None:\n ext_uuid = model.h5_uuid()\n if ext_uuid is None: # no pre-existing hdf5 part or references in model\n hdf5_file = epc_file[:-4] + '.h5'\n ext_node = model.create_hdf5_ext(file_name = hdf5_file)\n ext_uuid = rqet.uuid_for_part_root(ext_node)\n h5_mode = 'w'\n else:\n hdf5_file = model.h5_file_name(uuid = ext_uuid)\n h5_mode = 'a'\n\n assert ext_uuid is not None, 'failed to establish hdf5 uuid'\n\n # append to hdf5 file using arrays from Surface object's patch(es)\n log.info('will append to hdf5 file: ' + hdf5_file)\n\n for surf_file in surface_file_list:\n\n _, short_name = os.path.split(surf_file)\n dot = short_name.rfind('.')\n if dot > 0:\n short_name = short_name[:dot]\n\n log.info('surface ' + short_name + ' processing file: ' + surf_file + ' using format: ' + surface_file_format)\n if rq_class == 'surface':\n if surface_file_format == 'GOCAD-Tsurf':\n surface = rqs.Surface(model,\n tsurf_file = surf_file,\n surface_role = surface_role,\n quad_triangles = quad_triangles)\n else:\n surface = rqs.Surface(model,\n mesh_file = surf_file,\n mesh_format = surface_file_format,\n surface_role = surface_role,\n quad_triangles = quad_triangles)\n elif rq_class == 'mesh':\n if surface_file_format == 'GOCAD-Tsurf':\n log.info(\n f\"Cannot convert a GOCAD-Tsurf to mesh, only to TriangulatedSurface - skipping file {surf_file}\")\n break\n else:\n surface = rqs.Mesh(model,\n mesh_file = surf_file,\n mesh_format = surface_file_format,\n mesh_flavour = 'reg&z',\n surface_role = surface_role,\n crs_uuid = crs_uuid)\n else:\n log.critical('this is impossible')\n # NB. 
surface may be either a Surface object or a Mesh object\n\n log.debug('appending to hdf5 file for surface file: ' + surf_file)\n surface.write_hdf5(hdf5_file, mode = h5_mode)\n\n if make_horizon_interpretations_and_features:\n feature = rqo.GeneticBoundaryFeature(model, kind = 'horizon', feature_name = short_name)\n feature.create_xml()\n interp = rqo.HorizonInterpretation(model, genetic_boundary_feature = feature, domain = 'depth')\n interp_root = interp.create_xml()\n surface.set_represented_interpretation_root(interp_root)\n\n surface.create_xml(ext_uuid,\n add_as_part = True,\n add_relationships = True,\n crs_uuid = rqet.uuid_for_part_root(crs_root),\n title = short_name + ' sourced from ' + surf_file,\n originator = None)\n\n # mark model as modified\n model.set_modified()\n\n # store new version of model\n log.info('storing model with additional parts in epc file: ' + epc_file)\n model.store_epc(epc_file)\n\n return model\n\n\ndef grid_from_cp(model,\n cp_array,\n crs_uuid,\n active_mask = None,\n geometry_defined_everywhere = True,\n treat_as_nan = None,\n dot_tolerance = 1.0,\n morse_tolerance = 5.0,\n max_z_void = 0.1,\n split_pillars = True,\n split_tolerance = 0.01,\n ijk_handedness = 'right',\n known_to_be_straight = False):\n \"\"\"Create a resqpy.grid.Grid object from a 7D corner point array.\n\n notes:\n this function sets up all the geometry arrays in memory but does not write to hdf5 nor create xml: use Grid methods;\n geometry_defined_everywhere is deprecated, use treat_as_nan instead\n \"\"\"\n\n if treat_as_nan is None:\n if not geometry_defined_everywhere:\n treat_as_nan = 'morse'\n else:\n assert treat_as_nan in ['none', 'dots', 'ij_dots', 'morse', 'inactive']\n if treat_as_nan == 'none':\n treat_as_nan = None\n geometry_defined_everywhere = (treat_as_nan is None)\n\n assert cp_array.ndim == 7\n nk, nj, ni = cp_array.shape[:3]\n nk_plus_1 = nk + 1\n nj_plus_1 = nj + 1\n ni_plus_1 = ni + 1\n\n if active_mask is None:\n active_mask = np.ones((nk, nj, ni), dtype = 'bool')\n inactive_mask = np.zeros((nk, nj, ni), dtype = 'bool')\n else:\n assert active_mask.shape == (nk, nj, ni)\n inactive_mask = np.logical_not(active_mask)\n all_active = np.all(active_mask)\n\n if all_active and geometry_defined_everywhere:\n cp_nan_mask = None\n else:\n cp_nan_mask = np.any(np.isnan(cp_array), axis = (3, 4, 5, 6)) # ie. 
if any nan per cell\n if not geometry_defined_everywhere and not all_active:\n if treat_as_nan == 'inactive':\n log.debug('all inactive cell geometry being set to NaN')\n cp_nan_mask = np.logical_or(cp_nan_mask, inactive_mask)\n else:\n if treat_as_nan == 'dots':\n # for speed, only check primary diagonal of cells\n log.debug('geometry for cells with no length to primary cell diagonal being set to NaN')\n dot_mask = np.all(np.abs(cp_array[:, :, :, 1, 1, 1] - cp_array[:, :, :, 0, 0, 0]) < dot_tolerance,\n axis = -1)\n elif treat_as_nan in ['ij_dots', 'morse']:\n # check one diagonal of each I & J face\n log.debug(\n 'geometry being set to NaN for inactive cells with no length to primary face diagonal for any I or J face'\n )\n dot_mask = np.zeros((nk, nj, ni), dtype = bool)\n # k_face_vecs = cp_array[:, :, :, :, 1, 1] - cp_array[:, :, :, :, 0, 0]\n j_face_vecs = cp_array[:, :, :, 1, :, 1] - cp_array[:, :, :, 0, :, 0]\n i_face_vecs = cp_array[:, :, :, 1, 1, :] - cp_array[:, :, :, 0, 0, :]\n dot_mask[:] = np.where(np.all(np.abs(j_face_vecs[:, :, :, 0]) < dot_tolerance, axis = -1), True,\n dot_mask)\n dot_mask[:] = np.where(np.all(np.abs(j_face_vecs[:, :, :, 1]) < dot_tolerance, axis = -1), True,\n dot_mask)\n dot_mask[:] = np.where(np.all(np.abs(i_face_vecs[:, :, :, 0]) < dot_tolerance, axis = -1), True,\n dot_mask)\n dot_mask[:] = np.where(np.all(np.abs(i_face_vecs[:, :, :, 1]) < dot_tolerance, axis = -1), True,\n dot_mask)\n log.debug(f'dot mask set for {np.count_nonzero(dot_mask)} cells')\n if treat_as_nan == 'morse':\n morse_tol_sqr = morse_tolerance * morse_tolerance\n # compare face vecs lengths in xy against max for active cells: where much greater set to NaN\n len_j_face_vecs_sqr = np.sum(j_face_vecs[..., :2] * j_face_vecs[..., :2], axis = -1)\n len_i_face_vecs_sqr = np.sum(j_face_vecs[..., :2] * i_face_vecs[..., :2], axis = -1)\n dead_mask = inactive_mask.reshape(nk, nj, ni, 1).repeat(2, -1)\n # mean_len_active_j_face_vecs_sqr = np.mean(ma.masked_array(len_j_face_vecs_sqr, mask = dead_mask))\n # mean_len_active_i_face_vecs_sqr = np.mean(ma.masked_array(len_i_face_vecs_sqr, mask = dead_mask))\n max_len_active_j_face_vecs_sqr = np.max(ma.masked_array(len_j_face_vecs_sqr, mask = dead_mask))\n max_len_active_i_face_vecs_sqr = np.max(ma.masked_array(len_i_face_vecs_sqr, mask = dead_mask))\n dot_mask = np.where(\n np.any(len_j_face_vecs_sqr > morse_tol_sqr * max_len_active_j_face_vecs_sqr, axis = -1),\n True, dot_mask)\n dot_mask = np.where(\n np.any(len_i_face_vecs_sqr > morse_tol_sqr * max_len_active_i_face_vecs_sqr, axis = -1),\n True, dot_mask)\n log.debug(f'morse mask set for {np.count_nonzero(dot_mask)} cells')\n else:\n raise Exception('code broken')\n cp_nan_mask = np.logical_or(cp_nan_mask, np.logical_and(inactive_mask, dot_mask))\n geometry_defined_everywhere = not np.any(cp_nan_mask)\n if geometry_defined_everywhere:\n cp_nan_mask = None\n\n if cp_nan_mask is not None:\n inactive_mask = np.logical_or(inactive_mask, cp_nan_mask)\n active_mask = np.logical_not(inactive_mask)\n\n # set up masked version of corner point data based on cells with defined geometry\n if geometry_defined_everywhere:\n full_mask = None\n masked_cp_array = ma.masked_array(cp_array, mask = ma.nomask)\n log.info('geometry present for all cells')\n else:\n full_mask = cp_nan_mask.reshape((nk, nj, ni, 1)).repeat(24, axis = 3).reshape((nk, nj, ni, 2, 2, 2, 3))\n masked_cp_array = ma.masked_array(cp_array, mask = full_mask)\n log.info('number of cells without geometry: ' + str(np.count_nonzero(cp_nan_mask)))\n\n 
# convert to resqml\n\n k_gaps = None\n k_gap_after_layer = None\n k_gap_raw_index = None\n\n if nk > 1:\n # check for (vertical) voids, or un-pillar-like anomalies, which will require k gaps in the resqml ijk grid\n log.debug('checking for voids')\n gap = masked_cp_array[1:, :, :, 0, :, :, :] - masked_cp_array[:-1, :, :, 1, :, :, :]\n max_gap_by_layer_and_xyz = np.max(np.abs(gap), axis = (1, 2, 3, 4))\n max_gap = np.max(max_gap_by_layer_and_xyz)\n log.debug('maximum void distance: {0:.3f}'.format(max_gap))\n if max_gap > max_z_void:\n log.warning('maximum void distance exceeds limit, grid will include k gaps')\n k_gaps = 0\n k_gap_after_layer = np.zeros((nk - 1,), dtype = bool)\n k_gap_raw_index = np.empty((nk,), dtype = int)\n k_gap_raw_index[0] = 0\n for k in range(nk - 1):\n max_layer_gap = np.max(max_gap_by_layer_and_xyz[k])\n if max_layer_gap > max_z_void:\n k_gap_after_layer[k] = True\n k_gaps += 1\n elif max_layer_gap > 0.0:\n # close void (includes shifting x & y)\n log.debug('closing void below layer (0 based): ' + str(k))\n layer_gap = gap[k] * 0.5\n layer_gap_unmasked = np.where(gap[k].mask, 0.0, layer_gap)\n masked_cp_array[k + 1, :, :, 0, :, :, :] -= layer_gap_unmasked\n masked_cp_array[k, :, :, 1, :, :, :] += layer_gap_unmasked\n k_gap_raw_index[k + 1] = k + k_gaps\n elif max_gap > 0.0:\n # close voids (includes shifting x & y)\n log.debug('closing voids')\n gap *= 0.5\n gap_unmasked = np.where(gap.mask, 0.0, gap)\n masked_cp_array[1:, :, :, 0, :, :, :] -= gap_unmasked\n masked_cp_array[:-1, :, :, 1, :, :, :] += gap_unmasked\n\n if k_gaps:\n nk_plus_1 += k_gaps\n if k_gap_raw_index is None:\n k_gap_raw_index = np.arange(nk, dtype = int)\n\n # reduce cp array extent in k\n log.debug('reducing k extent of corner point array (sharing points vertically)')\n k_reduced_cp_array = ma.masked_array(np.zeros((nk_plus_1, nj, ni, 2, 2, 3))) # (nk+1+k_gaps, nj, ni, jp, ip, xyz)\n k_reduced_cp_array[0, :, :, :, :, :] = masked_cp_array[0, :, :, 0, :, :, :]\n k_reduced_cp_array[-1, :, :, :, :, :] = masked_cp_array[-1, :, :, 1, :, :, :]\n if k_gaps:\n raw_k = 1\n for k in range(nk - 1):\n # fill reduced array slice(s) for base of layer k and top of layer k + 1\n if k_gap_after_layer[k]:\n k_reduced_cp_array[raw_k, :, :, :, :, :] = masked_cp_array[k, :, :, 1, :, :, :]\n raw_k += 1\n k_reduced_cp_array[raw_k, :, :, :, :, :] = masked_cp_array[k + 1, :, :, 0, :, :, :]\n raw_k += 1\n else: # take data from either possible cp slice, whichever is defined\n slice = masked_cp_array[k + 1, :, :, 0, :, :, :]\n k_reduced_cp_array[raw_k, :, :, :, :, :] = np.where(slice.mask, masked_cp_array[k, :, :, 1, :, :, :],\n slice)\n raw_k += 1\n assert raw_k == nk + k_gaps\n else:\n slice = masked_cp_array[1:, :, :, 0, :, :, :]\n # where cell geometry undefined, if cell above is defined, take data from cell above with kp = 1 and set shared point defined\n k_reduced_cp_array[1:-1, :, :, :, :, :] = np.where(slice.mask, masked_cp_array[:-1, :, :, 1, :, :, :], slice)\n\n # create 2D array of active columns (columns where at least one cell is active)\n log.debug('creating 2D array of active columns')\n active_mask_2D = np.any(active_mask, axis = 0)\n\n # create primary pillar reference indices as one of four column corners around pillar, active column preferred\n log.debug('creating primary pillar reference neighbourly indices')\n primary_pillar_jip = np.zeros((nj_plus_1, ni_plus_1, 2), dtype = 'int') # (nj + 1, ni + 1, jp:ip)\n primary_pillar_jip[-1, :, 0] = 1\n primary_pillar_jip[:, -1, 1] = 1\n for j in 
range(nj_plus_1):\n for i in range(ni_plus_1):\n if active_mask_2D[j - primary_pillar_jip[j, i, 0], i - primary_pillar_jip[j, i, 1]]:\n continue\n if i > 0 and primary_pillar_jip[j, i, 1] == 0 and active_mask_2D[j - primary_pillar_jip[j, i, 0], i - 1]:\n primary_pillar_jip[j, i, 1] = 1\n continue\n if j > 0 and primary_pillar_jip[j, i, 0] == 0 and active_mask_2D[j - 1, i - primary_pillar_jip[j, i, 1]]:\n primary_pillar_jip[j, i, 0] = 1\n continue\n if i > 0 and j > 0 and primary_pillar_jip[j, i,\n 0] == 0 and primary_pillar_jip[j, i,\n 1] == 0 and active_mask_2D[j - 1,\n i - 1]:\n primary_pillar_jip[j, i, :] = 1\n\n # build extra pillar references for split pillars\n extras_count = np.zeros((nj_plus_1, ni_plus_1), dtype = 'int') # count (0 to 3) of extras for pillar\n extras_list_index = np.zeros((nj_plus_1, ni_plus_1), dtype = 'int') # index in list of 1st extra for pillar\n extras_list = [] # list of (jp, ip)\n extras_use = np.negative(np.ones((nj, ni, 2, 2), dtype = 'int')) # (j, i, jp, ip); -1 means use primary\n if split_pillars:\n log.debug('building extra pillar references for split pillars')\n # loop over pillars\n for j in range(nj_plus_1):\n for i in range(ni_plus_1):\n primary_jp = primary_pillar_jip[j, i, 0]\n primary_ip = primary_pillar_jip[j, i, 1]\n p_col_j = j - primary_jp\n p_col_i = i - primary_ip\n # loop over 4 columns surrounding this pillar\n for jp in range(2):\n col_j = j - jp\n if col_j < 0 or col_j >= nj:\n continue # no column this side of pillar in j\n for ip in range(2):\n col_i = i - ip\n if col_i < 0 or col_i >= ni:\n continue # no column this side of pillar in i\n if jp == primary_jp and ip == primary_ip:\n continue # this column is the primary for this pillar\n discrepancy = np.max(\n np.abs(k_reduced_cp_array[:, col_j, col_i, jp, ip, :] -\n k_reduced_cp_array[:, p_col_j, p_col_i, primary_jp, primary_ip, :]))\n if discrepancy <= split_tolerance:\n continue # data for this column's corner aligns with primary\n for e in range(extras_count[j, i]):\n eli = extras_list_index[j, i] + e\n pillar_j_extra = j - extras_list[eli][0]\n pillar_i_extra = i - extras_list[eli][1]\n discrepancy = np.max(\n np.abs(k_reduced_cp_array[:, col_j, col_i, jp, ip, :] -\n k_reduced_cp_array[:, pillar_j_extra, pillar_i_extra, extras_list[eli][0],\n extras_list[eli][1], :]))\n if discrepancy <= split_tolerance: # data for this corner aligns with existing extra\n extras_use[col_j, col_i, jp, ip] = e\n break\n if extras_use[col_j, col_i, jp, ip] >= 0: # reusing an existing extra for this pillar\n continue\n # add this corner as an extra\n if extras_count[j, i] == 0: # create entry point for this pillar in extras\n extras_list_index[j, i] = len(extras_list)\n extras_list.append((jp, ip))\n extras_use[col_j, col_i, jp, ip] = extras_count[j, i]\n extras_count[j, i] += 1\n if len(extras_list) == 0:\n split_pillars = False\n log.debug('number of extra pillars: ' + str(len(extras_list)))\n\n # create points array as used in resqml\n log.debug('creating points array as used in resqml format')\n if split_pillars:\n points_array = np.zeros(\n (nk_plus_1, (nj_plus_1 * ni_plus_1) + len(extras_list), 3)) # note: nk_plus_1 might include k_gaps\n index = 0\n # primary pillars\n for pillar_j in range(nj_plus_1):\n for pillar_i in range(ni_plus_1):\n (jp, ip) = primary_pillar_jip[pillar_j, pillar_i]\n slice = k_reduced_cp_array[:, pillar_j - jp, pillar_i - ip, jp, ip, :]\n points_array[:, index, :] = np.where(slice.mask, np.nan,\n slice) # NaN indicates undefined/invalid geometry\n index += 1\n # add 
extras for split pillars\n for pillar_j in range(nj_plus_1):\n for pillar_i in range(ni_plus_1):\n for e in range(extras_count[pillar_j, pillar_i]):\n eli = extras_list_index[pillar_j, pillar_i] + e\n (jp, ip) = extras_list[eli]\n pillar_j_extra = pillar_j - jp\n pillar_i_extra = pillar_i - ip\n slice = k_reduced_cp_array[:, pillar_j_extra, pillar_i_extra, jp, ip, :]\n points_array[:, index, :] = np.where(slice.mask, np.nan,\n slice) # NaN indicates unedefined/invalid geometry\n index += 1\n assert (index == (nj_plus_1 * ni_plus_1) + len(extras_list))\n else: # unsplit pillars\n points_array = np.zeros((nk_plus_1, nj_plus_1, ni_plus_1, 3))\n for j in range(nj_plus_1):\n for i in range(ni_plus_1):\n (jp, ip) = primary_pillar_jip[j, i]\n slice = k_reduced_cp_array[:, j - jp, i - ip, jp, ip, :]\n points_array[:, j, i, :] = np.where(slice.mask, np.nan,\n slice) # NaN indicates undefined/invalid geometry\n\n # create an empty grid object and fill in some basic info\n log.debug('initialising grid object')\n grid = grr.Grid(model)\n grid.grid_representation = 'IjkGrid'\n grid.extent_kji = np.array((nk, nj, ni), dtype = 'int')\n grid.nk, grid.nj, grid.ni = nk, nj, ni\n grid.k_direction_is_down = True # assumed direction for corp; todo: determine from geometry and crs z_inc_down flag\n if known_to_be_straight:\n grid.pillar_shape = 'straight'\n else:\n grid.pillar_shape = 'curved'\n grid.has_split_coordinate_lines = split_pillars\n grid.k_gaps = k_gaps\n grid.k_gap_after_array = k_gap_after_layer\n grid.k_raw_index_array = k_gap_raw_index\n\n grid.crs_uuid = crs_uuid\n grid.crs_root = model.root_for_uuid(crs_uuid)\n crs = rqc.Crs(model, uuid = crs_uuid)\n\n # add pillar points array to grid object\n log.debug('attaching points array to grid object')\n grid.points_cached = points_array # NB: reference to points_array, array not copied here\n\n # add split pillar arrays to grid object\n if split_pillars:\n log.debug('adding split pillar arrays to grid object')\n split_pillar_indices_list = []\n cumulative_length_list = []\n cols_for_extra_pillar_list = []\n cumulative_length = 0\n for pillar_j in range(nj_plus_1):\n for pillar_i in range(ni_plus_1):\n for e in range(extras_count[pillar_j, pillar_i]):\n split_pillar_indices_list.append((pillar_j * ni_plus_1) + pillar_i)\n use_count = 0\n for jp in range(2):\n j = pillar_j - jp\n if j < 0 or j >= nj:\n continue\n for ip in range(2):\n i = pillar_i - ip\n if i < 0 or i >= ni:\n continue\n if extras_use[j, i, jp, ip] == e:\n use_count += 1\n cols_for_extra_pillar_list.append((j * ni) + i)\n assert (use_count > 0)\n cumulative_length += use_count\n cumulative_length_list.append(cumulative_length)\n log.debug('number of extra pillars: ' + str(len(split_pillar_indices_list)))\n assert (len(cumulative_length_list) == len(split_pillar_indices_list))\n grid.split_pillar_indices_cached = np.array(split_pillar_indices_list, dtype = 'int')\n log.debug('number of uses of extra pillars: ' + str(len(cols_for_extra_pillar_list)))\n assert (len(cols_for_extra_pillar_list) == np.count_nonzero(extras_use + 1))\n assert (len(cols_for_extra_pillar_list) == cumulative_length)\n grid.cols_for_split_pillars = np.array(cols_for_extra_pillar_list, dtype = 'int')\n assert (len(cumulative_length_list) == len(extras_list))\n grid.cols_for_split_pillars_cl = np.array(cumulative_length_list, dtype = 'int')\n grid.split_pillars_count = len(extras_list)\n\n # following is not part of resqml standard but is used by resqml_grid module for speed optimisation\n log.debug('setting up 
column to pillars mapping')\n base_pillar_count = nj_plus_1 * ni_plus_1\n grid.pillars_for_column = np.empty((nj, ni, 2, 2), dtype = 'int')\n for j in range(nj):\n for i in range(ni):\n for jp in range(2):\n for ip in range(2):\n if not split_pillars or extras_use[j, i, jp, ip] < 0: # use primary pillar\n pillar_index = (j + jp) * ni_plus_1 + i + ip\n else:\n eli = extras_list_index[j + jp, i + ip] + extras_use[j, i, jp, ip]\n pillar_index = base_pillar_count + eli\n grid.pillars_for_column[j, i, jp, ip] = pillar_index\n\n # add inactive cell mask to grid\n log.debug('setting inactive cell mask')\n grid.inactive = inactive_mask.copy()\n\n # add cell geometry defined array to model (using active cell mask unless geometry_defined_everywhere is True)\n if geometry_defined_everywhere:\n grid.geometry_defined_for_all_cells_cached = True\n grid.array_cell_geometry_is_defined = None\n else:\n log.debug('using active cell mask as indicator of defined cell geometry')\n grid.array_cell_geometry_is_defined = active_mask.copy() # a bit harsh: disallows reactivation of cells\n grid.geometry_defined_for_all_cells_cached = np.all(active_mask)\n grid.geometry_defined_for_all_pillars_cached = True # following fesapi convention of defining all pillars regardless\n # note: grid.array_pillar_geometry_is_defined not set, as line above should be sufficient\n\n # tentatively add corner point array to grid object in case it is needed\n log.debug('noting corner point array in grid')\n grid.array_corner_points = cp_array\n\n # set handedness of ijk axes\n if ijk_handedness is None or ijk_handedness == 'auto':\n # work out handedness from sample cell / column axes directions and handedness of crs\n sample_kji0 = tuple(np.array(grid.extent_kji) // 2)\n if not geometry_defined_everywhere and not grid.array_cell_geometry_is_defined[sample_kji0]:\n where_defined = np.where(\n np.logical_and(grid.array_cell_geometry_is_defined, np.logical_not(grid.pinched_out())))\n assert len(where_defined) == 3 and len(where_defined[0]) > 0, 'no extant cell geometries'\n sample_kji0 = (where_defined[0][0], where_defined[1][0], where_defined[2][0])\n sample_cp = cp_array[sample_kji0]\n cell_ijk_lefthanded = (vec.clockwise(sample_cp[0, 0, 0], sample_cp[0, 1, 0], sample_cp[0, 0, 1]) >= 0.0)\n if not grid.k_direction_is_down:\n cell_ijk_lefthanded = not cell_ijk_lefthanded\n if crs.is_right_handed_xyz():\n cell_ijk_lefthanded = not cell_ijk_lefthanded\n grid.grid_is_right_handed = not cell_ijk_lefthanded\n else:\n assert ijk_handedness in ['left', 'right']\n grid.grid_is_right_handed = (ijk_handedness == 'right')\n\n return grid\n",
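Hedged illustration (not part of the resqpy source above; it assumes only NumPy): a minimal sketch of the K-gap detection idea used in grid_from_cp. With a corner point array of shape (nk, nj, ni, kp, jp, ip, xyz), the gap between the base corners of layer k and the top corners of layer k + 1 is measured, and any inter-layer interface whose maximum absolute discrepancy exceeds max_z_void would give rise to a K gap in the resulting RESQML grid. The names below (detect_k_gap_layers, cp) are illustrative only and not part of resqpy.

import numpy as np


def detect_k_gap_layers(cp, max_z_void = 0.1):
    """Returns a boolean array of length nk - 1, True where a K gap would follow the layer."""
    assert cp.ndim == 7 and cp.shape[3:] == (2, 2, 2, 3)
    # difference between top corners of layer k + 1 (kp = 0) and base corners of layer k (kp = 1)
    gap = cp[1:, :, :, 0, :, :, :] - cp[:-1, :, :, 1, :, :, :]
    # maximum absolute discrepancy per inter-layer interface, over all columns, corners and xyz
    max_gap_by_layer = np.max(np.abs(gap), axis = (1, 2, 3, 4, 5))
    return max_gap_by_layer > max_z_void


# toy usage: two layers of a single cell column with a 0.5 unit vertical void between them
cp = np.zeros((2, 1, 1, 2, 2, 2, 3))
cp[0, :, :, 1, :, :, 2] = 1.0  # base corners of layer 0 at z = 1.0
cp[1, :, :, 0, :, :, 2] = 1.5  # top corners of layer 1 at z = 1.5, leaving a void of 0.5
cp[1, :, :, 1, :, :, 2] = 2.5  # base corners of layer 1
assert detect_k_gap_layers(cp, max_z_void = 0.1).tolist() == [True]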
"import numpy as np\nfrom numpy.testing import assert_array_almost_equal\n\nimport resqpy.grid\nimport resqpy.grid_surface as rqgs\nimport resqpy.lines as rql\nimport resqpy.model as rq\nimport resqpy.olio.uuid as bu\nimport resqpy.organize\nimport resqpy.surface\n\n\ndef test_surface(tmp_model):\n\n # Set up a Surface\n title = 'Mountbatten'\n model = tmp_model\n surf = resqpy.surface.Surface(parent_model = model, title = title)\n surf.create_xml()\n\n # Add a interpretation\n assert surf.represented_interpretation_root is None\n surf.create_interpretation_and_feature(kind = 'fault')\n assert surf.represented_interpretation_root is not None\n\n # Check fault can be loaded in again\n model.store_epc()\n fault_interp = resqpy.organize.FaultInterpretation(model, uuid = surf.represented_interpretation_uuid)\n fault_feature = resqpy.organize.TectonicBoundaryFeature(model, uuid = fault_interp.tectonic_boundary_feature.uuid)\n\n # Check title matches expected title\n assert fault_feature.feature_name == title\n\n\ndef test_faces_for_surface(tmp_model):\n crs = resqpy.crs.Crs(tmp_model)\n crs.create_xml()\n grid = resqpy.grid.RegularGrid(tmp_model, extent_kji = (3, 3, 3), crs_uuid = crs.uuid, set_points_cached = True)\n grid.write_hdf5()\n grid.create_xml(write_geometry = True)\n # todo: create sloping planar surface\n # call find faces for each of 3 different methods\n points = np.zeros((2, 2, 3))\n points[1, :, 1] = 3.0\n points[:, 1, 0] = 3.0\n points[:, 1, 2] = 3.0\n points[:, :, 2] += 0.25\n triangles = np.zeros((2, 3), dtype = int)\n triangles[0] = (0, 1, 2)\n triangles[1] = (3, 1, 2)\n surf = resqpy.surface.Surface(tmp_model, crs_uuid = crs.uuid)\n surf.set_from_triangles_and_points(triangles, points.reshape((-1, 3)))\n assert surf is not None\n gcs = rqgs.find_faces_to_represent_surface(grid, surf, 'staffa', mode = 'staffa')\n assert gcs is not None\n assert gcs.count == 12\n cip = set([tuple(pair) for pair in gcs.cell_index_pairs])\n expected_cip = grid.natural_cell_indices(\n np.array([[[0, 0, 0], [1, 0, 0]], [[0, 1, 0], [1, 1, 0]], [[0, 2, 0], [1, 2, 0]], [[1, 0, 0], [1, 0, 1]],\n [[1, 1, 0], [1, 1, 1]], [[1, 2, 0], [1, 2, 1]], [[1, 0, 1], [2, 0, 1]], [[1, 1, 1], [2, 1, 1]],\n [[1, 2, 1], [2, 2, 1]], [[2, 0, 1], [2, 0, 2]], [[2, 1, 1], [2, 1, 2]], [[2, 2, 1], [2, 2, 2]]],\n dtype = int))\n e_cip = set([tuple(pair) for pair in expected_cip])\n assert cip == e_cip # note: this assumes lower cell index is first, which happens to be true\n # todo: check face indices\n gcs.write_hdf5()\n gcs.create_xml()\n assert bu.matching_uuids(tmp_model.uuid(obj_type = 'GridConnectionSetRepresentation'), gcs.uuid)\n\n\ndef test_delaunay_triangulation(example_model_and_crs):\n\n model, crs = example_model_and_crs\n\n # number of random points to use\n n = 20\n\n # create a set of random points\n x = np.random.random(n) * 1000.0\n y = np.random.random(n) * 1000.0\n z = np.random.random(n) # note: triangulation does not use z values\n p = np.stack((x, y, z), axis = -1)\n\n # make a PointSet object\n ps = resqpy.surface.PointSet(model, crs_uuid = crs.uuid, points_array = p, title = 'random points in square')\n\n # make another PointSet as random points within a closed polyline\n vertices = np.array(\n ((50.0, 99.0, 13.0), (85.0, 60.0, 17.5), (62.7, 11.0, 10.0), (33.3, 15.3, 19.2), (12.8, 57.8, 15.0)))\n polygon = rql.Polyline(model, set_crs = crs.uuid, set_bool = True, set_coord = vertices, title = 'the pentagon')\n polygon.write_hdf5()\n polygon.create_xml()\n ps2 = resqpy.surface.PointSet(model,\n 
crs_uuid = crs.uuid,\n polyline = polygon,\n random_point_count = n,\n title = 'random points in polygon')\n\n # process the point sets into triangulated surfaces\n for point_set in (ps, ps2):\n point_set.write_hdf5()\n point_set.create_xml()\n surf = resqpy.surface.Surface(model, point_set = point_set, title = 'surface from ' + str(point_set.title))\n assert surf is not None\n surf.write_hdf5()\n surf.create_xml()\n # check that coordinate range of points looks okay\n triangles, points = surf.triangles_and_points()\n assert len(points) == n\n original_points = point_set.full_array_ref()\n assert_array_almost_equal(np.nanmin(original_points, axis = 0), np.nanmin(points, axis = 0))\n assert_array_almost_equal(np.nanmax(original_points, axis = 0), np.nanmax(points, axis = 0))\n\n\ndef test_regular_mesh(example_model_and_crs):\n\n model, crs = example_model_and_crs\n\n # number of points in mesh, origin spacing\n ni = 7\n nj = 5\n origin = (409000.0, 1605000.0, 0.0)\n di = dj = 50.0\n\n # create some random depths\n z = (np.random.random(ni * nj) * 20.0 + 1000.0).reshape((nj, ni))\n\n # make a regular mesh representation\n mesh = resqpy.surface.Mesh(model,\n crs_uuid = crs.uuid,\n mesh_flavour = 'reg&z',\n ni = ni,\n nj = nj,\n origin = origin,\n dxyz_dij = np.array([[di, 0.0, 0.0], [0.0, dj, 0.0]]),\n z_values = z,\n title = 'random mesh',\n originator = 'Andy',\n extra_metadata = {'testing mode': 'automated'})\n assert mesh is not None\n mesh.write_hdf5()\n mesh.create_xml()\n mesh_uuid = mesh.uuid\n\n # fully write model to disc\n model.store_epc()\n epc = model.epc_file\n\n # re-open model and check the mesh object is there\n model = rq.Model(epc)\n assert bu.matching_uuids(model.uuid(obj_type = 'Grid2dRepresentation', title = 'random mesh'), mesh_uuid)\n\n # establish a resqpy Mesh from the object in the RESQML dataset\n peristent_mesh = resqpy.surface.Mesh(model, uuid = mesh_uuid)\n\n # check some of the metadata\n assert peristent_mesh.ni == ni and peristent_mesh.nj == nj\n assert peristent_mesh.flavour == 'reg&z'\n assert_array_almost_equal(np.array(peristent_mesh.regular_origin), np.array(origin))\n assert_array_almost_equal(np.array(peristent_mesh.regular_dxyz_dij), np.array([[di, 0.0, 0.0], [0.0, dj, 0.0]]))\n\n # check a fully expanded version of the points\n assert_array_almost_equal(peristent_mesh.full_array_ref(), mesh.full_array_ref())\n\n # check that we can build a Surface from the Mesh\n surf = peristent_mesh.surface(quad_triangles = True)\n assert surf is not None\n\n # do some basic checks that the surface looks consistent with the mesh\n t, p = surf.triangles_and_points()\n assert len(p) == (ni * nj) + ((ni - 1) * (nj - 1)) # quad triangles mode introduces the extra points\n assert len(t) == 4 * (ni - 1) * (nj - 1)\n assert_array_almost_equal(np.min(p, axis = 0), np.min(peristent_mesh.full_array_ref().reshape(-1, 3), axis = 0))\n assert_array_almost_equal(np.max(p, axis = 0), np.max(peristent_mesh.full_array_ref().reshape(-1, 3), axis = 0))\n assert len(surf.distinct_edges()) == 6 * (ni - 1) * (nj - 1) + (ni - 1) + (nj - 1)\n",
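Hedged note (not part of the test module above): the point and triangle counts asserted in test_regular_mesh follow from the quad_triangles = True convention suggested by those assertions, in which each quadrilateral of the nj by ni node mesh gains one centre point and is split into four triangles. The helper below is illustrative only and is not a resqpy function.

def quad_triangle_counts(ni, nj):
    """Returns (expected point count, expected triangle count) for a quad-triangulated regular mesh."""
    quads = (ni - 1) * (nj - 1)
    points = ni * nj + quads  # original mesh nodes plus one centre point per quadrilateral
    triangles = 4 * quads  # four triangles per quadrilateral
    return points, triangles


# matches the assertions in test_regular_mesh above for ni = 7, nj = 5
assert quad_triangle_counts(7, 5) == (7 * 5 + 6 * 4, 4 * 6 * 4)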
"# vector_utilities module\n# note: many of these functions are redundant as they are provided by built-in numpy operations\n\nversion = '11th October 2021'\n\nimport logging\n\nlog = logging.getLogger(__name__)\nlog.debug('vector_utilities.py version %s', version)\n\n# works with 3D vectors in a cartesian space\n# a vector is a one dimensional numpy array with 3 elements: x, y, z\n# some functions accept a tuple or list of 3 elements as an alternative to a numpy array\n\nimport math as maths\n\nimport numpy as np\n\n\ndef radians_from_degrees(deg):\n \"\"\"Converts angle from degrees to radians.\"\"\"\n return np.radians(deg)\n\n\ndef degrees_from_radians(rad):\n \"\"\"Converts angle from radians to degrees.\"\"\"\n return np.degrees(rad)\n\n\ndef zero_vector():\n \"\"\"Returns a zero vector [0.0, 0.0, 0.0].\"\"\"\n return np.zeros(3)\n\n\ndef v_3d(v):\n \"\"\"Returns a 3D vector for a 2D or 3D vector.\"\"\"\n assert 2 <= len(v) <= 3\n if len(v) == 3:\n return v\n v3 = np.zeros(3)\n v3[:2] = v\n return v3\n\n\ndef add(a, b): # note: could just use numpy a + b facility\n \"\"\"Returns vector sum a+b.\"\"\"\n a = np.array(a)\n b = np.array(b)\n assert a.size == b.size\n return a + b\n\n\ndef subtract(a, b): # note: could just use numpy a - b facility\n \"\"\"Returns vector difference a-b.\"\"\"\n a = np.array(a)\n b = np.array(b)\n assert a.size == b.size\n return a - b\n\n\ndef elemental_multiply(a, b): # note: could just use numpy a * b facility\n \"\"\"Returns vector with products of corresponding elements of a and b.\"\"\"\n a = np.array(a)\n b = np.array(b)\n assert a.size == b.size\n return a * b\n\n\ndef amplify(v, scaling): # note: could just use numpy a * scalar facility\n \"\"\"Returns vector with direction of v, amplified by scaling.\"\"\"\n v = np.array(v)\n return scaling * v\n\n\ndef unit_vector(v):\n \"\"\"Returns vector with same direction as v but with unit length.\"\"\"\n assert 2 <= len(v) <= 3\n v = np.array(v, dtype = float)\n if np.all(v == 0.0):\n return v\n return v / maths.sqrt(np.sum(v * v))\n\n\ndef unit_vectors(v):\n \"\"\"Returns vectors with same direction as those in v but with unit length.\"\"\"\n scaling = np.sqrt(np.sum(v * v, axis = -1))\n zero_mask = np.zeros(v.shape, dtype = bool)\n zero_mask[np.where(scaling == 0.0), :] = True\n restore = np.seterr(all = 'ignore')\n result = np.where(zero_mask, 0.0, v / np.expand_dims(scaling, -1))\n np.seterr(**restore)\n return result\n\n\ndef unit_vector_from_azimuth(azimuth):\n \"\"\"Returns horizontal unit vector in compass bearing given by azimuth (x = East, y = North).\"\"\"\n azimuth = azimuth % 360.0\n azimuth_radians = radians_from_degrees(azimuth)\n result = zero_vector()\n result[0] = maths.sin(azimuth_radians) # x (increasing to east)\n result[1] = maths.cos(azimuth_radians) # y (increasing to north)\n return result # leave z as zero\n\n\ndef azimuth(v): # 'azimuth' is synonymous with 'compass bearing'\n \"\"\"Returns the compass bearing in degrees of the direction of v (x = East, y = North), ignoring z.\"\"\"\n assert 2 <= v.size <= 3\n z_zero_v = np.zeros(3)\n z_zero_v[:2] = v[:2]\n unit_v = unit_vector(z_zero_v) # also checks that z_zero_v is not zero vector\n x = unit_v[0]\n y = unit_v[1] # ignore z component\n if x == 0.0 and y == 0.0:\n return 0.0 # arbitrary azimuth of a vertical vector\n if abs(x) >= abs(y):\n radians = maths.pi / 2.0 - maths.atan(y / x)\n if x < 0.0:\n radians += maths.pi\n else:\n radians = maths.atan(x / y)\n if y < 0.0:\n radians += maths.pi\n if radians < 0.0:\n radians += 2.0 
* maths.pi\n return degrees_from_radians(radians)\n\n\ndef azimuths(va): # 'azimuth' is synonymous with 'compass bearing'\n \"\"\"Returns the compass bearings in degrees of the direction of each vector in va (x = East, y = North), ignoring z.\"\"\"\n assert va.ndim > 1 and 2 <= va.shape[-1] <= 3\n shape = tuple(list(va.shape[:-1]) + [3])\n z_zero_v = np.zeros(shape)\n z_zero_v[..., :2] = va[..., :2]\n unit_v = unit_vectors(z_zero_v) # also checks that z_zero_v is not zero vector\n x = unit_v[..., 0]\n y = unit_v[..., 1] # ignore z component\n # todo: handle cases where x == y == 0\n restore = np.seterr(all = 'ignore')\n radians = np.where(\n np.abs(x) >= np.abs(y),\n np.where(x < 0.0, maths.pi * 3.0 / 2.0 - np.arctan(y / x), maths.pi / 2.0 - np.arctan(y / x)),\n np.where(y < 0.0, maths.pi + np.arctan(x / y), np.arctan(x / y)))\n np.seterr(**restore)\n radians = radians % (2.0 * maths.pi)\n return np.degrees(radians)\n\n\ndef inclination(v):\n \"\"\"Returns the inclination in degrees of v (angle relative to +ve z axis).\"\"\"\n assert 2 <= len(v) <= 3\n unit_v = unit_vector(v)\n radians = maths.acos(dot_product(unit_v, np.array((0.0, 0.0, 1.0))))\n return degrees_from_radians(radians)\n\n\ndef points_direction_vector(a, axis):\n \"\"\"Returns an average direction vector based on first and last non-NaN points or slices in given axis.\"\"\"\n\n assert a.ndim > 1 and 0 <= axis < a.ndim - 1 and a.shape[-1] > 1 and a.shape[axis] > 1\n if np.all(np.isnan(a)):\n return None\n start = 0\n start_slicing = [slice(None)] * a.ndim\n while True:\n start_slicing[axis] = slice(start)\n if not np.all(np.isnan(a[tuple(start_slicing)])):\n break\n start += 1\n finish = a.shape[axis] - 1\n finish_slicing = [slice(None)] * a.ndim\n while True:\n finish_slicing[axis] = slice(finish)\n if not np.all(np.isnan(a[tuple(finish_slicing)])):\n break\n finish += 1\n if start >= finish:\n return None\n if a.ndim > 2:\n mean_axes = tuple(range(a.ndim - 1))\n start_p = np.nanmean(a[tuple(start_slicing)], axis = mean_axes)\n finish_p = np.nanmean(a[tuple(finish_slicing)], axis = mean_axes)\n else:\n start_p = a[start]\n finish_p = a[finish]\n\n return finish_p - start_p\n\n\ndef dot_product(a, b):\n \"\"\"Returns the dot product (scalar product) of the two vectors.\"\"\"\n return np.dot(a, b)\n\n\ndef dot_products(a, b):\n \"\"\"Returns the dot products of pairs of vectors; last axis covers element of a vector.\"\"\"\n return np.sum(a * b, axis = -1)\n\n\ndef cross_product(a, b):\n \"\"\"Returns the cross product (vector product) of the two vectors.\"\"\"\n return np.cross(a, b)\n\n\ndef naive_length(v):\n \"\"\"Returns the length of the vector assuming consistent units.\"\"\"\n return maths.sqrt(dot_product(v, v))\n\n\ndef naive_lengths(v):\n \"\"\"Returns the lengths of the vectors assuming consistent units.\"\"\"\n return np.sqrt(np.sum(v * v, axis = -1))\n\n\ndef naive_2d_length(v):\n \"\"\"Returns the length of the vector projected onto xy plane, assuming consistent units.\"\"\"\n return maths.sqrt(dot_product(v[0:2], v[0:2]))\n\n\ndef naive_2d_lengths(v):\n \"\"\"Returns the lengths of the vectors projected onto xy plane, assuming consistent units.\"\"\"\n v2d = v[..., :2]\n return np.sqrt(np.sum(v2d * v2d, axis = -1))\n\n\ndef unit_corrected_length(v, unit_conversion):\n \"\"\"Returns the length of the vector v after applying the unit_conversion factors.\"\"\"\n # unit_conversion might be [1.0, 1.0, 0.3048] to convert z from feet to metres, for example\n # or [3.28084, 3.28084, 1.0] to convert x and y from metres to 
feet\n converted = elemental_multiply(v, unit_conversion)\n return naive_length(converted)\n\n\ndef manhatten_distance(p1, p2):\n \"\"\"Returns the Manhattan distance between two points.\"\"\"\n return abs(p2[0] - p1[0]) + abs(p2[1] - p1[1]) + abs(p2[2] - p1[2])\n\n\ndef manhattan_distance(p1, p2): # alternative spelling to above\n \"\"\"Returns the Manhattan distance between two points.\"\"\"\n return abs(p2[0] - p1[0]) + abs(p2[1] - p1[1]) + abs(p2[2] - p1[2])\n\n\ndef radians_difference(a, b):\n \"\"\"Returns the angle between two vectors, in radians.\"\"\"\n\n return maths.acos(min(1.0, max(-1.0, dot_product(unit_vector(a), unit_vector(b)))))\n\n\ndef degrees_difference(a, b):\n \"\"\"Returns the angle between two vectors, in degrees.\"\"\"\n\n return degrees_from_radians(radians_difference(a, b))\n\n\ndef rotation_matrix_3d_axial(axis, angle):\n \"\"\"Retuns a rotation matrix which will rotate points about axis (0, 1, or 2) by angle in degrees.\"\"\"\n\n axis_a = (axis + 1) % 3\n axis_b = (axis_a + 1) % 3\n matrix = np.zeros((3, 3))\n matrix[axis, axis] = 1.0\n radians = radians_from_degrees(angle)\n cosine = maths.cos(radians)\n sine = maths.sin(radians)\n matrix[axis_a, axis_a] = cosine\n matrix[axis_b, axis_b] = cosine\n matrix[axis_a, axis_b] = -sine # left handed coordinate system, eg. UTM & depth\n matrix[axis_b, axis_a] = sine\n return matrix\n\n\ndef rotation_3d_matrix(xzy_axis_angles):\n matrix = np.zeros((3, 3))\n for axis in range(3):\n matrix[axis, axis] = 1.0\n for axis in range(3):\n matrix = np.dot(matrix, rotation_matrix_3d_axial(axis, xzy_axis_angles[axis]))\n return matrix\n\n\ndef rotate_vector(rotation_matrix, vector):\n \"\"\"Returns the rotated vector.\"\"\"\n\n return np.dot(rotation_matrix, vector)\n\n\ndef rotate_array(rotation_matrix, a):\n \"\"\"Returns a copy of array a with each vector rotated by the rotation matrix.\"\"\"\n\n s = a.shape\n return np.matmul(rotation_matrix, a.reshape(-1, 3).T).T.reshape(s)\n\n\ndef rotate_xyz_array_around_z_axis(a, target_xy_vector):\n \"\"\"Returns a copy of array a suitable for presenting a cross-section using the resulting x,z values.\n\n arguments:\n a (numpy float array of shape (..., 3)): the xyz points to be rotated\n target_xy_vector (2 (or 3) floats): a vector indicating which direction in source xy space will end up\n being mapped to the positive x axis in the returned data\n\n returns:\n numpy float array of same shape as a\n\n notes:\n if the input points of a lie in a vertical plane parallel to the target xy vector, then the resulting\n points will have constant y values; in general, a full rotation of the points is applied, so resulting\n y values will indicate distance 'into the page' for non-planar or unaligned data\n \"\"\"\n\n target_v = np.zeros(3)\n target_v[:2] = target_xy_vector[:2]\n rotation_angle = azimuth(target_v) - 90.0\n rotation_matrix = rotation_matrix_3d_axial(2, rotation_angle) # todo: check sign of rotation angle\n return rotate_array(rotation_matrix, a)\n\n\ndef unit_vector_from_azimuth_and_inclination(azimuth, inclination):\n \"\"\"Returns unit vector with compass bearing of azimuth and inclination off +z axis.\"\"\"\n\n matrix = rotation_3d_matrix((inclination, azimuth, 0.0))\n return rotate_vector(matrix, np.array((0.0, 0.0, 1.0)))\n\n\ndef tilt_3d_matrix(azimuth, dip):\n \"\"\"Returns a 3D rotation matrix for applying a dip in a certain azimuth.\n\n note:\n if azimuth is compass bearing in degrees, and dip is in degrees, the resulting matrix can be used\n to rotate xyz points where x 
values are eastings, y values are northings and z increases downwards\n \"\"\"\n\n matrix = rotation_matrix_3d_axial(2, -azimuth) # will yield rotation around z axis so azimuth goes north\n matrix = np.dot(matrix, rotation_matrix_3d_axial(0, dip)) # adjust for dip\n matrix = np.dot(matrix, rotation_matrix_3d_axial(2, azimuth)) # rotate azimuth back to original\n return matrix\n\n\ndef tilt_points(pivot_xyz, azimuth, dip, points):\n \"\"\"Modifies array of xyz points in situ to apply dip in direction of azimuth, about pivot point.\"\"\"\n\n # log.debug('pivot xyz: ' + str(pivot_xyz))\n matrix = tilt_3d_matrix(azimuth, dip)\n points_shape = points.shape\n points[:] -= pivot_xyz\n # log.debug('points shape: ' + str(points.shape))\n points[:] = np.matmul(matrix, points.reshape((-1, 3)).transpose()).transpose().reshape(points_shape)\n points[:] += pivot_xyz\n\n\ndef project_points_onto_plane(plane_xyz, normal_vector, points):\n \"\"\"Modifies array of xyz points in situ to project onto a plane defined by a point and normal vector.\"\"\"\n\n az = azimuth(normal_vector)\n incl = inclination(normal_vector)\n tilt_points(plane_xyz, az, -incl, points)\n points[..., 2] = plane_xyz[2]\n tilt_points(plane_xyz, az, incl, points)\n\n\ndef perspective_vector(xyz_box, view_axis, vanishing_distance, vector):\n mid_points = np.zeros(3)\n xyz_ranges = np.zeros(3)\n result = np.zeros(3)\n for axis in range(3):\n mid_points[axis] = 0.5 * (xyz_box[0, axis] + xyz_box[1, axis])\n xyz_ranges[axis] = xyz_box[1, axis] - xyz_box[0, axis]\n factor = 1.0 - (vector[view_axis] - xyz_box[0, view_axis]) / (vanishing_distance * (xyz_ranges[view_axis]))\n result[view_axis] = vector[view_axis]\n for axis in range(3):\n if axis == view_axis:\n continue\n result[axis] = mid_points[axis] + factor * (vector[axis] - mid_points[axis])\n return result\n\n\ndef determinant(a, b, c):\n \"\"\"Returns the determinant of the 3 x 3 matrix comprised of the 3 vectors.\"\"\"\n\n return (a[0] * b[1] * c[2] + a[1] * b[2] * c[0] + a[2] * b[0] * c[1] - a[2] * b[1] * c[0] - a[1] * b[0] * c[2] -\n a[0] * b[2] * c[1])\n\n\ndef determinant_3x3(a):\n \"\"\"Returns the determinant of the 3 x 3 matrix.\"\"\"\n\n return determinant(a[0], a[1], a[2])\n\n\ndef clockwise(a, b, c):\n \"\"\"Returns a +ve value if 2D points a,b,c are in clockwise order, 0.0 if in line, -ve for ccw.\n\n note:\n assumes positive y-axis is anticlockwise from positive x-axis\n \"\"\"\n\n return (c[0] - a[0]) * (b[1] - a[1]) - ((c[1] - a[1]) * (b[0] - a[0]))\n\n\ndef in_triangle(a, b, c, d):\n \"\"\"Returns True if point d lies wholly within the triangle pf ccw points a, b, c, projected onto xy plane.\n\n note:\n a, b & c must be sorted into anti-clockwise order before calling this function\n \"\"\"\n\n return clockwise(a, b, d) < 0.0 and clockwise(b, c, d) < 0.0 and clockwise(c, a, d) < 0.0\n\n\ndef in_triangle_edged(a, b, c, d):\n \"\"\"Returns True if d lies within or on the boudnary of triangle of ccw points a,b,c projected onto xy plane.\n\n note:\n a, b & c must be sorted into anti-clockwise order before calling this function\n \"\"\"\n\n return clockwise(a, b, d) <= 0.0 and clockwise(b, c, d) <= 0.0 and clockwise(c, a, d) <= 0.0\n\n\ndef in_circumcircle(a, b, c, d):\n \"\"\"Returns True if point d lies within the circumcircle pf ccw points a, b, c, projected onto xy plane.\n\n note:\n a, b & c must be sorted into anti-clockwise order before calling this function\n \"\"\"\n\n m = np.empty((3, 3))\n m[0, :2] = a[:2] - d[:2]\n m[1, :2] = b[:2] - d[:2]\n m[2, :2] = c[:2] - 
d[:2]\n m[:, 2] = (m[:, 0] * m[:, 0]) + (m[:, 1] * m[:, 1])\n return determinant_3x3(m) > 0.0\n\n\ndef point_distance_to_line_2d(p, l1, l2):\n \"\"\"Ignoring any z values, returns the xy distance of point p from line passing through l1 and l2.\"\"\"\n\n return (abs(p[0] * (l1[1] - l2[1]) + l1[0] * (l2[1] - p[1]) + l2[0] * (p[1] - l1[1])) / naive_2d_length(l2 - l1))\n\n\ndef isclose(a, b, tolerance = 1.0e-6):\n \"\"\"Returns True if the two points are extremely close to one another (ie.\n\n the same point).\n \"\"\"\n\n # return np.all(np.isclose(a, b, atol = tolerance))\n # cheap and cheerful alternative to thorough numpy version commented out above\n return np.max(np.abs(a - b)) <= tolerance\n\n\ndef is_close(a, b, tolerance = 1.0e-6):\n \"\"\"Returns True if the two points are extremely close to one another (ie.\n\n the same point).\n \"\"\"\n\n return isclose(a, b, tolerance = tolerance)\n\n\ndef point_distance_sqr_to_points_projected(p, points, projection):\n \"\"\"Returns an array of projected distances squared between p and points; projection is 'xy', 'xz' or 'yz'.\"\"\"\n\n if projection == 'xy':\n d = points[..., :2] - p[:2]\n elif projection == 'xz':\n d = points[..., 0:3:2] - p[0:3:2]\n elif projection == 'yz':\n d = points[..., 1:] - p[1:]\n else:\n raise ValueError(\"projection must be 'xy', 'xz' or 'yz'\")\n return np.sum(d * d, axis = -1)\n\n\ndef nearest_point_projected(p, points, projection):\n \"\"\"Returns the index into points array closest to point p; projection is 'xy', 'xz' or 'yz'.\"\"\"\n\n # note: in the case of equidistant points, the index of the 'first' point is returned\n\n d2 = point_distance_sqr_to_points_projected(p, points, projection)\n return np.unravel_index(np.nanargmin(d2), d2.shape)\n\n\ndef area_of_triangle(a, b, c):\n \"\"\"Returns the area of the triangle defined by three vertices.\"\"\"\n\n # uses Heron's formula\n la = naive_length(a - b)\n lb = naive_length(b - c)\n lc = naive_length(c - a)\n s = 0.5 * (la + lb + lc)\n return maths.sqrt(s * (s - la) * (s - lb) * (s - lc))\n\n\ndef area_of_triangles(p, t, xy_projection = False):\n \"\"\"Returns numpy array of areas of triangles, optionally when projected onto xy plane.\"\"\"\n\n # uses Heron's formula\n pt = p[t]\n if xy_projection:\n la = naive_2d_lengths(pt[:, 0, :] - pt[:, 1, :])\n lb = naive_2d_lengths(pt[:, 1, :] - pt[:, 2, :])\n lc = naive_2d_lengths(pt[:, 2, :] - pt[:, 0, :])\n else:\n la = naive_lengths(pt[:, 0, :] - pt[:, 1, :])\n lb = naive_lengths(pt[:, 1, :] - pt[:, 2, :])\n lc = naive_lengths(pt[:, 2, :] - pt[:, 0, :])\n s = 0.5 * (la + lb + lc)\n return np.sqrt(s * (s - la) * (s - lb) * (s - lc))\n\n\ndef clockwise_sorted_indices(p, b):\n \"\"\"Returns a clockwise sorted numpy list of indices b into the points p.\n\n note:\n this function is designed for preparing a list of points defining a convex polygon when projected in\n the xy plane, starting from a subset of the unsorted points; more specifically, it assumes that the\n mean of p (over axis 0) lies within the polygon and the clockwise ordering is relative to that mean point\n \"\"\"\n\n # note: this function currently assumes that the mean of points bp lies within the hull of bp\n # and that the points form a convex polygon from the perspective of the mean point\n assert p.ndim == 2 and len(p) >= 3\n centre = np.mean(p, axis = 0)\n hull_list = [] # list of azimuths and indices into p (axis 0)\n for i in b:\n azi = azimuth(p[i] - centre)\n hull_list.append((azi, i))\n return np.array([i for (_, i) in sorted(hull_list)], dtype = 
int)\n\n\n# end of vector_utilities module\n"
] |
[
[
"pandas.DataFrame",
"numpy.all",
"numpy.max",
"numpy.mean",
"pandas.isna",
"numpy.where",
"pandas.notna",
"pandas.read_csv",
"numpy.allclose",
"numpy.unique",
"numpy.arange",
"numpy.full",
"numpy.count_nonzero",
"numpy.zeros",
"numpy.min",
"numpy.isnan",
"numpy.array",
"numpy.sum",
"pandas.isnull",
"numpy.shape",
"numpy.empty"
],
[
"numpy.nanmin",
"numpy.all",
"numpy.max",
"numpy.any",
"numpy.nanmean",
"numpy.negative",
"numpy.where",
"numpy.arange",
"numpy.count_nonzero",
"numpy.zeros",
"numpy.logical_not",
"numpy.isnan",
"numpy.logical_or",
"numpy.logical_and",
"numpy.array",
"numpy.sum",
"numpy.abs",
"numpy.ones",
"numpy.ma.masked_array",
"numpy.empty"
],
[
"numpy.nanmax",
"numpy.random.random",
"numpy.min",
"numpy.nanmin",
"numpy.stack",
"numpy.max",
"numpy.array",
"numpy.zeros"
],
[
"numpy.dot",
"numpy.radians",
"numpy.expand_dims",
"numpy.sqrt",
"numpy.abs",
"numpy.arctan",
"numpy.isnan",
"numpy.degrees",
"numpy.all",
"numpy.seterr",
"numpy.nanargmin",
"numpy.mean",
"numpy.where",
"numpy.cross",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.empty"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
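The `vector_utilities` module recorded above treats a bearing as degrees clockwise from north, with x increasing to the east and y to the north, and returns 0.0 for a vertical or zero vector. A minimal sketch of that same convention using `numpy.arctan2` (the `compass_azimuth` helper is illustrative only, not part of the module):

```python
# Compass-bearing sketch matching the convention in the vector_utilities
# docstrings: x = East, y = North, angle measured clockwise from north.
import numpy as np

def compass_azimuth(v):
    """Return the bearing of the xy part of v, in [0, 360) degrees."""
    x, y = float(v[0]), float(v[1])
    if x == 0.0 and y == 0.0:
        return 0.0  # arbitrary bearing for a vertical/zero vector, as in the module
    return np.degrees(np.arctan2(x, y)) % 360.0  # note: arctan2(x, y), not (y, x)

if __name__ == "__main__":
    print(compass_azimuth((0.0, 1.0, 0.0)))    # due north      -> 0.0
    print(compass_azimuth((1.0, 0.0, 0.0)))    # due east       -> 90.0
    print(compass_azimuth((-1.0, -1.0, 0.0)))  # south-west     -> 225.0
```

Passing `(x, y)` rather than `(y, x)` to `arctan2` is what converts the counter-clockwise mathematical angle into a clockwise compass bearing.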
tkim135/scibert
|
[
"3da79bcfc0f6649a5adc8622aca7fa488f6eee26"
] |
[
"scibert/models/gpt_text_classifier.py"
] |
[
"from typing import Dict, Optional, List, Any\n\nimport torch\nimport torch.nn.functional as F\nfrom allennlp.data import Vocabulary\nfrom allennlp.models.model import Model\nfrom allennlp.modules import FeedForward, TextFieldEmbedder, Seq2SeqEncoder\nfrom allennlp.nn import InitializerApplicator, RegularizerApplicator\nfrom allennlp.nn import util\nfrom allennlp.training.metrics import CategoricalAccuracy, F1Measure\nfrom overrides import overrides\nfrom scibert.models.text_classifier import TextClassifier\n\n\[email protected](\"gpt_text_classifier\")\nclass GptTextClassifier(TextClassifier):\n \"\"\"\n Implements a basic text classifier:\n 1) Embed tokens using `text_field_embedder`\n 2) Get the CLS token\n 3) Final feedforward layer\n\n Optimized with CrossEntropyLoss. Evaluated with CategoricalAccuracy & F1.\n \"\"\"\n def __init__(self, vocab: Vocabulary,\n text_field_embedder: TextFieldEmbedder,\n verbose_metrics: bool = False,\n dropout: float = 0.2,\n initializer: InitializerApplicator = InitializerApplicator(),\n regularizer: Optional[RegularizerApplicator] = None,\n ) -> None:\n super(TextClassifier, self).__init__(vocab, regularizer)\n\n self.text_field_embedder = text_field_embedder\n self.dropout = torch.nn.Dropout(dropout)\n self.num_classes = self.vocab.get_vocab_size(\"labels\")\n self.classifier_feedforward = torch.nn.Linear(self.text_field_embedder.get_output_dim() , self.num_classes)\n\n self.label_accuracy = CategoricalAccuracy()\n self.label_f1_metrics = {}\n\n self.verbose_metrics = verbose_metrics\n\n for i in range(self.num_classes):\n self.label_f1_metrics[vocab.get_token_from_index(index=i, namespace=\"labels\")] = F1Measure(positive_label=i)\n self.loss = torch.nn.CrossEntropyLoss()\n\n initializer(self)\n\n @overrides\n def forward(self,\n text: Dict[str, torch.LongTensor],\n label: torch.IntTensor = None,\n metadata: List[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]:\n \"\"\"\n Parameters\n ----------\n text : Dict[str, torch.LongTensor]\n From a ``TextField``\n label : torch.IntTensor, optional (default = None)\n From a ``LabelField``\n metadata : ``List[Dict[str, Any]]``, optional, (default = None)\n Metadata containing the original tokenization of the premise and\n hypothesis with 'premise_tokens' and 'hypothesis_tokens' keys respectively.\n Returns\n -------\n An output dictionary consisting of:\n label_logits : torch.FloatTensor\n A tensor of shape ``(batch_size, num_labels)`` representing unnormalised log probabilities of the label.\n label_probs : torch.FloatTensor\n A tensor of shape ``(batch_size, num_labels)`` representing probabilities of the label.\n loss : torch.FloatTensor, optional\n A scalar loss to be optimised.\n \"\"\"\n logits = self.text_field_embedder(text, gpt=True)\n class_probs = F.softmax(logits, dim=1)\n\n output_dict = {\"logits\": logits}\n if label is not None:\n loss = self.loss(logits.view(-1, self.num_classes), label.view(-1))\n output_dict[\"loss\"] = loss\n\n # compute F1 per label\n for i in range(self.num_classes):\n metric = self.label_f1_metrics[self.vocab.get_token_from_index(index=i, namespace=\"labels\")]\n metric(class_probs, label)\n self.label_accuracy(logits, label)\n return output_dict\n"
] |
[
[
"torch.nn.CrossEntropyLoss",
"torch.nn.Dropout",
"torch.nn.functional.softmax"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
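The GPT text classifier above feeds raw logits to `CrossEntropyLoss` and applies `softmax` only when per-class probabilities are needed for metrics. A small, self-contained sketch of that split, using random tensors as stand-ins for the embedder output (shapes are made up for the demo):

```python
# CrossEntropyLoss consumes unnormalised logits; softmax is only for reporting.
import torch
import torch.nn.functional as F

batch_size, num_classes = 4, 3
logits = torch.randn(batch_size, num_classes)          # stand-in for the embedder output
labels = torch.randint(0, num_classes, (batch_size,))  # integer class ids

class_probs = F.softmax(logits, dim=1)                 # (batch, num_classes), rows sum to 1
loss = torch.nn.CrossEntropyLoss()(logits.view(-1, num_classes), labels.view(-1))

print(class_probs.sum(dim=1))  # ~1.0 per row
print(float(loss))
```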
f1tenth/ESweek2021_educationclassA3
|
[
"7a32bacdb7f3154a773d28b6b6abffdaa154a526"
] |
[
"03_GraphBasedPlanner/graph_ltpl/offline_graph/src/main_offline_callback.py"
] |
[
"import numpy as np\nimport configparser\nimport pickle\nimport os.path as osfuncs\nimport hashlib\nimport logging\n\n# custom modules\nimport graph_ltpl\n\n\ndef main_offline_callback(globtraj_param_path: str,\n graph_off_config_path: str,\n graph_store_path: str,\n graph_logging_path: str = None,\n graph_id: str = None,\n force_recalc=False) -> tuple:\n \"\"\"\n The main function to be called once for the offline graph setup. The function tries to load an existing GraphBase\n object. If the object is not existent or not valid for the current parameter set, a new one is created. In this case\n the following steps are executed:\n\n * Load global race line and map\n * Calculate variable step-size along track depending on straight and curve segments\n * Init new GraphBase class instance\n * Setup node sceletion (spread nodes on normal vectors along reference line)\n * Generate edges between nodes\n * Prune graph (e.g. remove dead end edges and associated paths)\n * Calculate costs for each path segment\n * Store graph for later executions\n\n :param globtraj_param_path: path pointing to data file holding all information about the global race line and map\n :param graph_off_config_path: path pointing to the config file specifying the offline graph behavior / generation\n :param graph_store_path: path pointing to location, where a copy of the setup graph should be stored/loaded\n :param graph_logging_path: path pointing to location where the graph should be stored together with the logs\n :param graph_id: unique graph identifier retrieved from a (eventually) loaded graph\n :param force_recalc: flag, if set to \"True\" a new graph is calculated instead of loading from file\n :returns:\n * **graph_base** - reference to the GraphBase object instance holding all graph relevant information\n * **new_base_generated** - status, whether a new graph base has been generated or not\n\n :Authors:\n * Tim Stahl <[email protected]>\n\n :Created on:\n 28.09.2018\n\n \"\"\"\n\n # ------------------------------------------------------------------------------------------------------------------\n # SETUP GRAPH ------------------------------------------------------------------------------------------------------\n # ------------------------------------------------------------------------------------------------------------------\n\n new_base_generated = False\n graph_base = None\n # get the MD5-hash of all config files (fused together, since we want to recalculate whenever any file changed)\n calculated_md5 = md5(globtraj_param_path) + md5(graph_off_config_path)\n\n # If legible, load graph from file (else generate)\n if not force_recalc and osfuncs.isfile(graph_store_path):\n f = open(graph_store_path, 'rb')\n graph_base = pickle.load(f)\n f.close()\n logging.getLogger(\"local_trajectory_logger\").debug(\"Loaded database with \" + str(len(graph_base.get_nodes()))\n + \" node and \" + str(len(graph_base.get_edges()))\n + \" edges from file...\")\n\n if force_recalc or graph_base is None or calculated_md5 != graph_base.md5_params:\n new_base_generated = True\n if force_recalc:\n print(\"Manually forced recalculation of graph! Skipped graph import from file!\")\n if graph_base is not None and calculated_md5 is not graph_base.md5_params:\n print(\"MD5-Sum of any param-file does not match the one in the graph object! 
Triggered recalculation!\")\n\n # load graph configuration\n graph_config = configparser.ConfigParser()\n if not graph_config.read(graph_off_config_path):\n raise ValueError('Specified graph config file does not exist or is empty!')\n\n # load data from csv files\n refline, t_width_right, t_width_left, normvec_normalized, alpha, length_rl, vel_rl, kappa_rl \\\n = graph_ltpl.imp_global_traj.src.import_globtraj_csv.import_globtraj_csv(import_path=globtraj_param_path)\n\n # calculate closed race line parameters\n # s, x, y, kappa, vel\n s = np.concatenate(([0], np.cumsum(length_rl)))\n xy = refline + normvec_normalized * alpha[:, np.newaxis]\n raceline_params = np.column_stack((xy, kappa_rl, vel_rl))\n\n # determine if track is closed or unclosed (check if end and start-point are close together)\n closed = (np.hypot(xy[0, 0] - xy[-1, 0], xy[0, 1] - xy[-1, 1])\n < graph_config.getfloat('LATTICE', 'closure_detection_dist'))\n if closed:\n logging.getLogger(\"local_trajectory_logger\").debug(\"Input line is interpreted as closed track!\")\n\n # close line\n glob_rl = np.column_stack((s, np.vstack((raceline_params, raceline_params[0, :]))))\n else:\n logging.getLogger(\"local_trajectory_logger\").debug(\"Input line is interpreted as _unclosed_ track!\")\n glob_rl = np.column_stack((s[:-1], raceline_params))\n\n # based on curvature get index array for selection of normal vectors and corresponding raceline parameters\n idx_array = graph_ltpl.imp_global_traj.src.variable_step_size. \\\n variable_step_size(kappa=kappa_rl,\n dist=length_rl,\n d_curve=graph_config.getfloat('LATTICE', 'lon_curve_step'),\n d_straight=graph_config.getfloat('LATTICE', 'lon_straight_step'),\n curve_th=graph_config.getfloat('LATTICE', 'curve_thr'),\n force_last=not closed)\n\n # extract values at determined positions\n refline = refline[idx_array, :]\n t_width_right = t_width_right[idx_array]\n t_width_left = t_width_left[idx_array]\n normvec_normalized = normvec_normalized[idx_array]\n alpha = alpha[idx_array]\n vel_rl = vel_rl[idx_array]\n s_raceline = s[idx_array]\n\n length_rl_tmp = []\n for idx_from, idx_to in zip(idx_array[:-1], idx_array[1:]):\n length_rl_tmp.append(np.sum(length_rl[idx_from:idx_to]))\n\n length_rl_tmp.append(0.0)\n length_rl = list(length_rl_tmp)\n\n # init graph base object\n graph_base = graph_ltpl.data_objects.GraphBase.\\\n GraphBase(lat_offset=graph_config.getfloat('LATTICE', 'lat_offset'),\n num_layers=np.size(alpha, axis=0),\n refline=refline,\n normvec_normalized=normvec_normalized,\n track_width_right=t_width_right,\n track_width_left=t_width_left,\n alpha=alpha,\n vel_raceline=vel_rl,\n s_raceline=s_raceline,\n lat_resolution=graph_config.getfloat('LATTICE', 'lat_resolution'),\n sampled_resolution=graph_config.getfloat('SAMPLING', 'stepsize_approx'),\n vel_decrease_lat=graph_config.getfloat('PLANNINGTARGET', 'vel_decrease_lat'),\n veh_width=graph_config.getfloat('VEHICLE', 'veh_width'),\n veh_length=graph_config.getfloat('VEHICLE', 'veh_length'),\n veh_turn=graph_config.getfloat('VEHICLE', 'veh_turn'),\n md5_params=calculated_md5,\n graph_id=graph_id,\n glob_rl=glob_rl,\n virt_goal_node=graph_config.getboolean('LATTICE', 'virt_goal_n'),\n virt_goal_node_cost=graph_config.getfloat('COST', 'w_virt_goal'),\n min_plan_horizon=graph_config.getfloat('PLANNINGTARGET', 'min_plan_horizon'),\n plan_horizon_mode=graph_config.get('PLANNINGTARGET', 'plan_horizon_mode'),\n closed=closed)\n\n # set up state space\n state_pos = graph_ltpl.offline_graph.src.gen_node_skeleton. 
\\\n gen_node_skeleton(graph_base=graph_base,\n length_raceline=length_rl,\n var_heading=graph_config.getboolean('LATTICE', 'variable_heading'))\n\n # convert to array of arrays\n state_pos_arr = np.empty(shape=(len(state_pos), 2), dtype=np.object)\n state_pos_arr[:] = state_pos\n\n # generate edges (polynomials and coordinate arrays)\n graph_ltpl.offline_graph.src.gen_edges.gen_edges(state_pos=state_pos_arr,\n graph_base=graph_base,\n stepsize_approx=graph_config.getfloat('SAMPLING',\n 'stepsize_approx'),\n min_vel_race=graph_config.getfloat('LATTICE', 'min_vel_race'),\n closed=closed)\n\n # prune graph (remove dead ends)\n graph_ltpl.offline_graph.src.prune_graph.prune_graph(graph_base=graph_base,\n closed=closed)\n\n # generate cost\n graph_ltpl.offline_graph.src.gen_offline_cost.gen_offline_cost(graph_base=graph_base,\n cost_config_path=graph_off_config_path)\n\n # declare initialization as finished and initialize original filter\n graph_base.init_filtering()\n\n # store graph for later use\n f = open(graph_store_path, 'wb')\n pickle.dump(graph_base, f)\n f.close()\n else:\n if graph_logging_path is not None:\n # if existing graph object is valid, adapt logging file name according to stored id in graph object\n graph_logging_path = (graph_logging_path[:graph_logging_path.rfind('/Graph_Objects/') + 15]\n + str(graph_base.graph_id) + \".pckl\")\n\n # log graph, if path provided and not existent\n if graph_logging_path is not None and not osfuncs.isfile(graph_logging_path):\n f = open(graph_logging_path, 'wb')\n pickle.dump(graph_base, f)\n f.close()\n\n return graph_base, new_base_generated\n\n\ndef md5(fname):\n hash_md5 = hashlib.md5()\n with open(fname, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n return hash_md5.hexdigest()\n"
] |
[
[
"numpy.vstack",
"numpy.cumsum",
"numpy.size",
"numpy.column_stack",
"numpy.sum",
"numpy.hypot"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
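`main_offline_callback` above regenerates and re-pickles the graph only when the fused MD5 of the parameter files no longer matches the hash stored in the loaded object. A generic sketch of that hash-gated caching pattern, with placeholder paths and a hypothetical `build_fn` standing in for the offline graph setup:

```python
# Rebuild the pickled object only when any parameter file's MD5 changes.
import hashlib
import os
import pickle

def md5(fname):
    h = hashlib.md5()
    with open(fname, "rb") as f:
        for chunk in iter(lambda: f.read(4096), b""):
            h.update(chunk)
    return h.hexdigest()

def load_or_build(store_path, param_files, build_fn, force_recalc=False):
    fused = "".join(md5(p) for p in param_files)   # fused hash over all config files
    if not force_recalc and os.path.isfile(store_path):
        with open(store_path, "rb") as f:
            cached = pickle.load(f)
        if cached.get("md5_params") == fused:
            return cached["obj"], False            # parameters unchanged: reuse
    obj = build_fn()                               # expensive offline setup
    with open(store_path, "wb") as f:
        pickle.dump({"md5_params": fused, "obj": obj}, f)
    return obj, True                               # new object generated
```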
Top1Miami/ITMO_FS
|
[
"781b196219f54488fe17b9307694666221b5ddd0"
] |
[
"ITMO_FS/filters/multivariate/mimaga.py"
] |
[
"import numpy as np\nimport random\nfrom sklearn.metrics import f1_score\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import SVC\nfrom sklearn.model_selection import train_test_split\n\n\ndef marginal_entropy(x):\n x_counter = {xi: 0 for xi in x}\n for xi in x:\n x_counter[xi] += 1\n probs = np.array(list(map(lambda xi: x_counter[xi] / len(x), x)))\n nonzero_probs = probs[np.where(probs > 0)]\n entropy = -sum(nonzero_probs * np.log(nonzero_probs))\n return entropy\n\n\ndef conditional_entropy(x, y):\n y_counter = {yi: 0 for yi in y}\n x_by_y_counter = {yi: {xi: 0 for xi in x} for yi in y}\n for i in range(len(x)):\n xi = x[i]\n yi = y[i]\n y_counter[yi] += 1\n x_by_y_counter[yi][xi] += 1\n entropy = 0.\n for yi in y_counter.keys():\n x_yi = x_by_y_counter[yi].values()\n x_by_yi = np.array(list(map(lambda xi: xi / y_counter[yi], x_yi)))\n nonzero_probs = x_by_yi[np.where(x_by_yi > 0)]\n yi_entropy = -sum(nonzero_probs * np.log(nonzero_probs))\n entropy += (y_counter[yi] / len(y)) * yi_entropy\n return entropy\n\n\ndef mutual_information(x, y):\n return marginal_entropy(x) - conditional_entropy(x, y)\n\n\ndef genes_mutual_information(genes):\n \"\"\"\n :param genes: dataset\n :return: mutual information for every gene in dataset\n \"\"\"\n g_num, _ = genes.shape # number of features\n mi_matrix = np.zeros((g_num, g_num))\n for i in range(g_num):\n for j in range(g_num):\n if i != j:\n mi_matrix[i][j] = mutual_information(genes[i], genes[j])\n mi_vector = [sum(mi_matrix[i]) for i in range(g_num)]\n return mi_vector\n\n\ndef decode_genes(mapping, chromosome, train, test):\n \"\"\"\n :param chromosome: binary vector of feature presence\n :param train: train set of initial dataset\n :param test: test set of initial dataset\n :return: decoded train and test sets (reduced)\n \"\"\"\n filtered_train, filtered_test = [], []\n for i in range(len(chromosome)):\n if chromosome[i] == 1:\n initial_index = mapping[i]\n filtered_train.append(train[initial_index])\n filtered_test.append(test[initial_index])\n return np.array(filtered_train), np.array(filtered_test)\n\n\ndef population_fitness(mapping, population, train, train_cl, test, test_cl):\n \"\"\"\n :param population: vector of chromosomes\n :return: vector of (chromosome code, chromosome fitness), max fitness, average fitness\n \"\"\"\n code_fitness = []\n f_sum = 0\n for i in range(len(population)):\n filtered_train, filtered_test = decode_genes(mapping, population[i], train, test)\n clf = make_pipeline(StandardScaler(), SVC(gamma='auto'))\n if len(filtered_train) == 0:\n continue\n clf.fit(filtered_train.transpose(), train_cl)\n predicted_classes = clf.predict(filtered_test.transpose())\n f = f1_score(test_cl, predicted_classes)\n code_fitness.append((population[i], f))\n f_sum += f\n code_fitness.sort(key=lambda p: p[1], reverse=True)\n f_max = code_fitness[0][1]\n f_avg = f_sum / len(population)\n return code_fitness, f_max, f_avg\n\n\ndef crossover(x, y):\n \"\"\" simple one-point crossover \"\"\"\n random_point = random.randint(1, len(x) - 1)\n return x[0:random_point] + y[random_point:len(x)], \\\n y[0:random_point] + x[random_point:len(x)]\n\n\ndef mutation(x):\n \"\"\" simple one-bit-inverse mutation \"\"\"\n random_point = random.randint(0, len(x) - 1)\n x[random_point] = (x[random_point] - 1) % 2\n return x\n\n\ndef cross_and_mutate(pc, pm, population):\n \"\"\"\n :param pc: crossover probability\n :param pm: mutation probability\n :param population: (chromosome code, 
chromosome fitness) pairs\n :return: (new population, maximum parents' fitness) pair\n \"\"\"\n cross_number = int(pc * len(population))\n mutate_number = int(pm * len(population))\n max_parent_f = 0\n new_population = list(map(lambda x: x[0], population))\n for i in range(cross_number):\n parent1, f1 = population[random.randint(0, len(population) - 1)]\n parent2, f2 = population[random.randint(0, len(population) - 1)]\n child1, child2 = crossover(parent1, parent2)\n new_population.extend([child1, child2])\n max_parent_f = max([max_parent_f, f1, f2])\n for i in range(mutate_number):\n mutant = mutation(population[random.randint(0, len(population) - 1)][0])\n new_population.append(mutant)\n return new_population, max_parent_f\n\n\nclass MIMAGA(object):\n\n def __init__(self, mim_size, pop_size, max_iter, f_target, k1, k2, k3, k4):\n \"\"\"\n :param mim_size: desirable number of filtered features after MIM\n :param pop_size: initial population size\n :param max_iter: maximum number of iterations in algorithm\n :param f_target: desirable fitness value\n :param k1: consts to determine crossover probability\n :param k2: consts to determine crossover probability\n :param k3: consts to determine mutation probability\n :param k4: consts to determine mutation probability\n \"\"\"\n self._mim_size = mim_size\n self._pop_size = pop_size\n self._max_iter = max_iter\n self._f_target = f_target\n self._k1 = k1\n self._k2 = k2\n self._k3 = k3\n self._k4 = k4\n\n # MIM\n\n def _mim_filter(self, genes):\n \"\"\"\n :param genes: initial dataset\n :return: sequence of feature indexes with minimum MI\n \"\"\"\n g_num, _ = genes.shape\n mi_vector = genes_mutual_information(genes)\n seq_nums = [i for i in range(g_num)]\n target_sequence = list(map(lambda p: p[1], sorted(zip(mi_vector, seq_nums))))[:self.mim_size]\n return target_sequence\n\n # AGA\n def _initial_population(self):\n \"\"\"\n :return: initial population\n P.S. 
each individual corresponds to chromosome\n \"\"\"\n population = []\n for _ in range(self._pop_size):\n individual_num = random.randint(1, 2 << self._mim_size - 1)\n individual_code = list(map(int, bin(individual_num)[2:].zfill(self._mim_size)))\n population.append(individual_code)\n return population\n\n def _crossover_probability(self, f_max, f_avg, f_par):\n \"\"\" probability of crossover in population \"\"\"\n if f_par >= f_avg:\n return self._k1 * ((f_max - f_par) / (f_max - f_avg)) \\\n if f_max != f_avg else 1\n else:\n return self._k2\n\n def _mutation_probability(self, f_max, f_avg, f_par):\n \"\"\" probability of mutation in population \"\"\"\n if f_par >= f_avg:\n return self._k3 * ((f_max - f_par) / (f_max - f_avg)) \\\n if f_max != f_avg else 1\n else:\n return self._k4\n\n def _aga_filter(self, max_size, mapping, population, train, train_cl, test, test_cl):\n \"\"\"\n :param max_size: maximum size of population (if population becomes bigger,\n the worst individuals are killed)\n :param mapping: mapping from mim-filter index to initial index in dataset\n :param population: vector of chromosomes\n :param train: train set of initial dataset\n :param train_cl: class distribution of initial train dataset\n :param test: test set of initial dataset\n :param test_cl: class distribution of initial test dataset\n :return: best individual (sequence of features), it's fitness value\n \"\"\"\n f_par = f_max = 0\n counter = 0\n best_individual = [1 for _ in range(len(population[0]))]\n while counter < self._max_iter and f_max < self._f_target:\n code_fitness, f_max, f_avg = population_fitness(mapping, population, train, train_cl, test, test_cl)\n if len(code_fitness) > max_size:\n code_fitness = code_fitness[:max_size]\n population = list(map(lambda x: x[0], code_fitness))\n\n highly_fitted = list(filter(lambda x: x[1] >= f_max / 2, code_fitness))\n if len(highly_fitted) == 0:\n highly_fitted = code_fitness\n best_individual = code_fitness[0][0]\n\n pc = self._crossover_probability(f_max, f_avg, f_par)\n pm = self._mutation_probability(f_max, f_avg, f_par)\n new_generation, f_par = cross_and_mutate(pc, pm, highly_fitted)\n population = population + new_generation\n counter += 1\n return best_individual, f_max\n\n def mimaga_filter(self, genes, classes):\n \"\"\"\n The main function to run algorithm\n :param genes: initial dataset in format: features are rows, samples are columns\n :param classes: distribution pf initial dataset\n :return: filtered with MIMAGA dataset, fitness value\n \"\"\"\n genes_T = genes.transpose()\n train_set, test_set, train_classes, test_classes = train_test_split(genes_T, classes, test_size=0.33)\n filtered_indexes = self._mim_filter(train_set.transpose())\n index_map = dict(zip([i for i in range(self._mim_size)], filtered_indexes))\n\n first_population = self._initial_population()\n best, max_fitness = self._aga_filter(self._pop_size * 2, index_map, first_population,\n train_set.transpose(), train_classes, test_set.transpose(), test_classes)\n result_genes, _ = decode_genes(index_map, best, train_set.transpose(), test_set.transpose())\n return result_genes, max_fitness\n\n\n# mimaga = MIMAGA(30, 20, 20, 0.8, 0.6, 0.3, 0.9, 0.001)\n# res_dataset, fitness = mimaga.mimaga_filter(dataset, distribution)\n"
] |
[
[
"numpy.log",
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.StandardScaler",
"sklearn.svm.SVC",
"sklearn.metrics.f1_score",
"numpy.array",
"numpy.where",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
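The MIMAGA filter above estimates mutual information as `marginal_entropy(x) - conditional_entropy(x, y)`. A short numeric check (toy data, natural logarithms as in the source) that this matches the joint-distribution form of I(X;Y):

```python
# Worked check of the identity the filter relies on:
# I(X;Y) = H(X) - H(X|Y) = sum_{x,y} p(x,y) * log(p(x,y) / (p(x) p(y))).
import numpy as np

x = np.array([0, 0, 1, 1, 1, 2, 2, 0])
y = np.array([0, 0, 0, 1, 1, 1, 1, 1])

def entropy(p):
    p = p[p > 0]
    return -np.sum(p * np.log(p))

xs, ys = np.unique(x), np.unique(y)
joint = np.array([[np.mean((x == xi) & (y == yi)) for yi in ys] for xi in xs])
px, py = joint.sum(axis=1), joint.sum(axis=0)

h_x = entropy(px)
h_x_given_y = sum(py[j] * entropy(joint[:, j] / py[j]) for j in range(len(ys)) if py[j] > 0)
mi_via_conditional = h_x - h_x_given_y

nz = joint > 0
mi_via_joint = np.sum(joint[nz] * np.log(joint[nz] / np.outer(px, py)[nz]))

print(mi_via_conditional, mi_via_joint)  # the two values agree
```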
reasonsolo/MTCNN-Tensorflow
|
[
"a6c06439b0d42e72c05510631f99fad2a4067131"
] |
[
"prepare_data/gen_landmark_helen.py"
] |
[
"from load_dataset import load_helen_annotation, load_muct_annotation\nfrom BBox_utils import BBox, IoU\nfrom Landmark_utils import rotate\nimport sys\nsys.path.append('../')\nfrom train_models.MTCNN_config import config\nimport cv2\nimport os\nimport random\nimport numpy as np\n\n\nLANDMARK_LEN = config.LANDMARK_SIZE * 2\n\nNETS_IMG_SIZE = config.IMAGE_SIZES\nRANDOM_SHIFT_TIMES = 50\n\nIOU_POS = 0.65\nIOU_NEG = 0.3\n\nBASE_LANDMARK_DIR = 'train_%s_landmark'\nBASE_LANDMARK_FILE = 'landmark_%s.txt'\nBASE_IMG_DIR = 'train_%s_landmark'\n\ndef generate_data(anno_dir, image_dir, net, load_annotation):\n size = NETS_IMG_SIZE[net]\n\n f_imgs = []\n f_landmarks = []\n img_count= 0\n for image_name, box_corner, landmarks_gt in load_annotation(anno_dir):\n if len(landmarks_gt) != config.LANDMARK_SIZE:\n print('invalid landmakr size %d file %s' % (len(landmarks_gt), image_name))\n continue\n image_path = os.path.join(image_dir, \"%s.jpg\" % image_name)\n img = cv2.imread(image_path)\n if img is None:\n continue\n # print('transform image %s' % image_path + 'landmarks len %d' % len(landmarks_gt))\n img_h, img_w, img_c = img.shape\n bbox = BBox(box_corner)\n gt_box = np.array([bbox.left, bbox.top, bbox.right, bbox.bottom])\n face = img[bbox.top:bbox.bottom+1, bbox.left:bbox.right+1]\n try:\n face = cv2.resize(face, (size, size))\n except Exception as ex:\n print(\"canno resize file %s\" % image_path)\n\n # normalized landmark in (0, 1)\n f_landmark = np.zeros((len(landmarks_gt), 2))\n for i, lm in enumerate(landmarks_gt):\n rv = ((lm[0] - gt_box[0]) / (gt_box[2] - gt_box[0]),\n (lm[1] - gt_box[1]) / (gt_box[3] - gt_box[1]))\n f_landmark[i] = rv\n\n f_imgs.append(face)\n f_landmarks.append(f_landmark.reshape(np.prod(f_landmark.shape)))\n img_count += 1\n if img_count % 100 == 0:\n print(\"%d images done\" % img_count)\n x1, y1, x2, y2 = gt_box\n gt_w = x2 - x1 + 1\n gt_h = y2 - y1 + 1\n\n if max(gt_w, gt_h) < 40 or x1 < 0 or y1 < 0:\n continue\n\n for i in range(RANDOM_SHIFT_TIMES):\n bbox_size = np.random.randint(int(min(gt_w, gt_h) * 0.8),\n np.ceil(1.25 * max(gt_w, gt_h)))\n delta_x = np.random.randint(-gt_w * 0.2, gt_w * 0.2)\n delta_y = np.random.randint(-gt_h * 0.2, gt_h * 0.2)\n\n nx1 = int(max(x1 + gt_w / 2 - bbox_size / 2 + delta_x, 0))\n ny1 = int(max(y1 + gt_h / 2 - bbox_size / 2 + delta_y, 0))\n\n nx2 = nx1 + bbox_size\n ny2 = ny1 + bbox_size\n if nx2 > img_w or ny2 > img_h:\n continue\n crop_box = np.array([nx1, ny1, nx2, ny2])\n # print([nx1, ny1, nx2, ny2])\n cropped_img = img[ny1:ny2+1, nx1:nx2+1, :]\n resized_img = cv2.resize(cropped_img, (size, size))\n #cal iou\n iou = IoU(crop_box, np.expand_dims(gt_box,0))\n\n if iou > IOU_POS:\n f_landmark = np.zeros((len(landmarks_gt), 2))\n for j, lm in enumerate(landmarks_gt):\n rv = ((lm[0] - nx1) / bbox_size, (lm[1] - ny1) / bbox_size)\n f_landmark[j] = rv\n\n shifted_landmark = f_landmark.copy()\n f_landmarks.append(f_landmark)\n f_imgs.append(resized_img)\n bbox = BBox([nx1, ny1, nx2, ny2])\n\n #print('shifted landmark shape %s' % str(shifted_landmark.shape))\n\n # rotate image and landmark\n rotate_alpha = random.choice([-1, 1]) * np.random.randint(5, 10)\n rotated_face, rotated_landmark = rotate(img, bbox,\n bbox.reprojectLandmark(shifted_landmark),\n rotate_alpha)\n rotated_landmark = bbox.projectLandmark(rotated_landmark)\n if np.sum(np.where(rotated_landmark < 0, 1, 0)) > 0:\n continue\n if np.sum(np.where(rotated_landmark > 1, 1, 0)) > 0:\n continue\n # print('rotated_landmark %s' % str(rotated_landmark))\n rotated_cropped_img = 
cv2.resize(rotated_face, (size, size))\n f_imgs.append(rotated_cropped_img)\n f_landmarks.append(rotated_landmark)\n\n np_imgs, np_landmarks = np.asarray(f_imgs), np.asarray(f_landmarks)\n print('np_imgs shape %s, np_landmarks shape %s' % (np_imgs.shape, np_landmarks.shape))\n # print(np_landmarks)\n\n output_dir = net\n landmark_dir = os.path.join(output_dir, BASE_LANDMARK_DIR % net)\n landmark_file = os.path.join(output_dir, BASE_LANDMARK_FILE % net)\n img_dir = os.path.join(output_dir, BASE_IMG_DIR % net)\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n if not os.path.exists(landmark_dir):\n os.mkdir(landmark_dir)\n if not os.path.exists(img_dir):\n os.mkdir(img_dir)\n print('writing to landmark %s' % landmark_file)\n with open(landmark_file, 'w') as f:\n img_count = 0\n for i, img in enumerate(np_imgs):\n if np.sum(np.where(np_landmarks[i] <= 0, 1, 0)) > 0:\n continue\n if np.sum(np.where(np_landmarks[i] >= 1, 1, 0)) > 0:\n continue\n img_count += 1\n img_file_path = os.path.join(img_dir, \"%d.jpg\" % (img_count))\n cv2.imwrite(img_file_path, img)\n flattened_landmark = map(str, list(np_landmarks[i].reshape(np.prod(np_landmarks[i].shape))))\n f.write(\" \".join([img_file_path, \"-2\"] + flattened_landmark))\n f.write(\"\\n\")\n print('total img %d' % img_count)\n\nif __name__ == '__main__':\n net = sys.argv[1]\n dataset = sys.argv[2]\n if dataset == 'helen':\n generate_data('helen/annotation','helen/image', net, load_helen_annotation)\n elif dataset == 'muct':\n generate_data('muct/muct76-opencv.csv', 'muct/image', net, load_muct_annotation)\n\n"
] |
[
[
"numpy.expand_dims",
"numpy.asarray",
"numpy.prod",
"numpy.array",
"numpy.where",
"numpy.random.randint"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
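The landmark generator above stores landmarks normalised into (0, 1) relative to each square crop, via `(lm - crop_origin) / bbox_size`, and discards crops whose landmarks fall outside that range. A small round-trip sketch of the mapping with made-up coordinates:

```python
# Absolute pixel landmarks -> coordinates relative to a square crop, and back.
import numpy as np

def normalize_landmarks(landmarks, nx1, ny1, bbox_size):
    """(N, 2) absolute pixel coords -> (N, 2) coords relative to the crop."""
    return (np.asarray(landmarks, dtype=float) - [nx1, ny1]) / bbox_size

def denormalize_landmarks(norm_landmarks, nx1, ny1, bbox_size):
    return np.asarray(norm_landmarks, dtype=float) * bbox_size + [nx1, ny1]

lm = np.array([[120.0, 140.0], [160.0, 150.0]])
norm = normalize_landmarks(lm, nx1=100, ny1=130, bbox_size=80)
print(norm)                                        # in (0, 1) when landmarks lie inside the crop
print(denormalize_landmarks(norm, 100, 130, 80))   # recovers the original coordinates
```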
Mehrad0711/HUBERT
|
[
"2f13fd2f7f5a2ec13544f4007158b582ae7408c3",
"2f13fd2f7f5a2ec13544f4007158b582ae7408c3"
] |
[
"run_visual.py",
"utils/prediction.py"
] |
[
"import os\nimport argparse\nimport json\nimport logging\nfrom collections import defaultdict, Counter, OrderedDict\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport seaborn as sns\nfrom utils.global_vars import POS_TAGS_MAP\n\nlogging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S',\n level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\ndef str2bool(v):\n if isinstance(v, bool):\n return v\n elif v.lower() in ['true', '1']:\n return True\n elif v.lower() in ['false', '0']:\n return False\n else:\n raise argparse.ArgumentTypeError('Unable to parse the argument')\n\ndef run(args, tag_type, target_tag):\n\n role2token_targeted = defaultdict(list)\n\n tag2role = defaultdict(list)\n role2tag = defaultdict(list)\n\n with open(args.input_file) as fin:\n lines = json.load(fin)\n\n if tag_type == 'const_parse_path':\n tag2idx = defaultdict(int)\n\n for id, line in lines.items():\n flag = False\n try:\n tags = line[tag_type]\n except:\n logger.error('Specified file does not contain {} information'.format(tag_type))\n logger.error('Skipping this tag and continuing the process...')\n break\n if args.role_type == 'structure':\n all_roles = line['all_aRs']\n else:\n all_roles = line['all_aFs']\n\n if tag_type == 'dep_edge':\n dep_tokens = line['dep_parse_tokens']\n orig_tokens = line['tokens']\n\n # find mapping between dep and orig tokens\n indices = []\n for tok in dep_tokens:\n try:\n indices.append(orig_tokens.index(tok))\n except:\n logger.error('Failing to find token: {} in sentence: {}'.format(tok, orig_tokens))\n flag = True\n break\n if flag:\n continue\n all_roles = [all_roles[i] for i in indices]\n tokens = dep_tokens\n else:\n tokens = line['tokens']\n\n assert len(tags) == len(all_roles) == len(tokens)\n for tag, role, token in zip(tags, all_roles, tokens):\n if tag == target_tag:\n role2token_targeted[str(role)].append(token)\n if tag == '[SEP]':\n continue\n if tag_type == 'const_parse_path':\n tag = tuple(tag)\n if tag not in tag2idx.keys():\n tag2idx[tag] = len(tag2idx.keys())\n tag2role[str(tag2idx[tag])].append(str(role))\n role2tag[str(role)].append(str(tag2idx[tag]))\n else:\n tag2role[str(tag)].append(str(role))\n role2tag[str(role)].append(str(tag))\n\n\n tag2role_mostcommon = defaultdict()\n role2tag_mostcommon = defaultdict()\n\n for tag, role in tag2role.items():\n tag2role[tag] = Counter(role)\n tag2role_mostcommon[tag] = tag2role[tag].most_common(1)[0][0]\n\n for role, tag in role2tag.items():\n role2tag[role] = Counter(tag)\n role2tag_mostcommon[role] = role2tag[role].most_common(1)[0][0]\n\n # merge close tags\n if args.merge_tags and tag_type == 'pos_tags':\n prev_tag2role = tag2role.copy()\n tag2role = defaultdict(Counter)\n for k, v in POS_TAGS_MAP.items():\n for val in v:\n tag2role[k].update(prev_tag2role[val])\n\n if args.prune:\n prev_tag2role = tag2role.copy()\n tag2role = defaultdict(Counter)\n for tag, role in prev_tag2role.items():\n tag2role[tag] = Counter({k: v for k, v in role.items() if v > args.threshold})\n\n prev_tag2role = tag2role.copy()\n tag2role = defaultdict(Counter)\n tag2sum = dict()\n for tag, role in prev_tag2role.items():\n summ = sum(role.values())\n tag2sum[tag] = summ\n if summ != 0:\n if args.normalize:\n tag2role[tag] = Counter({k: float(v)/summ for k, v in role.items()})\n else:\n tag2role[tag] = Counter({k: float(v) for k, v in role.items()})\n\n num_roles = len(role2tag.keys())\n num_tags = len(tag2role.keys())\n\n # color the bars\n ROLES = 
list(role2tag.keys())\n TAGS = list(tag2role.keys())\n\n # Values of each group\n all_bars = [[None]*num_tags for _ in range(num_roles)]\n\n TAGS_sorted = sorted(TAGS, key=lambda t: tag2sum[t], reverse=True)\n ROLES_sorted_index = sorted(range(num_roles), key=lambda i: sum([tag2role[t][ROLES[i]] for t in TAGS_sorted]), reverse=True)\n ROLES_sorted = [ROLES[i] for i in ROLES_sorted_index]\n\n for i, role in enumerate(ROLES_sorted):\n for j, tag in enumerate(TAGS_sorted):\n all_bars[i][j] = tag2role[tag][role]\n\n # Heights\n all_bars = np.array(all_bars)\n all_bars_cumsum = np.cumsum(all_bars, axis=0).tolist()\n\n # Names of group and bar width\n barWidth = 0.8\n pal = sns.color_palette(\"Set1\", n_colors=num_roles)\n pal_sorted = [pal[i] for i in ROLES_sorted_index]\n\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n ax2 = ax1.twiny()\n\n for i in range(len(all_bars)):\n if i==0:\n bottom = None\n else:\n bottom = all_bars_cumsum[i-1]\n top = all_bars[i]\n ax1.bar(range(num_tags), height=top, bottom=bottom, color=pal_sorted[i], edgecolor='white', width=barWidth)\n\n new_roles = [r for r in ROLES_sorted if r in role2token_targeted.keys()]\n\n if target_tag:\n updated_output_text_file = \"_{}.\".format(target_tag).join(args.output_text_file.rsplit('.', 1))\n with open(updated_output_text_file, 'w') as f_out:\n for role in new_roles:\n f_out.write('num of roles for {} is {}'.format(role, tag2role[target_tag][role]))\n f_out.write(str(set(role2token_targeted[role])))\n f_out.write('\\n\\n')\n\n # Custom X axis\n ax1.set_xticks(range(num_tags))\n ax1.set_xticklabels(TAGS_sorted, fontsize=7.5, rotation='vertical')\n ax1.set_xlabel(tag_type)\n\n ax2.set_xlim(ax1.get_xlim())\n ax2.set_xticks(range(num_tags))\n sum_vals = [str(tag2sum[tag]) for tag in TAGS_sorted]\n ax2.set_xticklabels(sum_vals, fontsize=7.5, rotation='vertical')\n ax2.set_xlabel(\"Number of Roles\")\n\n plt.ylabel(\"Role Frequency\")\n\n # Show graphic\n plt.tight_layout()\n plt.subplots_adjust(bottom=0.15)\n updated_output_plot_file = \"_{}.\".format(tag_type).join(args.output_plot_file.rsplit('.', 1))\n plt.savefig(updated_output_plot_file)\n plt.show()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--input_file', default='./results/MNLI/tpr_attention.txt', type=str)\n parser.add_argument('--output_plot_file', default='./plot.png', type=str)\n parser.add_argument('--tag_type', default='pos_tags', choices=['all', 'pos_tags', 'ner_tags', 'dep_edge', 'const_parse_path', 'tree_depth'], type=str)\n parser.add_argument('--role_type', default='structure', choices=['structure', 'semantic'], type=str)\n parser.add_argument('--merge_tags', default=False, type=str2bool, help='merge similar tags')\n parser.add_argument('--prune', default=False, type=str2bool, help='prune low frequency values')\n parser.add_argument('--threshold', default=0.0, type=float, help='cutoff value for pruning')\n parser.add_argument('--normalize', default=False, type=str2bool, help='normalize number of roles for each tag')\n parser.add_argument('--target_tag', default=[], type=str, help='target tag to generate roles for', nargs='*')\n parser.add_argument('--output_text_file', default='./role2tokens.txt', type=str)\n\n args = parser.parse_args()\n\n if not os.path.exists('./workdir/log_eval/'):\n os.makedirs('./workdir/log_eval/', exist_ok=True)\n\n if len(args.target_tag) !=0 and len(args.target_tag) != 5 and args.tag_type == 'all':\n raise ValueError('When tag_type is set to all you must provide either to 
target_tags or one for each tag type')\n\n all_tag_types = ['pos_tags', 'ner_tags', 'dep_edge', 'const_parse_path', 'tree_depth']\n if args.tag_type == 'all':\n for i in range(len(all_tag_types)):\n tag_type = all_tag_types[i]\n logger.info('Processing tag type: {}'.format(tag_type))\n target_tag = None\n if len(args.target_tag):\n target_tag = args.target_tag[i].strip(',.: ')\n logger.info('Target tag is: {}'.format(target_tag))\n run(args, tag_type, target_tag)\n else:\n logger.info('Processing tag type: {}'.format(args.tag_type))\n logger.info('Target tag is: {}'.format(args.target_tag))\n run(args, args.tag_type, args.target_tag)\n logger.info('*** Process is completed! ***')",
"import logging\r\nimport numpy as np\r\nimport torch\r\nfrom tqdm import tqdm\r\nfrom utils.metrics import class_acc, reg_acc\r\n\r\nlogging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\r\n datefmt='%m/%d/%Y %H:%M:%S',\r\n level=logging.INFO)\r\nlogger = logging.getLogger(__name__)\r\n\r\ndef predict(args, model, test_dataloader, all_guids, device, task_type):\r\n\r\n test_loss, test_accuracy = 0, 0\r\n nb_test_steps, nb_test_examples = 0, 0\r\n\r\n all_ids = []\r\n all_predictions = []\r\n\r\n for data in tqdm(test_dataloader, desc=\"predicting\"):\r\n input_ids, input_mask, segment_ids, sub_word_masks = data[:4]\r\n input_ids = input_ids.to(device)\r\n input_mask = input_mask.to(device)\r\n segment_ids = segment_ids.to(device)\r\n sub_word_masks = sub_word_masks.to(device)\r\n\r\n if args.task_name.lower() == 'snli':\r\n label_ids = data[4]\r\n label_ids = label_ids.to(device)\r\n\r\n with torch.no_grad():\r\n logits, _, _ = model(input_ids, segment_ids, input_mask, sub_word_masks)\r\n\r\n logits = logits.detach().cpu().numpy()\r\n\r\n if task_type != 1:\r\n predictions = np.argmax(logits, axis=1)\r\n else:\r\n predictions = logits\r\n nb_test_examples += input_ids.size(0)\r\n\r\n if args.task_name.lower() == 'snli':\r\n label_ids = label_ids.to('cpu').numpy()\r\n if task_type == 0:\r\n tmp_test_accuracy = class_acc(logits, label_ids)\r\n else:\r\n tmp_test_accuracy = reg_acc(logits, label_ids)\r\n test_accuracy += tmp_test_accuracy\r\n\r\n nb_test_steps += 1\r\n all_predictions.extend(predictions.flatten().tolist())\r\n\r\n test_accuracy = test_accuracy / nb_test_examples\r\n if args.task_name.lower() == 'hans':\r\n all_ids = all_guids\r\n else:\r\n all_ids = list(range(len(all_predictions)))\r\n result = {'input_ids': all_ids,\r\n 'predictions': all_predictions,\r\n 'test_accuracy': test_accuracy\r\n }\r\n\r\n return result"
] |
[
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.figure",
"numpy.cumsum",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.subplots_adjust",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
],
[
"numpy.argmax",
"torch.no_grad"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
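`run_visual.py` above draws its role-per-tag chart as stacked bars, passing the cumulative sum of the layers already drawn as each new layer's `bottom`. A toy sketch of that construction with arbitrary counts in place of the role/tag frequencies:

```python
# Stacked bars: each layer sits on the cumulative sum of the previous layers.
import numpy as np
from matplotlib import pyplot as plt

counts = np.array([[5, 2, 7],     # role 0 per tag
                   [3, 4, 1],     # role 1 per tag
                   [2, 6, 2]])    # role 2 per tag
cumulative = np.cumsum(counts, axis=0)

fig, ax = plt.subplots()
for i, layer in enumerate(counts):
    bottom = None if i == 0 else cumulative[i - 1]
    ax.bar(range(counts.shape[1]), height=layer, bottom=bottom, width=0.8)
ax.set_xlabel("tag")
ax.set_ylabel("role frequency")
plt.show()
```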
PanchoVarallo/Schafkopf-Application
|
[
"98e58f69cadfeb63c13068320df1e8ea96cd91fe"
] |
[
"schafkopf/database/queries.py"
] |
[
"import datetime\nimport logging\nfrom typing import Union, List, Optional, Tuple\n\nimport pandas as pd\nfrom sqlalchemy import literal\nfrom sqlalchemy.orm import sessionmaker\n\nfrom schafkopf.database.data_model import Teilnehmer, Runde, Punkteconfig, Einzelspiel, Resultat, Verdopplung, User\nfrom schafkopf.database.session import Sessions\n\nlogging.getLogger().setLevel(logging.INFO)\n\n\ndef get_teilnehmer_by_nachname_vorname(nachname: str, vorname: str, dataframe: bool = False,\n session: sessionmaker() = None) -> Union[List[Teilnehmer], pd.DataFrame]:\n actual_session = Sessions.get_session() if session is None else session\n query = actual_session.query(Teilnehmer).filter(Teilnehmer.vorname == vorname) \\\n .filter(Teilnehmer.nachname == nachname)\n teilnehmer = query.all() if not dataframe else pd.read_sql(query.statement, actual_session.bind)\n _close_session(actual_session, session)\n return teilnehmer\n\n\ndef get_teilnehmer(dataframe: bool = False, session: sessionmaker() = None) -> Union[List[Teilnehmer], pd.DataFrame]:\n actual_session = Sessions.get_session() if session is None else session\n query = actual_session.query(Teilnehmer).order_by(Teilnehmer.nachname.asc(), Teilnehmer.vorname.asc())\n teilnehmer = query.all() if not dataframe else pd.read_sql(query.statement, actual_session.bind)\n _close_session(actual_session, session)\n return teilnehmer\n\n\ndef get_teilnehmer_by_id(teilnehmer_id: Union[None, int], session: sessionmaker() = None) -> Union[None, Teilnehmer]:\n actual_session = _build_session(session)\n if teilnehmer_id is None:\n _close_session(actual_session, session)\n return None\n teilnehmer = actual_session.query(Teilnehmer).filter(Teilnehmer.id == teilnehmer_id).all()\n _close_session(actual_session, session)\n return teilnehmer[0]\n\n\ndef get_teilnehmers_by_ids(teilnehmer_ids: List[Union[None, int]],\n dataframe: bool = False,\n session: sessionmaker() = None) -> Union[List[Union[None, Teilnehmer]], pd.DataFrame]:\n actual_session = _build_session(session)\n query = actual_session.query(Teilnehmer).filter(Teilnehmer.id.in_(teilnehmer_ids))\n teilnehmers = query.all() if not dataframe else pd.read_sql(query.statement, actual_session.bind)\n _close_session(actual_session, session)\n return teilnehmers\n\n\ndef get_teilnehmer_name_by_id(teilnehmer_id: Union[None, int], session: sessionmaker() = None) -> Union[None, str]:\n actual_session = _build_session(session)\n teilnehmer = get_teilnehmer_by_id(teilnehmer_id, actual_session)\n _close_session(actual_session, session)\n if teilnehmer is None:\n return None\n return teilnehmer.name\n\n\ndef get_teilnehmer_vorname_by_id(teilnehmer_id: Union[None, int], session: sessionmaker() = None) -> Union[None, str]:\n actual_session = _build_session(session)\n teilnehmer = get_teilnehmer_by_id(teilnehmer_id, actual_session)\n _close_session(actual_session, session)\n if teilnehmer is None:\n return None\n return teilnehmer.vorname\n\n\ndef get_runde_by_id(runde_id: Union[None, int], session: sessionmaker() = None) -> Union[None, Runde]:\n actual_session = _build_session(session)\n if runde_id is None:\n _close_session(actual_session, session)\n return None\n runde = actual_session.query(Runde).filter(Runde.id == runde_id).all()\n _close_session(actual_session, session)\n return runde[0]\n\n\ndef get_runden(active: bool = True, dataframe: bool = False,\n session: sessionmaker() = None) -> Union[List[Runde], pd.DataFrame]:\n actual_session = _build_session(session)\n if active:\n query = 
actual_session.query(Runde).filter(Runde.is_active == active).order_by(Runde.datum.asc())\n else:\n query = actual_session.query(Runde).order_by(Runde.created_on.asc())\n runden = query.all() if not dataframe else pd.read_sql(query.statement, actual_session.bind)\n _close_session(actual_session, session)\n return runden\n\n\ndef get_resultate_by_einzelspiele_ids(einzelspiel_ids: List[int],\n dataframe: bool = False,\n session: sessionmaker() = None) -> Union[None, List[Resultat], pd.DataFrame]:\n actual_session = Sessions.get_session() if session is None else session\n query = actual_session.query(Resultat).filter(Resultat.einzelspiel_id.in_(einzelspiel_ids))\n resultate = query.all() if not dataframe else pd.read_sql(query.statement, actual_session.bind)\n _close_session(actual_session, session)\n return resultate\n\n\ndef insert_resultat(teilnehmer_id: int,\n einzelspiel_id: int,\n augen: float,\n punkte: float,\n gewonnen: bool,\n session: sessionmaker() = None) -> Resultat:\n actual_session = _build_session(session)\n resultat = Resultat(teilnehmer_id=teilnehmer_id, einzelspiel_id=einzelspiel_id, augen=augen,\n punkte=punkte, gewonnen=gewonnen)\n actual_session.add(resultat)\n if session is None:\n actual_session.commit()\n actual_session.close()\n return resultat\n\n\ndef get_default_punkteconfig(session: sessionmaker() = None) -> Punkteconfig:\n actual_session = Sessions.get_session() if session is None else session\n query = actual_session.query(Punkteconfig).filter(Punkteconfig.name == 'sauspiel_config_plus_hochzeit')\n punkteconfig = query.all()[0]\n _close_session(actual_session, session)\n return punkteconfig\n\n\ndef get_punkteconfig_by_runde_id(runde_id: Union[None, int],\n session: sessionmaker() = None) -> Union[None, Punkteconfig]:\n actual_session = _build_session(session)\n if runde_id is None:\n _close_session(actual_session, session)\n return None\n runde = actual_session.query(Runde).filter(Runde.id == runde_id).all()\n punkteconfig = runde[0].punkteconfig\n _close_session(actual_session, session)\n return punkteconfig\n\n\ndef get_einzelspiele_by_einzelspiel_ids(einzelspiel_ids: List[int],\n active: bool = True,\n dataframe: bool = False,\n session: sessionmaker() = None) -> Union[None, List[Einzelspiel], pd.DataFrame]:\n actual_session = Sessions.get_session() if session is None else session\n if active:\n query = actual_session.query(Einzelspiel).filter(Einzelspiel.is_active == active) \\\n .filter(Einzelspiel.id.in_(einzelspiel_ids))\n else:\n query = actual_session.query(Einzelspiel).filter(Einzelspiel.id.in_(einzelspiel_ids))\n einzelspiele = query.all() if not dataframe else pd.read_sql(query.statement, actual_session.bind)\n _close_session(actual_session, session)\n return einzelspiele\n\n\ndef inactivate_einzelspiel_by_einzelspiel_id(einzelspiel_id: int, session: sessionmaker() = None) -> bool:\n actual_session = Sessions.get_session() if session is None else session\n try:\n einzelspiel = actual_session.query(Einzelspiel).filter(Einzelspiel.id == einzelspiel_id).all()[0]\n einzelspiel.is_active = False\n actual_session.commit()\n except Exception:\n return False\n finally:\n _close_session(actual_session, session)\n return True\n\n\ndef get_einzelspiele_by_teilnehmer_ids(teilnehmer_ids: List[int],\n active: bool = True,\n dataframe: bool = False,\n session: sessionmaker() = None) -> Union[List[Einzelspiel], pd.DataFrame]:\n if len(teilnehmer_ids) == 0:\n if dataframe:\n return pd.DataFrame()\n else:\n return []\n actual_session = Sessions.get_session() if 
session is None else session\n query = actual_session.query(Einzelspiel)\n positionen = [Einzelspiel.geberhand_id, Einzelspiel.ausspieler_id,\n Einzelspiel.mittelhand_id, Einzelspiel.hinterhand_id]\n if active:\n query = query.filter(Einzelspiel.is_active == active)\n if len(teilnehmer_ids) <= 4:\n for t in teilnehmer_ids:\n query = query.filter(literal(t).in_(positionen))\n else:\n query = query.filter(Einzelspiel.geberhand_id.in_(teilnehmer_ids)) \\\n .filter(Einzelspiel.ausspieler_id.in_(teilnehmer_ids)) \\\n .filter(Einzelspiel.mittelhand_id.in_(teilnehmer_ids)) \\\n .filter(Einzelspiel.hinterhand_id.in_(teilnehmer_ids))\n einzelspiele = query.all() if not dataframe else pd.read_sql(query.statement, actual_session.bind)\n _close_session(actual_session, session)\n return einzelspiele\n\n\ndef get_verdopplungen_by_einzelspiel_ids(einzelspiel_ids: List[int],\n dataframe: bool = False,\n session: sessionmaker() = None) -> Union[List[Verdopplung], pd.DataFrame]:\n actual_session = Sessions.get_session() if session is None else session\n query = actual_session.query(Verdopplung).filter(Verdopplung.einzelspiel_id.in_(einzelspiel_ids))\n verdopplungen = query.all() if not dataframe else pd.read_sql(query.statement, actual_session.bind)\n _close_session(actual_session, session)\n return verdopplungen\n\n\ndef get_einzelspiel_ids_by_runde_ids(runde_ids: List[int],\n active: bool = True,\n session: sessionmaker() = None) -> List[int]:\n actual_session = _build_session(session)\n if active:\n einzelspiel_ids = [e[0] for e in\n actual_session.query(Einzelspiel.id).filter(Einzelspiel.runde_id.in_(runde_ids)).filter(\n Einzelspiel.is_active == active).all()]\n else:\n einzelspiel_ids = [e[0] for e in\n actual_session.query(Einzelspiel.id).filter(Einzelspiel.runde_id.in_(runde_ids)).all()]\n _close_session(actual_session, session)\n return einzelspiel_ids\n\n\ndef get_runde_id_by_einzelspiel_id(einzelspiel_id: int,\n session: sessionmaker() = None) -> Union[int, None]:\n actual_session = Sessions.get_session() if session is None else session\n runde_id = actual_session.query(Einzelspiel.runde_id).filter(Einzelspiel.id == einzelspiel_id).all()\n runde_id = runde_id[0][0] if len(runde_id) == 1 else None\n _close_session(actual_session, session)\n return runde_id\n\n\ndef get_latest_einzelspiel_id(session: sessionmaker() = None) -> Union[None, int]:\n actual_session = _build_session(session)\n einzelspiel_id = actual_session.query(Einzelspiel.id) \\\n .filter(Einzelspiel.is_active == True) \\\n .join(Einzelspiel.runde) \\\n .filter(Runde.is_active == True) \\\n .order_by(Einzelspiel.id.desc()).limit(1).all()\n einzelspiel_id = einzelspiel_id[0][0] if len(einzelspiel_id) == 1 else None\n _close_session(actual_session, session)\n return einzelspiel_id\n\n\ndef get_latest_result(session: sessionmaker() = None) -> Union[None, pd.DataFrame]:\n actual_session = _build_session(session)\n einzelspiel_id = get_latest_einzelspiel_id()\n if einzelspiel_id is None:\n return None\n resultate = get_resultate_by_einzelspiele_ids([einzelspiel_id], dataframe=True)\n teilnehmer = get_teilnehmers_by_ids(resultate[\"teilnehmer_id\"].to_list(), dataframe=True)\n result = pd.merge(resultate, teilnehmer, left_on=[\"teilnehmer_id\"], right_on=[\"id\"])[[\"teilnehmer_id\", \"punkte\"]]\n _close_session(actual_session, session)\n return result\n\n\ndef get_users(session: sessionmaker() = None) -> Union[None, List[User]]:\n actual_session = _build_session(session)\n users = actual_session.query(User).all()\n 
_close_session(actual_session, session)\n return users\n\n\ndef insert_einzelspiel(runde_id: int, ansager_id: Union[None, int], geber_id: int, ausspieler_id: int,\n mittelhand_id: int, hinterhand_id: int, geberhand_id: int, spielpunkte: float,\n spielart: str, farbe: Union[None, str] = None, laufende: Union[None, int] = None,\n schneider: bool = False, schwarz: bool = False, partner_id: Union[None, int] = None,\n durchmarsch: Union[None, bool] = False, tout: Union[None, bool] = False,\n session: sessionmaker() = None) -> Einzelspiel:\n actual_session = _build_session(session)\n einzelspiel = Einzelspiel(runde_id=runde_id, ansager_id=ansager_id,\n partner_id=partner_id, geber_id=geber_id, ausspieler_id=ausspieler_id,\n mittelhand_id=mittelhand_id, hinterhand_id=hinterhand_id, geberhand_id=geberhand_id,\n farbe=farbe, laufende=laufende, spielart=spielart, schneider=schneider, schwarz=schwarz,\n durchmarsch=durchmarsch, tout=tout, spielpunkte=spielpunkte)\n actual_session.add(einzelspiel)\n actual_session.flush()\n if session is None:\n actual_session.commit()\n actual_session.close()\n return einzelspiel\n\n\ndef insert_teilnehmer(vorname: str, nachname: str,\n session: sessionmaker() = None) -> Tuple[Optional[int], List[str]]:\n actual_session = _build_session(session)\n vorname = '' if vorname is None else vorname.strip()\n nachname = '' if nachname is None else nachname.strip()\n validation_messages = []\n if len(get_teilnehmer_by_nachname_vorname(nachname=nachname, vorname=vorname, session=actual_session)) > 0:\n validation_messages.append(f'{nachname}, {vorname} existiert bereits.')\n if vorname == '':\n validation_messages.append(f'Ein leerer Vorname ist nicht erlaubt.')\n if nachname == '':\n validation_messages.append(f'Ein leerer Nachname ist nicht erlaubt.')\n if len(validation_messages) > 0:\n _close_session(actual_session, session)\n return None, validation_messages\n teilnehmer = Teilnehmer(name=f'{nachname}, {vorname}', vorname=vorname, nachname=nachname)\n actual_session.add(teilnehmer)\n actual_session.flush()\n teilnehmer_id = teilnehmer.id\n if session is None:\n actual_session.commit()\n actual_session.close()\n return teilnehmer_id, []\n\n\ndef insert_default_punkteconfig(session: sessionmaker() = None) -> Punkteconfig:\n actual_session = _build_session(session)\n punkteconfig = Punkteconfig()\n actual_session.add(punkteconfig)\n actual_session.flush()\n if session is None:\n actual_session.commit()\n actual_session.close()\n return punkteconfig\n\n\ndef insert_user(username: str, password: str, session: sessionmaker() = None) -> User:\n actual_session = _build_session(session)\n user = User(username=username, password=password)\n actual_session.add(user)\n actual_session.flush()\n if session is None:\n actual_session.commit()\n actual_session.close()\n return user\n\n\ndef insert_runde(datum: str, name: str, ort: str, session: sessionmaker() = None) -> Tuple[Optional[int], List[str]]:\n actual_session = _build_session(session)\n punkteconfig = get_default_punkteconfig(actual_session)\n name = '' if name is None else name.strip()\n ort = '' if ort is None else ort.strip()\n validation_messages = []\n if name == '':\n validation_messages.append(f'Ein leerer Name ist nicht erlaubt.')\n if ort == '':\n validation_messages.append(f'Ein leerer Ort ist nicht erlaubt.')\n if datum == '':\n validation_messages.append('Bitte gültiges Datum angeben.')\n if len(validation_messages) > 0:\n _close_session(actual_session, session)\n return None, validation_messages\n datum = 
datetime.datetime.strptime(datum, '%Y-%m-%d')\n runde = Runde(datum=datum, name=name, ort=ort, punkteconfig_id=punkteconfig.id)\n actual_session.add(runde)\n actual_session.flush()\n runde_id = runde.id\n if session is None:\n actual_session.commit()\n actual_session.close()\n return runde_id, []\n\n\ndef insert_verdopplung(teilnehmer_id: int, einzelspiel_id: int, doppler: str,\n session: sessionmaker() = None) -> Verdopplung:\n actual_session = _build_session(session)\n verdopplung = Verdopplung(teilnehmer_id=teilnehmer_id, einzelspiel_id=einzelspiel_id, doppler=doppler)\n actual_session.add(verdopplung)\n if session is None:\n actual_session.flush()\n actual_session.commit()\n actual_session.close()\n return verdopplung\n\n\ndef _close_session(actual_session: sessionmaker(), session: sessionmaker()):\n if session is None:\n actual_session.close()\n\n\ndef _build_session(session: sessionmaker()) -> sessionmaker():\n actual_session = Sessions.get_session() if session is None else session\n return actual_session\n"
] |
[
[
"pandas.merge",
"pandas.read_sql",
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
tomzhang/mars-1
|
[
"6f1d85e37eb1b383251314cb0ba13e06288af03d",
"6f1d85e37eb1b383251314cb0ba13e06288af03d",
"6f1d85e37eb1b383251314cb0ba13e06288af03d",
"6f1d85e37eb1b383251314cb0ba13e06288af03d",
"6f1d85e37eb1b383251314cb0ba13e06288af03d",
"6f1d85e37eb1b383251314cb0ba13e06288af03d",
"6f1d85e37eb1b383251314cb0ba13e06288af03d",
"6f1d85e37eb1b383251314cb0ba13e06288af03d",
"6f1d85e37eb1b383251314cb0ba13e06288af03d"
] |
[
"mars/tensor/random/pareto.py",
"mars/dataframe/base/fillna.py",
"mars/dataframe/align.py",
"mars/scheduler/tests/integrated/test_worker_failover.py",
"mars/worker/storage/tests/test_shared_io.py",
"mars/learn/utils/multiclass.py",
"mars/dataframe/window/ewm/aggregation.py",
"mars/dataframe/reduction/tests/test_reduction.py",
"mars/tensor/reduction/nanmean.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\nfrom ... import opcodes as OperandDef\nfrom ...serialize import AnyField\nfrom .core import TensorRandomOperandMixin, handle_array, TensorDistribution\n\n\nclass TensorPareto(TensorDistribution, TensorRandomOperandMixin):\n __slots__ = '_a', '_size'\n _input_fields_ = ['_a']\n _op_type_ = OperandDef.RAND_PARETO\n\n _a = AnyField('a')\n _func_name = 'pareto'\n\n def __init__(self, size=None, state=None, dtype=None, gpu=None, **kw):\n dtype = np.dtype(dtype) if dtype is not None else dtype\n super().__init__(_size=size, _state=state, _dtype=dtype, _gpu=gpu, **kw)\n\n @property\n def a(self):\n return self._a\n\n def __call__(self, a, chunk_size=None):\n return self.new_tensor([a], None, raw_chunk_size=chunk_size)\n\n\ndef pareto(random_state, a, size=None, chunk_size=None, gpu=None, dtype=None):\n r\"\"\"\n Draw samples from a Pareto II or Lomax distribution with\n specified shape.\n\n The Lomax or Pareto II distribution is a shifted Pareto\n distribution. The classical Pareto distribution can be\n obtained from the Lomax distribution by adding 1 and\n multiplying by the scale parameter ``m`` (see Notes). The\n smallest value of the Lomax distribution is zero while for the\n classical Pareto distribution it is ``mu``, where the standard\n Pareto distribution has location ``mu = 1``. Lomax can also\n be considered as a simplified version of the Generalized\n Pareto distribution (available in SciPy), with the scale set\n to one and the location set to zero.\n\n The Pareto distribution must be greater than zero, and is\n unbounded above. It is also known as the \"80-20 rule\". In\n this distribution, 80 percent of the weights are in the lowest\n 20 percent of the range, while the other 20 percent fill the\n remaining 80 percent of the range.\n\n Parameters\n ----------\n a : float or array_like of floats\n Shape of the distribution. Should be greater than zero.\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. If size is ``None`` (default),\n a single value is returned if ``a`` is a scalar. Otherwise,\n ``mt.array(a).size`` samples are drawn.\n chunk_size : int or tuple of int or tuple of ints, optional\n Desired chunk size on each dimension\n gpu : bool, optional\n Allocate the tensor on GPU if True, False as default\n dtype : data-type, optional\n Data-type of the returned tensor.\n\n Returns\n -------\n out : Tensor or scalar\n Drawn samples from the parameterized Pareto distribution.\n\n See Also\n --------\n scipy.stats.lomax : probability density function, distribution or\n cumulative density function, etc.\n scipy.stats.genpareto : probability density function, distribution or\n cumulative density function, etc.\n\n Notes\n -----\n The probability density for the Pareto distribution is\n\n .. 
math:: p(x) = \\frac{am^a}{x^{a+1}}\n\n where :math:`a` is the shape and :math:`m` the scale.\n\n The Pareto distribution, named after the Italian economist\n Vilfredo Pareto, is a power law probability distribution\n useful in many real world problems. Outside the field of\n economics it is generally referred to as the Bradford\n distribution. Pareto developed the distribution to describe\n the distribution of wealth in an economy. It has also found\n use in insurance, web page access statistics, oil field sizes,\n and many other problems, including the download frequency for\n projects in Sourceforge [1]_. It is one of the so-called\n \"fat-tailed\" distributions.\n\n\n References\n ----------\n .. [1] Francis Hunt and Paul Johnson, On the Pareto Distribution of\n Sourceforge projects.\n .. [2] Pareto, V. (1896). Course of Political Economy. Lausanne.\n .. [3] Reiss, R.D., Thomas, M.(2001), Statistical Analysis of Extreme\n Values, Birkhauser Verlag, Basel, pp 23-30.\n .. [4] Wikipedia, \"Pareto distribution\",\n http://en.wikipedia.org/wiki/Pareto_distribution\n\n Examples\n --------\n Draw samples from the distribution:\n\n >>> import mars.tensor as mt\n\n >>> a, m = 3., 2. # shape and mode\n >>> s = (mt.random.pareto(a, 1000) + 1) * m\n\n Display the histogram of the samples, along with the probability\n density function:\n\n >>> import matplotlib.pyplot as plt\n >>> count, bins, _ = plt.hist(s.execute(), 100, normed=True)\n >>> fit = a*m**a / bins**(a+1)\n >>> plt.plot(bins, max(count)*fit/max(fit), linewidth=2, color='r')\n >>> plt.show()\n \"\"\"\n if dtype is None:\n dtype = np.random.RandomState().pareto(\n handle_array(a), size=(0,)).dtype\n size = random_state._handle_size(size)\n op = TensorPareto(size=size, state=random_state.to_numpy(), gpu=gpu, dtype=dtype)\n return op(a, chunk_size=chunk_size)\n",
"# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport itertools\n\nimport numpy as np\nimport pandas as pd\n\nfrom ... import opcodes\nfrom ...config import options\nfrom ...core import Base, Entity, OutputType\nfrom ...operands import OperandStage\nfrom ...serialize import StringField, AnyField, BoolField, Int64Field\nfrom ..align import align_dataframe_dataframe, align_dataframe_series, align_series_series\nfrom ..core import DATAFRAME_TYPE\nfrom ..operands import DataFrameOperandMixin, DataFrameOperand\nfrom ..utils import validate_axis\n\n\nclass FillNA(DataFrameOperand, DataFrameOperandMixin):\n _op_type_ = opcodes.FILL_NA\n\n _value = AnyField('value', on_serialize=lambda x: x.data if isinstance(x, Entity) else x)\n _method = StringField('method')\n _axis = AnyField('axis')\n _limit = Int64Field('limit')\n _downcast = AnyField('downcast')\n _use_inf_as_na = BoolField('use_inf_as_na')\n\n _output_limit = Int64Field('output_limit')\n\n def __init__(self, value=None, method=None, axis=None, limit=None, downcast=None,\n use_inf_as_na=None, sparse=None, stage=None, gpu=None, output_types=None,\n output_limit=None, **kw):\n super().__init__(_value=value, _method=method, _axis=axis, _limit=limit, _downcast=downcast,\n _use_inf_as_na=use_inf_as_na, _sparse=sparse, _stage=stage, _gpu=gpu,\n _output_types=output_types, _output_limit=output_limit, **kw)\n\n @property\n def value(self):\n return self._value\n\n @property\n def method(self):\n return self._method\n\n @property\n def axis(self):\n return self._axis\n\n @property\n def limit(self):\n return self._limit\n\n @property\n def downcast(self):\n return self._downcast\n\n @property\n def use_inf_as_na(self):\n return self._use_inf_as_na\n\n def _set_inputs(self, inputs):\n super()._set_inputs(inputs)\n if self._method is None and len(inputs) > 1:\n self._value = self._inputs[1]\n\n @property\n def output_limit(self):\n return self._output_limit or 1\n\n @staticmethod\n def _get_first_slice(op, df, end):\n if op.method == 'bfill':\n if op.output_types[0] == OutputType.series:\n return df.iloc[:end]\n else:\n if op.axis == 1:\n return df.iloc[:, :end]\n else:\n return df.iloc[:end, :]\n else:\n if op.output_types[0] == OutputType.series:\n return df.iloc[-end:]\n else:\n if op.axis == 1:\n return df.iloc[:, -end:]\n else:\n return df.iloc[-end:, :]\n\n @classmethod\n def _execute_map(cls, ctx, op):\n input_data = ctx[op.inputs[0].key]\n limit = op.limit\n axis = op.axis\n method = op.method\n\n filled = input_data.fillna(method=method, axis=axis, limit=limit, downcast=op.downcast)\n ctx[op.outputs[0].key] = cls._get_first_slice(op, filled, 1)\n del filled\n\n @classmethod\n def _execute_combine(cls, ctx, op):\n axis = op.axis\n method = op.method\n limit = op.limit\n\n input_data = ctx[op.inputs[0].key]\n if limit is not None:\n n_summaries = (len(op.inputs) - 1) // 2\n summaries = [ctx[inp.key] for inp in op.inputs[1:1 + n_summaries]]\n else:\n summaries = [ctx[inp.key] for inp in 
op.inputs[1:]]\n\n if not summaries:\n ctx[op.outputs[0].key] = input_data.fillna(method=method, axis=axis, limit=limit,\n downcast=op.downcast)\n return\n\n valid_summary = cls._get_first_slice(\n op, pd.concat(summaries, axis=axis).fillna(method=method, axis=axis), 1)\n\n if method == 'bfill':\n concat_df = pd.concat([input_data, valid_summary], axis=axis)\n else:\n concat_df = pd.concat([valid_summary, input_data], axis=axis)\n\n concat_df.fillna(method=method, axis=axis, inplace=True, limit=limit,\n downcast=op.downcast)\n ctx[op.outputs[0].key] = cls._get_first_slice(op, concat_df, -1)\n\n @classmethod\n def execute(cls, ctx, op):\n try:\n pd.set_option('mode.use_inf_as_na', op.use_inf_as_na)\n if op.stage == OperandStage.map:\n cls._execute_map(ctx, op)\n elif op.stage == OperandStage.combine:\n cls._execute_combine(ctx, op)\n else:\n input_data = ctx[op.inputs[0].key]\n value = getattr(op, 'value', None)\n if isinstance(op.value, (Base, Entity)):\n value = ctx[op.value.key]\n ctx[op.outputs[0].key] = input_data.fillna(\n value=value, method=op.method, axis=op.axis, limit=op.limit, downcast=op.downcast)\n finally:\n pd.reset_option('mode.use_inf_as_na')\n\n @classmethod\n def _tile_one_by_one(cls, op):\n in_df = op.inputs[0]\n in_value_df = op.value if isinstance(op.value, (Base, Entity)) else None\n df = op.outputs[0]\n\n new_chunks = []\n for c in in_df.chunks:\n inputs = [c] if in_value_df is None else [c, in_value_df.chunks[0]]\n kw = c.params\n new_op = op.copy().reset_key()\n new_chunks.append(new_op.new_chunk(inputs, **kw))\n\n kw = df.params.copy()\n kw.update(dict(chunks=new_chunks, nsplits=in_df.nsplits))\n new_op = op.copy().reset_key()\n return new_op.new_tileables(op.inputs, **kw)\n\n @classmethod\n def _build_combine(cls, op, input_chunks, summary_chunks, idx, is_forward=True):\n c = input_chunks[idx]\n\n summaries_to_concat = []\n\n idx_range = list(range(idx) if is_forward else range(idx + 1, len(summary_chunks)))\n for i in idx_range:\n summaries_to_concat.append(summary_chunks[i])\n\n new_chunk_op = op.copy().reset_key()\n new_chunk_op._stage = OperandStage.combine\n\n chunks_to_concat = [c] + summaries_to_concat\n return new_chunk_op.new_chunk(chunks_to_concat, **c.params)\n\n @classmethod\n def _tile_directional_dataframe(cls, op):\n in_df = op.inputs[0]\n df = op.outputs[0]\n is_forward = op.method == 'ffill'\n\n n_rows, n_cols = in_df.chunk_shape\n\n # map to get individual results and summaries\n src_chunks = np.empty(in_df.chunk_shape, dtype=np.object)\n summary_chunks = np.empty(in_df.chunk_shape, dtype=np.object)\n for c in in_df.chunks:\n new_chunk_op = op.copy().reset_key()\n new_chunk_op._stage = OperandStage.map\n if op.axis == 1:\n summary_shape = (c.shape[0], 1)\n else:\n summary_shape = (1, c.shape[1])\n src_chunks[c.index] = c\n summary_chunks[c.index] = new_chunk_op.new_chunk(\n [c], shape=summary_shape, dtypes=df.dtypes)\n\n # combine summaries into results\n output_chunk_array = np.empty(in_df.chunk_shape, dtype=np.object)\n if op.axis == 1:\n for row in range(n_rows):\n row_src = src_chunks[row, :]\n row_summaries = summary_chunks[row, :]\n for col in range(n_cols):\n output_chunk_array[row, col] = cls._build_combine(\n op, row_src, row_summaries, col, is_forward)\n else:\n for col in range(n_cols):\n col_src = src_chunks[:, col]\n col_summaries = summary_chunks[:, col]\n for row in range(n_rows):\n output_chunk_array[row, col] = cls._build_combine(\n op, col_src, col_summaries, row, is_forward)\n\n output_chunks = 
list(output_chunk_array.reshape((n_rows * n_cols,)))\n new_op = op.copy().reset_key()\n return new_op.new_tileables(op.inputs, shape=in_df.shape, nsplits=in_df.nsplits,\n chunks=output_chunks, dtypes=df.dtypes,\n index_value=df.index_value, columns_value=df.columns_value)\n\n @classmethod\n def _tile_directional_series(cls, op):\n in_series = op.inputs[0]\n series = op.outputs[0]\n forward = op.method == 'ffill'\n\n # map to get individual results and summaries\n summary_chunks = np.empty(in_series.chunk_shape, dtype=np.object)\n for c in in_series.chunks:\n new_chunk_op = op.copy().reset_key()\n new_chunk_op._stage = OperandStage.map\n summary_chunks[c.index] = new_chunk_op.new_chunk([c], shape=(1,), dtype=series.dtype)\n\n # combine summaries into results\n output_chunks = [\n cls._build_combine(op, in_series.chunks, summary_chunks, i, forward)\n for i in range(len(in_series.chunks))\n ]\n new_op = op.copy().reset_key()\n return new_op.new_tileables(op.inputs, shape=in_series.shape, nsplits=in_series.nsplits,\n chunks=output_chunks, dtype=series.dtype,\n index_value=series.index_value)\n\n @classmethod\n def _tile_both_dataframes(cls, op):\n in_df = op.inputs[0]\n in_value = op.inputs[1]\n df = op.outputs[0]\n\n nsplits, out_shape, left_chunks, right_chunks = align_dataframe_dataframe(in_df, in_value)\n out_chunk_indexes = itertools.product(*(range(s) for s in out_shape))\n\n out_chunks = []\n for idx, left_chunk, right_chunk in zip(out_chunk_indexes, left_chunks, right_chunks):\n out_chunk = op.copy().reset_key().new_chunk([left_chunk, right_chunk],\n shape=(np.nan, np.nan), index=idx)\n out_chunks.append(out_chunk)\n\n new_op = op.copy()\n return new_op.new_dataframes(op.inputs, df.shape,\n nsplits=tuple(tuple(ns) for ns in nsplits),\n chunks=out_chunks, dtypes=df.dtypes,\n index_value=df.index_value, columns_value=df.columns_value)\n\n @classmethod\n def _tile_dataframe_series(cls, op):\n left, right = op.inputs[0], op.inputs[1]\n df = op.outputs[0]\n\n nsplits, out_shape, left_chunks, right_chunks = align_dataframe_series(left, right, axis=1)\n out_chunk_indexes = itertools.product(*(range(s) for s in out_shape))\n\n out_chunks = []\n for out_idx, df_chunk in zip(out_chunk_indexes, left_chunks):\n series_chunk = right_chunks[out_idx[1]]\n kw = dict(shape=(nsplits[0][out_idx[0]], nsplits[1][out_idx[1]]),\n columns_value=df_chunk.columns_value)\n out_chunk = op.copy().reset_key().new_chunk([df_chunk, series_chunk], index=out_idx, **kw)\n out_chunks.append(out_chunk)\n\n new_op = op.copy().reset_key()\n return new_op.new_dataframes(op.inputs, df.shape,\n nsplits=tuple(tuple(ns) for ns in nsplits),\n chunks=out_chunks, dtypes=df.dtypes,\n index_value=df.index_value, columns_value=df.columns_value)\n\n @classmethod\n def _tile_both_series(cls, op):\n left, right = op.inputs[0], op.inputs[1]\n df = op.outputs[0]\n\n nsplits, out_shape, left_chunks, right_chunks = align_series_series(left, right)\n\n out_chunks = []\n for idx, left_chunk, right_chunk in zip(range(out_shape[0]), left_chunks, right_chunks):\n out_chunk = op.copy().reset_key().new_chunk([left_chunk, right_chunk],\n shape=(np.nan,), index=(idx,))\n out_chunks.append(out_chunk)\n\n new_op = op.copy()\n return new_op.new_seriess(op.inputs, df.shape,\n nsplits=tuple(tuple(ns) for ns in nsplits),\n chunks=out_chunks, dtype=df.dtype,\n index_value=df.index_value, name=df.name)\n\n @classmethod\n def tile(cls, op):\n in_df = op.inputs[0]\n if len(in_df.chunks) == 1 and \\\n (not isinstance(op.value, (Base, Entity)) or 
len(op.value.chunks) == 1):\n return cls._tile_one_by_one(op)\n elif op.method is not None:\n if op.output_types[0] == OutputType.dataframe:\n return cls._tile_directional_dataframe(op)\n else:\n return cls._tile_directional_series(op)\n elif not isinstance(op.value, (Base, Entity)):\n return cls._tile_one_by_one(op)\n elif isinstance(op.value, DATAFRAME_TYPE):\n return cls._tile_both_dataframes(op)\n elif op.output_types[0] == OutputType.dataframe:\n return cls._tile_dataframe_series(op)\n else:\n return cls._tile_both_series(op)\n\n def __call__(self, a, value_df=None):\n method = getattr(self, 'method', None)\n if method == 'backfill':\n method = 'bfill'\n elif method == 'pad':\n method = 'ffill'\n self._method = method\n axis = getattr(self, 'axis', None) or 0\n self._axis = validate_axis(axis, a)\n\n inputs = [a]\n if value_df is not None:\n inputs.append(value_df)\n if isinstance(a, DATAFRAME_TYPE):\n return self.new_dataframe(inputs, shape=a.shape, dtypes=a.dtypes, index_value=a.index_value,\n columns_value=a.columns_value)\n else:\n return self.new_series(inputs, shape=a.shape, dtype=a.dtype, index_value=a.index_value)\n\n\ndef fillna(df, value=None, method=None, axis=None, inplace=False, limit=None, downcast=None):\n \"\"\"\n Fill NA/NaN values using the specified method.\n\n Parameters\n ----------\n value : scalar, dict, Series, or DataFrame\n Value to use to fill holes (e.g. 0), alternately a\n dict/Series/DataFrame of values specifying which value to use for\n each index (for a Series) or column (for a DataFrame). Values not\n in the dict/Series/DataFrame will not be filled. This value cannot\n be a list.\n method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None\n Method to use for filling holes in reindexed Series\n pad / ffill: propagate last valid observation forward to next valid\n backfill / bfill: use next valid observation to fill gap.\n axis : {0 or 'index', 1 or 'columns'}\n Axis along which to fill missing values.\n inplace : bool, default False\n If True, fill in-place. Note: this will modify any\n other views on this object (e.g., a no-copy slice for a column in a\n DataFrame).\n limit : int, default None\n If method is specified, this is the maximum number of consecutive\n NaN values to forward/backward fill. In other words, if there is\n a gap with more than this number of consecutive NaNs, it will only\n be partially filled. If method is not specified, this is the\n maximum number of entries along the entire axis where NaNs will be\n filled. Must be greater than 0 if not None.\n downcast : dict, default is None\n A dict of item->dtype of what to downcast if possible,\n or the string 'infer' which will try to downcast to an appropriate\n equal type (e.g. float64 to int64 if possible).\n\n Returns\n -------\n DataFrame or None\n Object with missing values filled or None if ``inplace=True``.\n\n See Also\n --------\n interpolate : Fill NaN values using interpolation.\n reindex : Conform object to new index.\n asfreq : Convert TimeSeries to specified frequency.\n\n Examples\n --------\n >>> import mars.tensor as mt\n >>> import mars.dataframe as md\n >>> df = md.DataFrame([[mt.nan, 2, mt.nan, 0],\n ... [3, 4, mt.nan, 1],\n ... [mt.nan, mt.nan, mt.nan, 5],\n ... [mt.nan, 3, mt.nan, 4]],\n ... 
columns=list('ABCD'))\n >>> df.execute()\n A B C D\n 0 NaN 2.0 NaN 0\n 1 3.0 4.0 NaN 1\n 2 NaN NaN NaN 5\n 3 NaN 3.0 NaN 4\n\n Replace all NaN elements with 0s.\n\n >>> df.fillna(0).execute()\n A B C D\n 0 0.0 2.0 0.0 0\n 1 3.0 4.0 0.0 1\n 2 0.0 0.0 0.0 5\n 3 0.0 3.0 0.0 4\n\n We can also propagate non-null values forward or backward.\n\n >>> df.fillna(method='ffill').execute()\n A B C D\n 0 NaN 2.0 NaN 0\n 1 3.0 4.0 NaN 1\n 2 3.0 4.0 NaN 5\n 3 3.0 3.0 NaN 4\n\n Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,\n 2, and 3 respectively.\n\n >>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3}\n >>> df.fillna(value=values).execute()\n A B C D\n 0 0.0 2.0 2.0 0\n 1 3.0 4.0 2.0 1\n 2 0.0 1.0 2.0 5\n 3 0.0 3.0 2.0 4\n \"\"\"\n if value is None and method is None:\n raise ValueError(\"Must specify a fill 'value' or 'method'.\")\n elif value is not None and method is not None:\n raise ValueError(\"Cannot specify both 'value' and 'method'.\")\n\n if df.op.output_types[0] == OutputType.series and isinstance(value, (DATAFRAME_TYPE, pd.DataFrame)):\n raise ValueError('\"value\" parameter must be a scalar, dict or Series, but you passed a \"%s\"'\n % type(value).__name__)\n\n if downcast is not None:\n raise NotImplementedError('Currently argument \"downcast\" is not implemented yet')\n if limit is not None:\n raise NotImplementedError('Currently argument \"limit\" is not implemented yet')\n\n if isinstance(value, (Base, Entity)):\n value, value_df = None, value\n else:\n value_df = None\n\n use_inf_as_na = options.dataframe.mode.use_inf_as_na\n op = FillNA(value=value, method=method, axis=axis, limit=limit, downcast=downcast,\n use_inf_as_na=use_inf_as_na, output_types=df.op.output_types)\n out_df = op(df, value_df=value_df)\n if inplace:\n df.data = out_df.data\n else:\n return out_df\n\n\ndef ffill(df, axis=None, inplace=False, limit=None, downcast=None):\n return fillna(df, method='ffill', axis=axis, inplace=inplace, limit=limit, downcast=downcast)\n\n\ndef bfill(df, axis=None, inplace=False, limit=None, downcast=None):\n return fillna(df, method='bfill', axis=axis, inplace=inplace, limit=limit, downcast=downcast)\n",
"# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport itertools\nimport operator\n\nimport numpy as np\n\nimport pandas as pd\n\nfrom .. import opcodes as OperandDef\nfrom ..operands import OperandStage, OutputType\nfrom ..serialize import ValueType, AnyField, BoolField, Int32Field, KeyField, ListField\nfrom ..utils import get_shuffle_input_keys_idxes\nfrom .core import SERIES_CHUNK_TYPE\nfrom .utils import hash_dtypes, filter_dtypes\nfrom .operands import DataFrameMapReduceOperand, DataFrameOperandMixin, \\\n DataFrameShuffleProxy\nfrom .utils import parse_index, split_monotonic_index_min_max, \\\n build_split_idx_to_origin_idx, filter_index_value, hash_index\n\n\nclass DataFrameIndexAlign(DataFrameMapReduceOperand, DataFrameOperandMixin):\n _op_type_ = OperandDef.DATAFRAME_INDEX_ALIGN\n\n _index_min = AnyField('index_min')\n _index_min_close = BoolField('index_min_close')\n _index_max = AnyField('index_max')\n _index_max_close = BoolField('index_max_close')\n _index_shuffle_size = Int32Field('index_shuffle_size')\n _column_min = AnyField('column_min')\n _column_min_close = BoolField('column_min_close')\n _column_max = AnyField('column_max')\n _column_max_close = BoolField('column_max_close')\n _column_shuffle_size = Int32Field('column_shuffle_size')\n _column_shuffle_segments = ListField('column_shuffle_segments', ValueType.series)\n\n _input = KeyField('input')\n\n def __init__(self, index_min_max=None, index_shuffle_size=None, column_min_max=None,\n column_shuffle_size=None, column_shuffle_segments=None,\n sparse=None, dtype=None, dtypes=None, gpu=None, stage=None, shuffle_key=None,\n output_types=None, **kw):\n if index_min_max is not None:\n kw.update(dict(_index_min=index_min_max[0], _index_min_close=index_min_max[1],\n _index_max=index_min_max[2], _index_max_close=index_min_max[3]))\n if column_min_max is not None:\n kw.update(dict(_column_min=column_min_max[0], _column_min_close=column_min_max[1],\n _column_max=column_min_max[2], _column_max_close=column_min_max[3]))\n super().__init__(\n _index_shuffle_size=index_shuffle_size, _column_shuffle_size=column_shuffle_size,\n _column_shuffle_segments=column_shuffle_segments, _sparse=sparse,\n _dtype=dtype, _dtypes=dtypes, _gpu=gpu, _stage=stage, _shuffle_key=shuffle_key,\n _output_types=output_types, **kw)\n\n @property\n def index_min(self):\n return self._index_min\n\n @property\n def index_min_close(self):\n return self._index_min_close\n\n @property\n def index_max(self):\n return self._index_max\n\n @property\n def index_max_close(self):\n return self._index_max_close\n\n @property\n def index_min_max(self):\n if getattr(self, '_index_min', None) is None:\n return None\n return self._index_min, self._index_min_close, \\\n self._index_max, self._index_max_close\n\n @property\n def index_shuffle_size(self):\n return self._index_shuffle_size\n\n @property\n def column_min(self):\n return self._column_min\n\n @property\n def column_min_close(self):\n return 
self._column_min_close\n\n @property\n def column_max(self):\n return self._column_max\n\n @property\n def column_max_close(self):\n return self._column_max_close\n\n @property\n def column_min_max(self):\n if getattr(self, '_column_min', None) is None:\n return None\n return self._column_min, self._column_min_close, \\\n self._column_max, self._column_max_close\n\n @property\n def column_shuffle_size(self):\n return self._column_shuffle_size\n\n @property\n def column_shuffle_segments(self):\n return self._column_shuffle_segments\n\n def _set_inputs(self, inputs):\n super()._set_inputs(inputs)\n self._input = self._inputs[0]\n\n def _build_map_chunk_kw(self, **kw):\n inputs = self.inputs\n if kw.get('index_value', None) is None and inputs[0].index_value is not None:\n input_index_value = inputs[0].index_value\n index_min_max = self.index_min_max\n if index_min_max is not None:\n kw['index_value'] = filter_index_value(input_index_value, index_min_max)\n else:\n kw['index_value'] = parse_index(inputs[0].index_value.to_pandas(),\n input_index_value, type(self).__name__)\n if kw.get('columns_value', None) is None and getattr(inputs[0], 'columns_value', None) is not None:\n input_columns_value = inputs[0].columns_value\n input_dtypes = inputs[0].dtypes\n column_min_max = self.column_min_max\n if column_min_max is not None:\n kw['columns_value'] = filter_index_value(input_columns_value, column_min_max,\n store_data=True)\n else:\n kw['columns_value'] = parse_index(inputs[0].columns_value.to_pandas(), input_columns_value,\n type(self).__name__)\n kw['dtypes'] = input_dtypes[kw['columns_value'].to_pandas()]\n column_shuffle_size = self.column_shuffle_size\n if column_shuffle_size is not None:\n self._column_shuffle_segments = hash_dtypes(input_dtypes, column_shuffle_size)\n if kw.get('dtype', None) and getattr(inputs[0], 'dtype', None) is not None:\n kw['dtype'] = inputs[0].dtype\n if kw.get('name', None) and getattr(inputs[0], 'name', None) is not None:\n kw['name'] = inputs[0].dtype\n return kw\n\n def _build_reduce_chunk_kw(self, index, **kw):\n inputs = self.inputs\n if kw.get('index_value', None) is None and inputs[0].inputs[0].index_value is not None:\n index_align_map_chunks = inputs[0].inputs\n if index_align_map_chunks[0].op.index_min_max is not None:\n # shuffle on columns, all the DataFrameIndexAlignMap has the same index\n kw['index_value'] = filter_index_value(index_align_map_chunks[0].index_value,\n index_align_map_chunks[0].op.index_min_max)\n else:\n # shuffle on index\n kw['index_value'] = parse_index(index_align_map_chunks[0].index_value.to_pandas(),\n [c.key for c in index_align_map_chunks], type(self).__name__)\n if kw.get('columns_value', None) is None and getattr(inputs[0].inputs[0], 'columns_value', None) is not None:\n index_align_map_chunks = inputs[0].inputs\n if index_align_map_chunks[0].op.column_min_max is not None:\n # shuffle on index\n kw['columns_value'] = filter_index_value(index_align_map_chunks[0].columns_value,\n index_align_map_chunks[0].op.column_min_max,\n store_data=True)\n kw['dtypes'] = index_align_map_chunks[0].dtypes[kw['columns_value'].to_pandas()]\n else:\n # shuffle on columns\n all_dtypes = [c.op.column_shuffle_segments[index[1]] for c in index_align_map_chunks\n if c.index[0] == index_align_map_chunks[0].index[0]]\n kw['dtypes'] = pd.concat(all_dtypes)\n kw['columns_value'] = parse_index(kw['dtypes'].index, store_data=True)\n if kw.get('dtype', None) and getattr(inputs[0].inputs[0], 'dtype', None) is not None:\n kw['dtype'] = 
inputs[0].inputs[0].dtype\n if kw.get('name', None) and getattr(inputs[0].inputs[0], 'name', None) is not None:\n kw['name'] = inputs[0].inputs[0].dtype\n return kw\n\n def _create_chunk(self, output_idx, index, **kw):\n if self.stage == OperandStage.map:\n kw = self._build_map_chunk_kw(**kw)\n else:\n kw = self._build_reduce_chunk_kw(index, **kw)\n return super()._create_chunk(output_idx, index, **kw)\n\n @classmethod\n def execute_map(cls, ctx, op):\n # TODO(QIN): add GPU support here\n df = ctx[op.inputs[0].key]\n\n filters = [[], []]\n\n chunk = op.outputs[0]\n if op.index_shuffle_size == -1:\n # no shuffle and no min-max filter on index\n filters[0].append(slice(None, None, None))\n elif op.index_shuffle_size is None:\n # no shuffle on index\n comp_op = operator.ge if op.index_min_close else operator.gt\n index_cond = comp_op(df.index, op.index_min)\n comp_op = operator.le if op.index_max_close else operator.lt\n index_cond = index_cond & comp_op(df.index, op.index_max)\n filters[0].append(index_cond)\n else:\n # shuffle on index\n shuffle_size = op.index_shuffle_size\n filters[0].extend(hash_index(df.index, shuffle_size))\n\n if chunk.ndim == 1:\n if len(filters[0]) == 1:\n # no shuffle\n ctx[chunk.key] = df.loc[filters[0][0]]\n else:\n for index_idx, index_filter in enumerate(filters[0]):\n group_key = str(index_idx)\n ctx[(chunk.key, group_key)] = df.loc[index_filter]\n return\n\n if op.column_shuffle_size == -1:\n # no shuffle and no min-max filter on columns\n filters[1].append(slice(None, None, None))\n if op.column_shuffle_size is None:\n # no shuffle on columns\n comp_op = operator.ge if op.column_min_close else operator.gt\n columns_cond = comp_op(df.columns, op.column_min)\n comp_op = operator.le if op.column_max_close else operator.lt\n columns_cond = columns_cond & comp_op(df.columns, op.column_max)\n filters[1].append(columns_cond)\n else:\n # shuffle on columns\n shuffle_size = op.column_shuffle_size\n filters[1].extend(hash_index(df.columns, shuffle_size))\n\n if all(len(it) == 1 for it in filters):\n # no shuffle\n ctx[chunk.key] = df.loc[filters[0][0], filters[1][0]]\n elif len(filters[0]) == 1:\n # shuffle on columns\n for column_idx, column_filter in enumerate(filters[1]):\n group_key = ','.join([str(chunk.index[0]), str(column_idx)])\n ctx[(chunk.key, group_key)] = df.loc[filters[0][0], column_filter]\n elif len(filters[1]) == 1:\n # shuffle on index\n for index_idx, index_filter in enumerate(filters[0]):\n group_key = ','.join([str(index_idx), str(chunk.index[1])])\n ctx[(chunk.key, group_key)] = df.loc[index_filter, filters[1][0]]\n else:\n # full shuffle\n shuffle_index_size = op.index_shuffle_size\n shuffle_column_size = op.column_shuffle_size\n out_idxes = itertools.product(range(shuffle_index_size), range(shuffle_column_size))\n out_index_columns = itertools.product(*filters)\n for out_idx, out_index_column in zip(out_idxes, out_index_columns):\n index_filter, column_filter = out_index_column\n group_key = ','.join(str(i) for i in out_idx)\n ctx[(chunk.key, group_key)] = df.loc[index_filter, column_filter]\n\n @classmethod\n def execute_reduce(cls, ctx, op):\n chunk = op.outputs[0]\n input_keys, input_idxes = get_shuffle_input_keys_idxes(op.inputs[0])\n input_idx_to_df = {idx: ctx[inp_key, ','.join(str(ix) for ix in chunk.index)]\n for inp_key, idx in zip(input_keys, input_idxes)}\n row_idxes = sorted({idx[0] for idx in input_idx_to_df})\n if chunk.ndim == 2:\n col_idxes = sorted({idx[1] for idx in input_idx_to_df})\n\n ress = []\n for row_idx in row_idxes:\n 
if chunk.ndim == 2:\n row_dfs = []\n for col_idx in col_idxes:\n row_dfs.append(input_idx_to_df[row_idx, col_idx])\n row_df = pd.concat(row_dfs, axis=1)\n else:\n row_df = input_idx_to_df[(row_idx,)]\n\n ress.append(row_df)\n\n ctx[chunk.key] = pd.concat(ress, axis=0)\n\n @classmethod\n def execute(cls, ctx, op):\n if op.stage == OperandStage.map:\n cls.execute_map(ctx, op)\n else:\n cls.execute_reduce(ctx, op)\n\n\nclass _AxisMinMaxSplitInfo(object):\n def __init__(self, left_split, left_increase, right_split, right_increase, dummy=False):\n self._left_split = left_split\n self._right_split = right_split\n self._dummy = dummy\n\n self._left_split_idx_to_origin_idx = \\\n build_split_idx_to_origin_idx(self._left_split, left_increase)\n self._right_split_idx_to_origin_idx = \\\n build_split_idx_to_origin_idx(self._right_split, right_increase)\n\n def isdummy(self):\n return self._dummy\n\n def get_origin_left_idx(self, idx):\n return self._left_split_idx_to_origin_idx[idx][0]\n\n def get_origin_left_split(self, idx):\n left_idx, left_inner_idx = \\\n self._left_split_idx_to_origin_idx[idx]\n return self._left_split[left_idx][left_inner_idx]\n\n def get_origin_right_idx(self, idx):\n return self._right_split_idx_to_origin_idx[idx][0]\n\n def get_origin_right_split(self, idx):\n right_idx, right_inner_idx = \\\n self._right_split_idx_to_origin_idx[idx]\n return self._right_split[right_idx][right_inner_idx]\n\n\nclass _MinMaxSplitInfo(object):\n def __init__(self, row_min_max_split_info=None, col_min_max_split_info=None):\n self.row_min_max_split_info = row_min_max_split_info\n self.col_min_max_split_info = col_min_max_split_info\n\n def all_axes_can_split(self):\n return self.row_min_max_split_info is not None and \\\n self.col_min_max_split_info is not None\n\n def one_axis_can_split(self):\n return (self.row_min_max_split_info is None) ^ \\\n (self.col_min_max_split_info is None)\n\n def no_axis_can_split(self):\n return self.row_min_max_split_info is None and \\\n self.col_min_max_split_info is None\n\n def __getitem__(self, i):\n return [self.row_min_max_split_info, self.col_min_max_split_info][i]\n\n def __setitem__(self, axis, axis_min_max_split_info):\n assert axis in {0, 1}\n if axis == 0:\n self.row_min_max_split_info = axis_min_max_split_info\n else:\n self.col_min_max_split_info = axis_min_max_split_info\n\n def get_row_left_idx(self, out_idx):\n return self.row_min_max_split_info.get_origin_left_idx(out_idx)\n\n def get_row_left_split(self, out_idx):\n return self.row_min_max_split_info.get_origin_left_split(out_idx)\n\n def get_col_left_idx(self, out_idx):\n return self.col_min_max_split_info.get_origin_left_idx(out_idx)\n\n def get_col_left_split(self, out_idx):\n return self.col_min_max_split_info.get_origin_left_split(out_idx)\n\n def get_row_right_idx(self, out_idx):\n return self.row_min_max_split_info.get_origin_right_idx(out_idx)\n\n def get_row_right_split(self, out_idx):\n return self.row_min_max_split_info.get_origin_right_split(out_idx)\n\n def get_col_right_idx(self, out_idx):\n return self.col_min_max_split_info.get_origin_right_idx(out_idx)\n\n def get_col_right_split(self, out_idx):\n return self.col_min_max_split_info.get_origin_right_split(out_idx)\n\n def get_axis_idx(self, axis, left_or_right, out_idx):\n if axis == 0:\n if left_or_right == 0:\n return self.get_row_left_idx(out_idx)\n else:\n assert left_or_right == 1\n return self.get_row_right_idx(out_idx)\n else:\n assert axis == 1\n if left_or_right == 0:\n return self.get_col_left_idx(out_idx)\n else:\n 
assert left_or_right == 1\n return self.get_col_right_idx(out_idx)\n\n def get_axis_split(self, axis, left_or_right, out_idx):\n if axis == 0:\n if left_or_right == 0:\n return self.get_row_left_split(out_idx)\n else:\n assert left_or_right == 1\n return self.get_row_right_split(out_idx)\n else:\n assert axis == 1\n if left_or_right == 0:\n return self.get_col_left_split(out_idx)\n else:\n assert left_or_right == 1\n return self.get_col_right_split(out_idx)\n\n\ndef _get_chunk_index_min_max(index_chunks):\n chunk_index_min_max = []\n for chunk in index_chunks:\n min_val = chunk.min_val\n min_val_close = chunk.min_val_close\n max_val = chunk.max_val\n max_val_close = chunk.max_val_close\n if min_val is None or max_val is None:\n return\n chunk_index_min_max.append((min_val, min_val_close, max_val, max_val_close))\n return chunk_index_min_max\n\n\ndef _get_monotonic_chunk_index_min_max(index, index_chunks):\n chunk_index_min_max = _get_chunk_index_min_max(index_chunks)\n if index.is_monotonic_decreasing:\n return list(reversed(chunk_index_min_max)), False\n\n for j in range(len(chunk_index_min_max) - 1):\n # overlap only if the prev max is close and curr min is close\n # and they are identical\n prev_max, prev_max_close = chunk_index_min_max[j][2:]\n curr_min, curr_min_close = chunk_index_min_max[j + 1][:2]\n if prev_max_close and curr_min_close and prev_max == curr_min:\n return\n return chunk_index_min_max, True\n\n\ndef _need_align_map(input_chunk, index_min_max, column_min_max,\n dummy_index_splits=False, dummy_column_splits=False):\n if not dummy_index_splits:\n assert not index_min_max[0] is None and not index_min_max[2] is None\n if isinstance(input_chunk, SERIES_CHUNK_TYPE):\n if input_chunk.index_value is None:\n return True\n if input_chunk.index_value.min_max != index_min_max:\n return True\n else:\n if not dummy_index_splits:\n if input_chunk.index_value is None or input_chunk.index_value.min_max != index_min_max:\n return True\n if not dummy_column_splits:\n if input_chunk.columns_value is None or input_chunk.columns_value.min_max != column_min_max:\n return True\n return False\n\n\ndef _is_index_identical(left, right):\n if len(left) != len(right):\n return False\n for left_item, right_item in zip(left, right):\n if left_item.key != right_item.key:\n return False\n return True\n\n\ndef _axis_need_shuffle(left_axis, right_axis, left_axis_chunks, right_axis_chunks):\n if _is_index_identical(left_axis_chunks, right_axis_chunks):\n return False\n if not left_axis.is_monotonic_increasing_or_decreasing and len(left_axis_chunks) > 1:\n return True\n if not right_axis.is_monotonic_increasing_or_decreasing and len(right_axis_chunks) > 1:\n return True\n return False\n\n\ndef _calc_axis_splits(left_axis, right_axis, left_axis_chunks, right_axis_chunks):\n if _axis_need_shuffle(left_axis, right_axis, left_axis_chunks, right_axis_chunks):\n # do shuffle\n out_chunk_size = max(len(left_axis_chunks), len(right_axis_chunks))\n return None, [np.nan for _ in range(out_chunk_size)]\n else:\n # no need to do shuffle on this axis\n if _is_index_identical(left_axis_chunks, right_axis_chunks):\n left_chunk_index_min_max = _get_chunk_index_min_max(left_axis_chunks)\n right_splits = left_splits = [[c] for c in left_chunk_index_min_max]\n right_increase = left_increase = None\n elif len(left_axis_chunks) == 1 and len(right_axis_chunks) == 1:\n left_splits = [_get_chunk_index_min_max(left_axis_chunks)]\n left_increase = left_axis_chunks[0].is_monotonic_decreasing\n right_splits = 
[_get_chunk_index_min_max(right_axis_chunks)]\n right_increase = right_axis_chunks[0].is_monotonic_decreasing\n else:\n left_chunk_index_min_max, left_increase = _get_monotonic_chunk_index_min_max(left_axis,\n left_axis_chunks)\n right_chunk_index_min_max, right_increase = _get_monotonic_chunk_index_min_max(right_axis,\n right_axis_chunks)\n left_splits, right_splits = split_monotonic_index_min_max(\n left_chunk_index_min_max, left_increase, right_chunk_index_min_max, right_increase)\n splits = _AxisMinMaxSplitInfo(left_splits, left_increase, right_splits, right_increase)\n nsplits = [np.nan for _ in itertools.chain(*left_splits)]\n return splits, nsplits\n\n\ndef _build_dummy_axis_split(chunk_shape):\n axis_index_min_max, axis_increase = [(i, True, i + 1, True) for i in range(chunk_shape)], True\n if len(axis_index_min_max) == 1:\n left_splits, right_splits = [axis_index_min_max], [axis_index_min_max]\n else:\n left_splits, right_splits = split_monotonic_index_min_max(\n axis_index_min_max, axis_increase, axis_index_min_max, axis_increase)\n return _AxisMinMaxSplitInfo(left_splits, axis_increase,\n right_splits, axis_increase, dummy=True)\n\n\ndef _gen_series_chunks(splits, out_shape, left_or_right, series):\n out_chunks = []\n if splits[0] is not None:\n # need no shuffle\n for out_idx in range(out_shape[0]):\n idx = splits.get_axis_idx(0, left_or_right, out_idx)\n index_min_max = splits.get_axis_split(0, left_or_right, out_idx)\n chunk = series.cix[(idx,)]\n if _need_align_map(chunk, index_min_max, None):\n align_op = DataFrameIndexAlign(\n stage=OperandStage.map, index_min_max=index_min_max, column_min_max=None,\n dtype=chunk.dtype, sparse=series.issparse(), output_types=[OutputType.series])\n out_chunk = align_op.new_chunk([chunk], shape=(np.nan,), index=(out_idx,))\n else:\n out_chunk = chunk\n out_chunks.append(out_chunk)\n else:\n # gen map chunks\n map_chunks = []\n for chunk in series.chunks:\n map_op = DataFrameIndexAlign(\n stage=OperandStage.map, sparse=chunk.issparse(), index_shuffle_size=out_shape[0],\n output_types=[OutputType.series])\n map_chunks.append(map_op.new_chunk([chunk], shape=(np.nan,), index=chunk.index))\n\n proxy_chunk = DataFrameShuffleProxy(output_types=[OutputType.series]).new_chunk(\n map_chunks, shape=())\n\n # gen reduce chunks\n for out_idx in range(out_shape[0]):\n reduce_op = DataFrameIndexAlign(stage=OperandStage.reduce, i=out_idx,\n sparse=proxy_chunk.issparse(), shuffle_key=str(out_idx),\n output_types=[OutputType.series])\n out_chunks.append(\n reduce_op.new_chunk([proxy_chunk], shape=(np.nan,), index=(out_idx,)))\n\n return out_chunks\n\n\ndef _gen_dataframe_chunks(splits, out_shape, left_or_right, df):\n out_chunks = []\n if splits.all_axes_can_split():\n # no shuffle for all axes\n kw = {\n 'index_shuffle_size': -1 if splits[0].isdummy() else None,\n 'column_shuffle_size': -1 if splits[1].isdummy() else None,\n }\n for out_idx in itertools.product(*(range(s) for s in out_shape)):\n row_idx = splits.get_axis_idx(0, left_or_right, out_idx[0])\n col_idx = splits.get_axis_idx(1, left_or_right, out_idx[1])\n index_min_max = splits.get_axis_split(0, left_or_right, out_idx[0])\n column_min_max = splits.get_axis_split(1, left_or_right, out_idx[1])\n chunk = df.cix[row_idx, col_idx]\n if _need_align_map(chunk, index_min_max, column_min_max,\n splits[0].isdummy(), splits[1].isdummy()):\n if splits[1].isdummy():\n dtypes = chunk.dtypes\n else:\n dtypes = filter_dtypes(chunk.dtypes, column_min_max)\n chunk_kw = {\n 'index_value': chunk.index_value if 
splits[0].isdummy() else None,\n 'columns_value': chunk.columns_value if splits[1].isdummy() else None,\n 'dtypes': chunk.dtypes if splits[1].isdummy() else None\n }\n align_op = DataFrameIndexAlign(\n stage=OperandStage.map, index_min_max=index_min_max,\n column_min_max=column_min_max, dtypes=dtypes, sparse=chunk.issparse(),\n output_types=[OutputType.dataframe], **kw)\n out_chunk = align_op.new_chunk([chunk], shape=(np.nan, np.nan), index=out_idx, **chunk_kw)\n else:\n out_chunk = chunk\n out_chunks.append(out_chunk)\n elif splits.one_axis_can_split():\n # one axis needs shuffle\n shuffle_axis = 0 if splits[0] is None else 1\n align_axis = 1 - shuffle_axis\n\n for align_axis_idx in range(out_shape[align_axis]):\n if align_axis == 0:\n kw = {\n 'index_min_max': splits.get_axis_split(align_axis, left_or_right, align_axis_idx),\n 'index_shuffle_size': -1 if splits[0].isdummy() else None,\n 'column_shuffle_size': out_shape[shuffle_axis],\n }\n input_idx = splits.get_axis_idx(align_axis, left_or_right, align_axis_idx)\n else:\n kw = {\n 'column_min_max': splits.get_axis_split(align_axis, left_or_right, align_axis_idx),\n 'index_shuffle_size': out_shape[shuffle_axis],\n 'column_shuffle_size': -1 if splits[1].isdummy() else None,\n }\n input_idx = splits.get_axis_idx(align_axis, left_or_right, align_axis_idx)\n input_chunks = [c for c in df.chunks if c.index[align_axis] == input_idx]\n map_chunks = []\n for j, input_chunk in enumerate(input_chunks):\n chunk_kw = dict()\n if align_axis == 0:\n chunk_kw['index_value'] = input_chunk.index_value if splits[0].isdummy() else None\n else:\n chunk_kw['columns_value'] = input_chunk.columns_value if splits[1].isdummy() else None\n map_op = DataFrameIndexAlign(stage=OperandStage.map, sparse=input_chunk.issparse(),\n output_types=[OutputType.dataframe], **kw)\n idx = [None, None]\n idx[align_axis] = align_axis_idx\n idx[shuffle_axis] = j\n map_chunks.append(map_op.new_chunk([input_chunk], shape=(np.nan, np.nan), index=tuple(idx), **chunk_kw))\n proxy_chunk = DataFrameShuffleProxy(\n sparse=df.issparse(), output_types=[OutputType.dataframe]).new_chunk(map_chunks, shape=())\n for j in range(out_shape[shuffle_axis]):\n chunk_kw = dict()\n if align_axis == 0:\n chunk_kw['index_value'] = proxy_chunk.inputs[0].inputs[0].index_value \\\n if splits[0].isdummy() else None\n else:\n chunk_kw['columns_value'] = proxy_chunk.inputs[0].inputs[0].columns_value \\\n if splits[1].isdummy() else None\n reduce_idx = (align_axis_idx, j) if align_axis == 0 else (j, align_axis_idx)\n reduce_op = DataFrameIndexAlign(stage=OperandStage.reduce, i=j, sparse=proxy_chunk.issparse(),\n shuffle_key=','.join(str(idx) for idx in reduce_idx),\n output_types=[OutputType.dataframe])\n out_chunks.append(\n reduce_op.new_chunk([proxy_chunk], shape=(np.nan, np.nan), index=reduce_idx, **chunk_kw))\n out_chunks.sort(key=lambda c: c.index)\n else:\n # all axes need shuffle\n assert splits.no_axis_can_split()\n\n # gen map chunks\n map_chunks = []\n for chunk in df.chunks:\n map_op = DataFrameIndexAlign(\n stage=OperandStage.map, sparse=chunk.issparse(), index_shuffle_size=out_shape[0],\n column_shuffle_size=out_shape[1], output_types=[OutputType.dataframe])\n map_chunks.append(map_op.new_chunk([chunk], shape=(np.nan, np.nan), index=chunk.index))\n\n proxy_chunk = DataFrameShuffleProxy(output_types=[OutputType.dataframe]).new_chunk(\n map_chunks, shape=())\n\n # gen reduce chunks\n for out_idx in itertools.product(*(range(s) for s in out_shape)):\n reduce_op = 
DataFrameIndexAlign(stage=OperandStage.reduce, i=out_idx,\n sparse=proxy_chunk.issparse(),\n shuffle_key=','.join(str(idx) for idx in out_idx),\n output_types=[OutputType.dataframe])\n out_chunks.append(\n reduce_op.new_chunk([proxy_chunk], shape=(np.nan, np.nan), index=out_idx))\n\n return out_chunks\n\n\ndef align_dataframe_dataframe(left, right):\n left_index_chunks = [c.index_value for c in left.cix[:, 0]]\n left_columns_chunks = [c.columns_value for c in left.cix[0, :]]\n right_index_chunks = [c.index_value for c in right.cix[:, 0]]\n right_columns_chunks = [c.columns_value for c in right.cix[0, :]]\n\n index_splits, index_nsplits = _calc_axis_splits(left.index_value, right.index_value,\n left_index_chunks, right_index_chunks)\n if _is_index_identical(left_index_chunks, right_index_chunks):\n index_nsplits = left.nsplits[0]\n\n columns_splits, columns_nsplits = _calc_axis_splits(left.columns_value, right.columns_value,\n left_columns_chunks, right_columns_chunks)\n if _is_index_identical(left_columns_chunks, right_columns_chunks):\n columns_nsplits = left.nsplits[1]\n\n nsplits = [index_nsplits, columns_nsplits]\n out_chunk_shape = tuple(len(ns) for ns in nsplits)\n splits = _MinMaxSplitInfo(index_splits, columns_splits)\n\n left_chunks = _gen_dataframe_chunks(splits, out_chunk_shape, 0, left)\n right_chunks = _gen_dataframe_chunks(splits, out_chunk_shape, 1, right)\n\n return nsplits, out_chunk_shape, left_chunks, right_chunks\n\n\ndef align_dataframe_series(left, right, axis='columns'):\n if axis == 'columns' or axis == 1:\n left_columns_chunks = [c.columns_value for c in left.cix[0, :]]\n right_index_chunks = [c.index_value for c in right.chunks]\n index_splits, index_nsplits = _calc_axis_splits(left.columns_value, right.index_value,\n left_columns_chunks, right_index_chunks)\n if _is_index_identical(left_columns_chunks, right_index_chunks):\n index_nsplits = left.nsplits[1]\n dummy_splits, dummy_nsplits = _build_dummy_axis_split(left.chunk_shape[0]), left.nsplits[0]\n nsplits = [dummy_nsplits, index_nsplits]\n out_chunk_shape = tuple(len(ns) for ns in nsplits)\n left_chunks = _gen_dataframe_chunks(_MinMaxSplitInfo(dummy_splits, index_splits), out_chunk_shape, 0, left)\n right_chunks = _gen_series_chunks(_MinMaxSplitInfo(index_splits, None), (out_chunk_shape[1],), 1, right)\n else:\n assert axis == 'index' or axis == 0\n left_index_chunks = [c.index_value for c in left.cix[:, 0]]\n right_index_chunks = [c.index_value for c in right.chunks]\n index_splits, index_nsplits = _calc_axis_splits(left.index_value, right.index_value,\n left_index_chunks, right_index_chunks)\n if _is_index_identical(left_index_chunks, right_index_chunks):\n index_nsplits = left.nsplits[0]\n dummy_splits, dummy_nsplits = _build_dummy_axis_split(left.chunk_shape[1]), left.nsplits[1]\n nsplits = [index_nsplits, dummy_nsplits]\n out_chunk_shape = tuple(len(ns) for ns in nsplits)\n left_chunks = _gen_dataframe_chunks(_MinMaxSplitInfo(index_splits, dummy_splits), out_chunk_shape, 0, left)\n right_chunks = _gen_series_chunks(_MinMaxSplitInfo(index_splits, None), (out_chunk_shape[0],), 1, right)\n\n return nsplits, out_chunk_shape, left_chunks, right_chunks\n\n\ndef align_series_series(left, right):\n left_index_chunks = [c.index_value for c in left.chunks]\n right_index_chunks = [c.index_value for c in right.chunks]\n\n index_splits, index_nsplits = _calc_axis_splits(left.index_value, right.index_value,\n left_index_chunks, right_index_chunks)\n if _is_index_identical(left_index_chunks, right_index_chunks):\n 
index_nsplits = left.nsplits[0]\n nsplits = [index_nsplits]\n out_chunk_shape = (len(index_nsplits),)\n splits = _MinMaxSplitInfo(index_splits, None)\n\n left_chunks = _gen_series_chunks(splits, out_chunk_shape, 0, left)\n right_chunks = _gen_series_chunks(splits, out_chunk_shape, 1, right)\n\n return nsplits, out_chunk_shape, left_chunks, right_chunks\n",
"# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport logging\nimport os\nimport sys\nimport unittest\nimport uuid\n\nimport numpy as np\nfrom numpy.testing import assert_allclose\n\nfrom mars import tensor as mt\nfrom mars.serialize.dataserializer import loads\nfrom mars.scheduler.tests.integrated.base import SchedulerIntegratedTest\nfrom mars.actors.core import new_client\nfrom mars.scheduler.graph import GraphState\n\nlogger = logging.getLogger(__name__)\n\n\[email protected](sys.platform == 'win32', \"plasma don't support windows\")\nclass Test(SchedulerIntegratedTest):\n def testCommonOperandFailover(self):\n delay_file = self.add_state_file('OP_DELAY_STATE_FILE')\n open(delay_file, 'w').close()\n\n terminate_file = self.add_state_file('OP_TERMINATE_STATE_FILE')\n\n self.start_processes(modules=['mars.scheduler.tests.integrated.op_delayer'], log_worker=True)\n\n session_id = uuid.uuid1()\n actor_client = new_client()\n session_ref = actor_client.actor_ref(self.session_manager_ref.create_session(session_id))\n\n np_a = np.random.random((100, 100))\n np_b = np.random.random((100, 100))\n\n a = mt.array(np_a, chunk_size=30) * 2 + 1\n b = mt.array(np_b, chunk_size=30) * 2 + 1\n c = a.dot(b) * 2 + 1\n graph = c.build_graph()\n targets = [c.key]\n graph_key = uuid.uuid1()\n session_ref.submit_tileable_graph(\n json.dumps(graph.to_json()), graph_key, target_tileables=targets)\n\n while not os.path.exists(terminate_file):\n actor_client.sleep(0.01)\n\n self.kill_process_tree(self.proc_workers[0])\n logger.warning('Worker %s KILLED!\\n\\n', self.proc_workers[0].pid)\n self.proc_workers = self.proc_workers[1:]\n os.unlink(delay_file)\n\n state = self.wait_for_termination(actor_client, session_ref, graph_key)\n self.assertEqual(state, GraphState.SUCCEEDED)\n\n result = session_ref.fetch_result(graph_key, c.key)\n expected = (np_a * 2 + 1).dot(np_b * 2 + 1) * 2 + 1\n assert_allclose(loads(result), expected)\n\n def testShuffleFailoverBeforeSuccStart(self):\n pred_finish_file = self.add_state_file('SHUFFLE_ALL_PRED_FINISHED_FILE')\n succ_start_file = self.add_state_file('SHUFFLE_START_SUCC_FILE')\n\n self.start_processes(modules=['mars.scheduler.tests.integrated.op_delayer'], log_worker=True)\n\n session_id = uuid.uuid1()\n actor_client = new_client()\n session_ref = actor_client.actor_ref(self.session_manager_ref.create_session(session_id))\n\n a = mt.ones((31, 27), chunk_size=10)\n b = a.reshape(27, 31)\n b.op.extra_params['_reshape_with_shuffle'] = True\n graph = b.build_graph()\n targets = [b.key]\n graph_key = uuid.uuid1()\n session_ref.submit_tileable_graph(json.dumps(graph.to_json()),\n graph_key, target_tileables=targets)\n actor_client.sleep(1)\n\n while not os.path.exists(pred_finish_file):\n actor_client.sleep(0.01)\n\n self.kill_process_tree(self.proc_workers[0])\n logger.warning('Worker %s KILLED!\\n\\n', self.proc_workers[0].pid)\n self.proc_workers = self.proc_workers[1:]\n open(succ_start_file, 'w').close()\n\n 
state = self.wait_for_termination(actor_client, session_ref, graph_key)\n self.assertEqual(state, GraphState.SUCCEEDED)\n\n result = session_ref.fetch_result(graph_key, b.key)\n assert_allclose(loads(result), np.ones((27, 31)))\n\n def testShuffleFailoverBeforeAllSuccFinish(self):\n pred_finish_file = self.add_state_file('SHUFFLE_ALL_PRED_FINISHED_FILE')\n succ_finish_file = self.add_state_file('SHUFFLE_HAS_SUCC_FINISH_FILE')\n\n self.start_processes(modules=['mars.scheduler.tests.integrated.op_delayer'], log_worker=True)\n\n session_id = uuid.uuid1()\n actor_client = new_client()\n session_ref = actor_client.actor_ref(self.session_manager_ref.create_session(session_id))\n\n a = mt.ones((31, 27), chunk_size=10)\n b = a.reshape(27, 31)\n b.op.extra_params['_reshape_with_shuffle'] = True\n r = mt.inner(b + 1, b + 1)\n graph = r.build_graph()\n targets = [r.key]\n graph_key = uuid.uuid1()\n session_ref.submit_tileable_graph(json.dumps(graph.to_json()),\n graph_key, target_tileables=targets)\n actor_client.sleep(1)\n\n while not os.path.exists(succ_finish_file):\n actor_client.sleep(0.01)\n\n self.kill_process_tree(self.proc_workers[0])\n logger.warning('Worker %s KILLED!\\n\\n', self.proc_workers[0].pid)\n self.proc_workers = self.proc_workers[1:]\n\n os.unlink(pred_finish_file)\n os.unlink(succ_finish_file)\n\n state = self.wait_for_termination(actor_client, session_ref, graph_key)\n self.assertEqual(state, GraphState.SUCCEEDED)\n\n result = session_ref.fetch_result(graph_key, r.key)\n assert_allclose(loads(result), np.inner(np.ones((27, 31)) + 1, np.ones((27, 31)) + 1))\n\n def testShuffleFailoverAfterAllSuccFinish(self):\n all_succ_finish_file = self.add_state_file('SHUFFLE_ALL_SUCC_FINISH_FILE')\n\n self.start_processes(modules=['mars.scheduler.tests.integrated.op_delayer'],\n log_worker=True)\n\n session_id = uuid.uuid1()\n actor_client = new_client()\n session_ref = actor_client.actor_ref(self.session_manager_ref.create_session(session_id))\n\n a = mt.ones((31, 27), chunk_size=10)\n b = a.reshape(27, 31)\n b.op.extra_params['_reshape_with_shuffle'] = True\n r = mt.inner(b + 1, b + 1)\n graph = r.build_graph()\n targets = [r.key]\n graph_key = uuid.uuid1()\n session_ref.submit_tileable_graph(json.dumps(graph.to_json()),\n graph_key, target_tileables=targets)\n actor_client.sleep(1)\n\n while not os.path.exists(all_succ_finish_file):\n actor_client.sleep(0.01)\n\n self.kill_process_tree(self.proc_workers[0])\n logger.warning('Worker %s KILLED!\\n\\n', self.proc_workers[0].pid)\n self.proc_workers = self.proc_workers[1:]\n\n os.unlink(all_succ_finish_file)\n\n state = self.wait_for_termination(actor_client, session_ref, graph_key)\n self.assertEqual(state, GraphState.SUCCEEDED)\n\n result = session_ref.fetch_result(graph_key, r.key)\n assert_allclose(loads(result), np.inner(np.ones((27, 31)) + 1, np.ones((27, 31)) + 1))\n",
"# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport functools\nimport uuid\nimport weakref\nfrom io import BytesIO\n\nimport numpy as np\nfrom numpy.testing import assert_allclose\n\nfrom mars.errors import StorageFull\nfrom mars.serialize import dataserializer\nfrom mars.tests.core import patch_method\nfrom mars.utils import get_next_port\nfrom mars.worker import WorkerDaemonActor, QuotaActor, MemQuotaActor\nfrom mars.worker.tests.base import WorkerCase\nfrom mars.worker.storage import StorageManagerActor, PlasmaKeyMapActor, SharedHolderActor, \\\n InProcHolderActor, StorageHandler, DataStorageDevice\n\n\ndef mock_transfer_in_global_runner(self, session_id, data_key, src_handler, fallback=None):\n if fallback:\n return fallback()\n\n\n@patch_method(StorageHandler.transfer_in_runner, new=mock_transfer_in_global_runner)\nclass Test(WorkerCase):\n plasma_storage_size = 1024 * 1024 * 10\n\n def testSharedReadAndWrite(self, *_):\n test_addr = '127.0.0.1:%d' % get_next_port()\n io_size = dataserializer.HEADER_LENGTH * 2\n with self.create_pool(n_process=1, address=test_addr) as pool, \\\n self.run_actor_test(pool) as test_actor:\n pool.create_actor(WorkerDaemonActor, uid=WorkerDaemonActor.default_uid())\n storage_manager_ref = pool.create_actor(\n StorageManagerActor, uid=StorageManagerActor.default_uid())\n\n pool.create_actor(PlasmaKeyMapActor, uid=PlasmaKeyMapActor.default_uid())\n pool.create_actor(SharedHolderActor, uid=SharedHolderActor.default_uid())\n\n data1 = np.random.random((100, 100))\n ser_data1 = dataserializer.serialize(data1)\n\n session_id = str(uuid.uuid4())\n data_key1 = str(uuid.uuid4())\n\n storage_client = test_actor.storage_client\n handler = storage_client.get_storage_handler((0, DataStorageDevice.SHARED_MEMORY))\n\n def _write_data(ser, writer):\n self.assertEqual(writer.nbytes, ser_data1.total_bytes)\n with writer:\n ser.write_to(writer)\n\n handler.create_bytes_writer(session_id, data_key1, ser_data1.total_bytes, _promise=True) \\\n .then(functools.partial(_write_data, ser_data1)) \\\n .then(lambda *_: test_actor.set_result(None),\n lambda *exc: test_actor.set_result(exc, accept=False))\n self.get_result(5)\n self.assertEqual(sorted(storage_manager_ref.get_data_locations(session_id, [data_key1])[0]),\n [(0, DataStorageDevice.SHARED_MEMORY)])\n handler.delete(session_id, [data_key1])\n\n def _write_data(ser, writer):\n with writer:\n for start in range(0, len(ser), io_size):\n writer.write(ser[start:start + io_size])\n\n handler.create_bytes_writer(session_id, data_key1, ser_data1.total_bytes, _promise=True) \\\n .then(functools.partial(_write_data, ser_data1.to_buffer())) \\\n .then(lambda *_: test_actor.set_result(None),\n lambda *exc: test_actor.set_result(exc, accept=False))\n self.get_result(5)\n self.assertEqual(sorted(storage_manager_ref.get_data_locations(session_id, [data_key1])[0]),\n [(0, DataStorageDevice.SHARED_MEMORY)])\n\n def _read_data_all(reader):\n with reader:\n return 
dataserializer.deserialize(reader.read())\n\n handler.create_bytes_reader(session_id, data_key1, _promise=True) \\\n .then(_read_data_all) \\\n .then(functools.partial(test_actor.set_result),\n lambda *exc: test_actor.set_result(exc, accept=False))\n assert_allclose(self.get_result(5), data1)\n\n def _read_data_batch(reader):\n bio = BytesIO()\n with reader:\n while True:\n buf = reader.read(io_size)\n if buf:\n bio.write(buf)\n else:\n break\n return dataserializer.deserialize(bio.getvalue())\n\n handler.create_bytes_reader(session_id, data_key1, _promise=True) \\\n .then(_read_data_batch) \\\n .then(functools.partial(test_actor.set_result),\n lambda *exc: test_actor.set_result(exc, accept=False))\n assert_allclose(self.get_result(5), data1)\n handler.delete(session_id, [data_key1])\n\n def testSharedReadAndWritePacked(self, *_):\n test_addr = '127.0.0.1:%d' % get_next_port()\n io_size = dataserializer.HEADER_LENGTH * 2\n with self.create_pool(n_process=1, address=test_addr) as pool, \\\n self.run_actor_test(pool) as test_actor:\n pool.create_actor(WorkerDaemonActor, uid=WorkerDaemonActor.default_uid())\n storage_manager_ref = pool.create_actor(\n StorageManagerActor, uid=StorageManagerActor.default_uid())\n\n pool.create_actor(PlasmaKeyMapActor, uid=PlasmaKeyMapActor.default_uid())\n pool.create_actor(SharedHolderActor, uid=SharedHolderActor.default_uid())\n\n data1 = np.random.random((100, 100))\n ser_data1 = dataserializer.serialize(data1)\n block_data1 = dataserializer.dumps(data1, compress=dataserializer.CompressType.NONE)\n\n session_id = str(uuid.uuid4())\n data_key1 = str(uuid.uuid4())\n\n storage_client = test_actor.storage_client\n handler = storage_client.get_storage_handler((0, DataStorageDevice.SHARED_MEMORY))\n\n def _write_data(ser, writer):\n with writer:\n writer.write(ser)\n\n handler.create_bytes_writer(session_id, data_key1, ser_data1.total_bytes,\n packed=True, _promise=True) \\\n .then(functools.partial(_write_data, block_data1)) \\\n .then(lambda *_: test_actor.set_result(None),\n lambda *exc: test_actor.set_result(exc, accept=False))\n self.get_result(5)\n self.assertEqual(sorted(storage_manager_ref.get_data_locations(session_id, [data_key1])[0]),\n [(0, DataStorageDevice.SHARED_MEMORY)])\n handler.delete(session_id, [data_key1])\n\n def _write_data(ser, writer):\n with writer:\n with self.assertRaises(IOError):\n writer.write(ser[:1])\n\n for start in range(0, len(ser), io_size):\n writer.write(ser[start:start + io_size])\n\n handler.create_bytes_writer(session_id, data_key1, ser_data1.total_bytes,\n packed=True, _promise=True) \\\n .then(functools.partial(_write_data, block_data1)) \\\n .then(lambda *_: test_actor.set_result(None),\n lambda *exc: test_actor.set_result(exc, accept=False))\n self.get_result(5)\n self.assertEqual(sorted(storage_manager_ref.get_data_locations(session_id, [data_key1])[0]),\n [(0, DataStorageDevice.SHARED_MEMORY)])\n\n def _read_data_all(reader):\n with reader:\n return dataserializer.loads(reader.read())\n\n handler.create_bytes_reader(session_id, data_key1, packed=True, _promise=True) \\\n .then(_read_data_all) \\\n .then(functools.partial(test_actor.set_result),\n lambda *exc: test_actor.set_result(exc, accept=False))\n assert_allclose(self.get_result(5), data1)\n\n def _read_data_batch(reader):\n bio = BytesIO()\n with reader:\n while True:\n buf = reader.read(io_size)\n if buf:\n bio.write(buf)\n else:\n break\n return dataserializer.loads(bio.getvalue())\n\n handler.create_bytes_reader(session_id, data_key1, packed=True, 
_promise=True) \\\n .then(_read_data_batch) \\\n .then(functools.partial(test_actor.set_result),\n lambda *exc: test_actor.set_result(exc, accept=False))\n assert_allclose(self.get_result(5), data1)\n handler.delete(session_id, [data_key1])\n\n def testSharedPutAndGet(self, *_):\n test_addr = '127.0.0.1:%d' % get_next_port()\n with self.create_pool(n_process=1, address=test_addr) as pool, \\\n self.run_actor_test(pool) as test_actor:\n pool.create_actor(WorkerDaemonActor, uid=WorkerDaemonActor.default_uid())\n storage_manager_ref = pool.create_actor(\n StorageManagerActor, uid=StorageManagerActor.default_uid())\n\n pool.create_actor(PlasmaKeyMapActor, uid=PlasmaKeyMapActor.default_uid())\n pool.create_actor(SharedHolderActor, uid=SharedHolderActor.default_uid())\n\n data1 = np.random.random((10, 10))\n data2 = np.random.random((10, 10))\n ser_data2 = dataserializer.serialize(data2)\n bytes_data2 = ser_data2.to_buffer()\n\n session_id = str(uuid.uuid4())\n data_key1 = str(uuid.uuid4())\n data_key2 = str(uuid.uuid4())\n\n storage_client = test_actor.storage_client\n handler = storage_client.get_storage_handler((0, DataStorageDevice.SHARED_MEMORY))\n\n handler.put_objects(session_id, [data_key1], [data1])\n self.assertEqual(sorted(storage_manager_ref.get_data_locations(session_id, [data_key1])[0]),\n [(0, DataStorageDevice.SHARED_MEMORY)])\n assert_allclose(data1, handler.get_objects(session_id, [data_key1])[0])\n\n handler.delete(session_id, [data_key1])\n self.assertEqual(list(storage_manager_ref.get_data_locations(session_id, [data_key1])[0]), [])\n with self.assertRaises(KeyError):\n handler.get_objects(session_id, [data_key1])\n\n handler.put_objects(session_id, [data_key2], [ser_data2], serialize=True)\n assert_allclose(data2, handler.get_objects(session_id, [data_key2])[0])\n handler.delete(session_id, [data_key2])\n\n handler.put_objects(session_id, [data_key2], [bytes_data2], serialize=True)\n assert_allclose(data2, handler.get_objects(session_id, [data_key2])[0])\n handler.delete(session_id, [data_key2])\n\n def testSharedLoadFromBytes(self, *_):\n import logging\n logging.basicConfig(level=logging.DEBUG)\n test_addr = '127.0.0.1:%d' % get_next_port()\n with self.create_pool(n_process=1, address=test_addr) as pool, \\\n self.run_actor_test(pool) as test_actor:\n pool.create_actor(WorkerDaemonActor, uid=WorkerDaemonActor.default_uid())\n storage_manager_ref = pool.create_actor(\n StorageManagerActor, uid=StorageManagerActor.default_uid())\n\n pool.create_actor(QuotaActor, 1024 ** 2, uid=MemQuotaActor.default_uid())\n pool.create_actor(InProcHolderActor)\n\n pool.create_actor(PlasmaKeyMapActor, uid=PlasmaKeyMapActor.default_uid())\n pool.create_actor(SharedHolderActor, uid=SharedHolderActor.default_uid())\n\n data1 = np.random.random((10, 10))\n ser_data1 = dataserializer.serialize(data1)\n\n session_id = str(uuid.uuid4())\n data_key1 = str(uuid.uuid4())\n\n storage_client = test_actor.storage_client\n handler = storage_client.get_storage_handler((0, DataStorageDevice.SHARED_MEMORY))\n\n # load from bytes io\n disk_handler = storage_client.get_storage_handler((0, DataStorageDevice.DISK))\n with disk_handler.create_bytes_writer(\n session_id, data_key1, ser_data1.total_bytes) as writer:\n ser_data1.write_to(writer)\n\n handler.load_from_bytes_io(session_id, [data_key1], disk_handler) \\\n .then(lambda *_: test_actor.set_result(None),\n lambda *exc: test_actor.set_result(exc, accept=False))\n self.get_result(5)\n self.assertEqual(sorted(storage_manager_ref.get_data_locations(session_id, 
[data_key1])[0]),\n [(0, DataStorageDevice.SHARED_MEMORY), (0, DataStorageDevice.DISK)])\n\n disk_handler.delete(session_id, [data_key1])\n handler.delete(session_id, [data_key1])\n\n # load from bytes io till no capacity\n data_list = [np.random.randint(0, 32767, (655360,), np.int16)\n for _ in range(20)]\n data_keys = [str(uuid.uuid4()) for _ in range(20)]\n for key, data in zip(data_keys, data_list):\n ser_data = dataserializer.serialize(data)\n with disk_handler.create_bytes_writer(\n session_id, key, ser_data.total_bytes) as writer:\n ser_data.write_to(writer)\n\n handler.load_from_bytes_io(session_id, data_keys, disk_handler) \\\n .then(lambda *_: test_actor.set_result(None),\n lambda *exc: test_actor.set_result(exc, accept=False))\n\n affected_keys = set()\n try:\n self.get_result(5)\n except StorageFull as ex:\n affected_keys.update(ex.affected_keys)\n\n storage_client.delete(session_id, data_keys, [DataStorageDevice.DISK])\n\n self.assertLess(len(affected_keys), len(data_keys))\n self.assertGreater(len(affected_keys), 1)\n for k, size in zip(data_keys, storage_client.get_data_sizes(session_id, data_keys)):\n if k in affected_keys:\n self.assertIsNone(size)\n else:\n self.assertIsNotNone(size)\n\n def testSharedLoadFromObjects(self, *_):\n test_addr = '127.0.0.1:%d' % get_next_port()\n with self.create_pool(n_process=1, address=test_addr) as pool, \\\n self.run_actor_test(pool) as test_actor:\n pool.create_actor(WorkerDaemonActor, uid=WorkerDaemonActor.default_uid())\n storage_manager_ref = pool.create_actor(\n StorageManagerActor, uid=StorageManagerActor.default_uid())\n\n pool.create_actor(QuotaActor, 1024 ** 2, uid=MemQuotaActor.default_uid())\n pool.create_actor(InProcHolderActor)\n\n pool.create_actor(PlasmaKeyMapActor, uid=PlasmaKeyMapActor.default_uid())\n pool.create_actor(SharedHolderActor, uid=SharedHolderActor.default_uid())\n\n data1 = np.random.random((10, 10))\n\n session_id = str(uuid.uuid4())\n data_key1 = str(uuid.uuid4())\n\n storage_client = test_actor.storage_client\n handler = storage_client.get_storage_handler((0, DataStorageDevice.SHARED_MEMORY))\n\n # load from object io\n ref_data1 = weakref.ref(data1)\n\n proc_handler = storage_client.get_storage_handler((0, DataStorageDevice.PROC_MEMORY))\n proc_handler.put_objects(session_id, [data_key1], [data1])\n del data1\n\n handler.load_from_object_io(session_id, [data_key1], proc_handler) \\\n .then(lambda *_: test_actor.set_result(None),\n lambda *exc: test_actor.set_result(exc, accept=False))\n self.get_result(5)\n self.assertEqual(sorted(storage_manager_ref.get_data_locations(session_id, [data_key1])[0]),\n [(0, DataStorageDevice.PROC_MEMORY), (0, DataStorageDevice.SHARED_MEMORY)])\n\n proc_handler.delete(session_id, [data_key1])\n self.assertIsNone(ref_data1())\n handler.delete(session_id, [data_key1])\n\n def testSharedSpill(self, *_):\n test_addr = '127.0.0.1:%d' % get_next_port()\n with self.create_pool(n_process=1, address=test_addr) as pool, \\\n self.run_actor_test(pool) as test_actor:\n pool.create_actor(WorkerDaemonActor, uid=WorkerDaemonActor.default_uid())\n storage_manager_ref = pool.create_actor(\n StorageManagerActor, uid=StorageManagerActor.default_uid())\n\n pool.create_actor(PlasmaKeyMapActor, uid=PlasmaKeyMapActor.default_uid())\n holder_ref = pool.create_actor(\n SharedHolderActor, self.plasma_storage_size,\n uid=SharedHolderActor.default_uid())\n\n session_id = str(uuid.uuid4())\n data_list = [np.random.randint(0, 32767, (655360,), np.int16)\n for _ in range(20)]\n data_keys = 
[str(uuid.uuid4()) for _ in range(20)]\n\n storage_client = test_actor.storage_client\n handler = storage_client.get_storage_handler((0, DataStorageDevice.SHARED_MEMORY))\n idx = 0\n\n def _fill_data():\n i = 0\n for i, (key, data) in enumerate(zip(data_keys[idx:], data_list)):\n try:\n handler.put_objects(session_id, [key], [data])\n except StorageFull:\n break\n return i + idx\n\n def _do_spill():\n data_size = storage_manager_ref.get_data_sizes(session_id, [data_keys[0]])[0]\n handler.spill_size(2 * data_size) \\\n .then(lambda *_: test_actor.set_result(None),\n lambda *exc: test_actor.set_result(exc, accept=False))\n self.get_result(5)\n\n # test lift data key\n idx = _fill_data()\n handler.lift_data_keys(session_id, [data_keys[0]])\n _do_spill()\n\n self.assertEqual(list(storage_manager_ref.get_data_locations(session_id, [data_keys[0]])[0]),\n [(0, DataStorageDevice.SHARED_MEMORY)])\n self.assertEqual(list(storage_manager_ref.get_data_locations(session_id, [data_keys[1]])[0]),\n [(0, DataStorageDevice.DISK)])\n\n handler.put_objects(session_id, [data_keys[idx]], [data_list[idx]])\n self.assertEqual(list(storage_manager_ref.get_data_locations(session_id, [data_keys[idx]])[0]),\n [(0, DataStorageDevice.SHARED_MEMORY)])\n idx += 1\n\n # test pin data key\n idx = _fill_data()\n holder_ref.lift_data_keys(session_id, [data_keys[0]], last=False)\n pin_token = str(uuid.uuid4())\n pinned_keys = handler.pin_data_keys(session_id, (data_keys[0],), pin_token)\n self.assertIn(data_keys[0], pinned_keys)\n _do_spill()\n\n self.assertEqual(list(storage_manager_ref.get_data_locations(session_id, [data_keys[0]])[0]),\n [(0, DataStorageDevice.SHARED_MEMORY)])\n self.assertEqual(list(storage_manager_ref.get_data_locations(session_id, [data_keys[1]])[0]),\n [(0, DataStorageDevice.DISK)])\n\n handler.put_objects(session_id, [data_keys[idx]], [data_list[idx]])\n self.assertEqual(list(storage_manager_ref.get_data_locations(session_id, [data_keys[idx]])[0]),\n [(0, DataStorageDevice.SHARED_MEMORY)])\n idx += 1\n\n # test unpin data key\n idx = _fill_data()\n handler.unpin_data_keys(session_id, (data_keys[0],), pin_token)\n _do_spill()\n\n self.assertEqual(list(storage_manager_ref.get_data_locations(session_id, [data_keys[0]])[0]),\n [(0, DataStorageDevice.DISK)])\n",
"# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom collections.abc import Sequence\n\nimport numpy as np\ntry:\n from scipy.sparse.base import spmatrix\nexcept ImportError: # pragma: no cover\n spmatrix = None\n\nfrom ... import opcodes as OperandDef\nfrom ... import tensor as mt\nfrom ...core import Base, Entity\nfrom ...serialize import KeyField, BoolField, TupleField, DataTypeField, AnyField, ListField\nfrom ...tensor.core import TensorOrder\nfrom ...tiles import TilesError\nfrom ...utils import recursive_tile\nfrom ..operands import LearnOperand, LearnOperandMixin, OutputType\nfrom ..utils import assert_all_finite\n\n\nclass IsMultilabel(LearnOperand, LearnOperandMixin):\n _op_type_ = OperandDef.IS_MULTILABEL\n\n _y = AnyField('y')\n _unique_y = KeyField('unique_y')\n # for chunk\n _is_y_sparse = BoolField('is_y_sparse')\n\n def __init__(self, y=None, unique_y=None, is_y_sparse=None, **kw):\n super().__init__(_y=y, _unique_y=unique_y,\n _is_y_sparse=is_y_sparse, **kw)\n self.output_types = [OutputType.tensor]\n\n @property\n def y(self):\n return self._y\n\n @property\n def unique_y(self):\n return self._unique_y\n\n @property\n def is_y_sparse(self):\n return self._is_y_sparse\n\n def _set_inputs(self, inputs):\n super()._set_inputs(inputs)\n if isinstance(self._y, (Base, Entity)):\n self._y = self._inputs[0]\n if self._unique_y is not None:\n self._unique_y = self._inputs[-1]\n\n def __call__(self, y, y_unique=None):\n inputs = [y] if isinstance(y, (Base, Entity)) else []\n if y_unique is not None:\n inputs.append(y_unique)\n return self.new_tileable(inputs, shape=(), dtype=np.dtype(bool),\n order=TensorOrder.C_ORDER)\n\n @classmethod\n def tile(cls, op):\n y = op.y\n out = op.outputs[0]\n\n if not (hasattr(y, 'shape') and y.ndim == 2 and y.shape[1] > 1):\n result = mt.array(False)._inplace_tile()\n return [result]\n else:\n unique_y = op.unique_y\n assert len(unique_y.chunks) == 1\n unique_y_chunk = unique_y.chunks[0]\n chunk_op = IsMultilabel(unique_y=unique_y_chunk,\n is_y_sparse=y.issparse())\n chunk = chunk_op.new_chunk([unique_y_chunk], dtype=out.dtype,\n order=out.order, index=(0,),\n shape=())\n\n new_op = op.copy()\n params = out.params\n params['nsplits'] = ()\n params['chunks'] = [chunk]\n return new_op.new_tileables(op.inputs, kws=[params])\n\n @classmethod\n def execute(cls, ctx, op):\n unique_y = ctx[op.unique_y.key]\n\n if op.is_y_sparse:\n # sparse\n result = (unique_y.size in (0, 1) and\n (unique_y.dtype.kind in 'biu' or # bool, int, uint\n _is_integral_float(unique_y)))\n else:\n # dense\n labels = unique_y\n result = len(labels) < 3 and (unique_y.dtype.kind in 'biu' or # bool, int, uint\n _is_integral_float(labels))\n\n ctx[op.outputs[0].key] = result\n\n\ndef _is_integral_float(y):\n return y.dtype.kind == 'f' and np.all(y.astype(int) == y)\n\n\ndef is_multilabel(y):\n \"\"\" Check if ``y`` is in a multilabel format.\n\n Parameters\n ----------\n y : numpy array of shape [n_samples]\n Target values.\n\n 
Returns\n -------\n out : bool,\n Return ``True``, if ``y`` is in a multilabel format, else ```False``.\n\n Examples\n --------\n >>> import mars.tensor as mt\n >>> from mars.learn.utils.multiclass import is_multilabel\n >>> is_multilabel([0, 1, 0, 1]).execute()\n False\n >>> is_multilabel([[1], [0, 2], []]).execute()\n False\n >>> is_multilabel(mt.array([[1, 0], [0, 0]])).execute()\n True\n >>> is_multilabel(mt.array([[1], [0], [0]])).execute()\n False\n >>> is_multilabel(mt.array([[1, 0, 0]])).execute()\n True\n \"\"\"\n if not isinstance(y, (Base, Entity)):\n if hasattr(y, '__array__') or isinstance(y, Sequence):\n y = np.asarray(y)\n if hasattr(y, 'shape'):\n yt = y = mt.asarray(y)\n else:\n yt = None\n else:\n yt = y = mt.tensor(y)\n\n if hasattr(y, 'dtype') and y.dtype != np.object_:\n unique_y = mt.unique(y, aggregate_size=1)\n else:\n unique_y = None\n op = IsMultilabel(y=y, unique_y=unique_y)\n return op(yt, unique_y)\n\n\nclass TypeOfTarget(LearnOperand, LearnOperandMixin):\n __slots__ = ('_unique_y_chunk', '_check_all_finite_chunk')\n _op_type_ = OperandDef.TYPE_OF_TARGET\n\n _y = AnyField('y')\n # for chunks\n _is_multilabel = KeyField('is_multilabel')\n _first_value = KeyField('first_value')\n _check_float = KeyField('check_float')\n _assert_all_finite = KeyField('assert_all_finite')\n _unique_y = KeyField('unique_y')\n _y_shape = TupleField('y_shape')\n _y_dtype = DataTypeField('y_dtype')\n _checked_targets = ListField('checked_targets')\n\n def __init__(self, y=None, is_multilabel=None, first_value=None,\n check_float=None, assert_all_finite=None,\n unique_y=None, y_shape=None, y_dtype=None,\n checked_targets=None, **kw):\n super().__init__(_y=y, _is_multilabel=is_multilabel,\n _first_value=first_value, _check_float=check_float,\n _assert_all_finite=assert_all_finite,\n _unique_y=unique_y, _y_shape=y_shape,\n _y_dtype=y_dtype, _checked_targets=checked_targets, **kw)\n self.output_types = [OutputType.tensor]\n\n @property\n def y(self):\n return self._y\n\n @property\n def is_multilabel(self):\n return self._is_multilabel\n\n @property\n def first_value(self):\n return self._first_value\n\n @property\n def check_float(self):\n return self._check_float\n\n @property\n def assert_all_finite(self):\n return self._assert_all_finite\n\n @property\n def unique_y(self):\n return self._unique_y\n\n @property\n def y_shape(self):\n return self._y_shape\n\n @property\n def y_dtype(self):\n return self._y_dtype\n\n @property\n def checked_targets(self):\n return self._checked_targets\n\n def _set_inputs(self, inputs):\n super()._set_inputs(inputs)\n inputs_iter = iter(self._inputs)\n for attr in ['_y', '_is_multilabel', '_first_value',\n '_check_float', '_assert_all_finite',\n '_unique_y']:\n v = getattr(self, attr)\n if isinstance(v, (Base, Entity)):\n setattr(self, attr, next(inputs_iter))\n\n def __call__(self, y):\n inputs = [y] if isinstance(y, (Base, Entity)) else []\n return self.new_tileable(inputs, shape=(), order=TensorOrder.C_ORDER,\n dtype=np.dtype(object))\n\n @classmethod\n def tile(cls, op):\n out = op.outputs[0]\n y = op.y\n\n chunk_inputs = []\n is_multilabel_chunk = recursive_tile(is_multilabel(y)).chunks[0]\n chunk_inputs.append(is_multilabel_chunk)\n\n if not isinstance(y, (Base, Entity)):\n if hasattr(y, '__array__'):\n y = np.asarray(y)\n y = mt.asarray(y)\n if np.isnan(y.size): # pragma: no cover\n raise TilesError('y has unknown shape')\n\n chunk_op = TypeOfTarget(is_multilabel=is_multilabel_chunk,\n y_shape=y.shape, y_dtype=y.dtype)\n\n if y.ndim <= 2 and y.size 
> 0 and y.dtype == object:\n first_value_chunk = recursive_tile(y[(0,) * y.ndim]).chunks[0]\n chunk_inputs.append(first_value_chunk)\n chunk_op._first_value = first_value_chunk\n\n if y.dtype.kind == 'f':\n check_float_chunk = recursive_tile(mt.any(y != y.astype(int))).chunks[0]\n chunk_inputs.append(check_float_chunk)\n chunk_op._check_float = check_float_chunk\n\n assert_all_finite_chunk = recursive_tile(assert_all_finite(y)).chunks[0]\n chunk_inputs.append(assert_all_finite_chunk)\n chunk_op._assert_all_finite = assert_all_finite_chunk\n\n if y.size > 0:\n unique_y_chunk = recursive_tile(mt.unique(y, aggregate_size=1)).chunks[0]\n chunk_inputs.append(unique_y_chunk)\n chunk_op._unique_y = unique_y_chunk\n\n chunk = chunk_op.new_chunk(chunk_inputs, dtype=out.dtype,\n shape=out.shape, order=out.order, index=())\n params = out.params\n params['nsplits'] = ()\n params['chunks'] = [chunk]\n new_op = op.copy()\n return new_op.new_tileables(op.inputs, kws=[params])\n\n @classmethod\n def _execute(cls, ctx, op):\n is_multilabel_ = ctx[op.is_multilabel.key]\n shape = op.y_shape\n ndim = len(shape)\n dtype = op.y_dtype\n\n if is_multilabel_:\n return 'multilabel-indicator'\n\n if ndim > 2 or (dtype == object and shape[0] and\n not isinstance(ctx[op.first_value.key], str)):\n return 'unknown' # [[[1, 2]]] or [obj_1] and not [\"label_1\"]\n\n if ndim == 2 and shape[1] == 0:\n return 'unknown' # [[]]\n\n if ndim == 2 and shape[1] > 1:\n suffix = '-multioutput' # [[1, 2], [1, 2]]\n else:\n suffix = \"\" # [1, 2, 3] or [[1], [2], [3]]\n\n # check float and contains non-integer float values\n if dtype.kind == 'f' and ctx[op.check_float.key]:\n # [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]\n assert ctx[op.assert_all_finite.key]\n return 'continuous' + suffix\n\n if op.unique_y is not None:\n unique_y_len = len(ctx[op.unique_y.key])\n else:\n # y.size == 0\n unique_y_len = 0\n if (unique_y_len > 2) or (ndim >= 2 and shape[1] > 1):\n return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]\n else:\n return 'binary' # [1, 2] or [[\"a\"], [\"b\"]]\n\n @classmethod\n def execute(cls, ctx, op):\n target = cls._execute(ctx, op)\n if op.checked_targets is not None and len(op.checked_targets) > 0:\n if target not in op.checked_targets:\n raise ValueError('Unknown label type: {}'.format(target))\n ctx[op.outputs[0].key] = target\n\n\ndef type_of_target(y):\n \"\"\"Determine the type of data indicated by the target.\n\n Note that this type is the most specific type that can be inferred.\n For example:\n\n * ``binary`` is more specific but compatible with ``multiclass``.\n * ``multiclass`` of integers is more specific but compatible with\n ``continuous``.\n * ``multilabel-indicator`` is more specific but compatible with\n ``multiclass-multioutput``.\n\n Parameters\n ----------\n y : array-like\n\n Returns\n -------\n target_type : string\n One of:\n\n * 'continuous': `y` is an array-like of floats that are not all\n integers, and is 1d or a column vector.\n * 'continuous-multioutput': `y` is a 2d tensor of floats that are\n not all integers, and both dimensions are of size > 1.\n * 'binary': `y` contains <= 2 discrete values and is 1d or a column\n vector.\n * 'multiclass': `y` contains more than two discrete values, is not a\n sequence of sequences, and is 1d or a column vector.\n * 'multiclass-multioutput': `y` is a 2d tensor that contains more\n than two discrete values, is not a sequence of sequences, and both\n dimensions are of size > 1.\n * 'multilabel-indicator': `y` is a 
label indicator matrix, a tensor\n of two dimensions with at least two columns, and at most 2 unique\n values.\n * 'unknown': `y` is array-like but none of the above, such as a 3d\n tensor, sequence of sequences, or a tensor of non-sequence objects.\n\n Examples\n --------\n >>> import mars.tensor as mt\n >>> from mars.learn.utils.multiclass import type_of_target\n >>> type_of_target([0.1, 0.6]).execute()\n 'continuous'\n >>> type_of_target([1, -1, -1, 1]).execute()\n 'binary'\n >>> type_of_target(['a', 'b', 'a']).execute()\n 'binary'\n >>> type_of_target([1.0, 2.0]).execute()\n 'binary'\n >>> type_of_target([1, 0, 2]).execute()\n 'multiclass'\n >>> type_of_target([1.0, 0.0, 3.0]).execute()\n 'multiclass'\n >>> type_of_target(['a', 'b', 'c']).execute()\n 'multiclass'\n >>> type_of_target(mt.array([[1, 2], [3, 1]])).execute()\n 'multiclass-multioutput'\n >>> type_of_target([[1, 2]]).execute()\n 'multiclass-multioutput'\n >>> type_of_target(mt.array([[1.5, 2.0], [3.0, 1.6]])).execute()\n 'continuous-multioutput'\n >>> type_of_target(mt.array([[0, 1], [1, 1]])).execute()\n 'multilabel-indicator'\n \"\"\"\n valid_types = (Sequence, spmatrix) if spmatrix is not None else (Sequence,)\n valid = ((isinstance(y, valid_types) or hasattr(y, '__array__'))\n and not isinstance(y, str))\n\n if not valid:\n raise ValueError('Expected array-like (array or non-string sequence), '\n 'got %r' % y)\n\n sparse_pandas = (y.__class__.__name__ in ['SparseSeries', 'SparseArray'])\n if sparse_pandas: # pragma: no cover\n raise ValueError(\"y cannot be class 'SparseSeries' or 'SparseArray'\")\n\n if isinstance(y, (Base, Entity)):\n y = mt.tensor(y)\n\n op = TypeOfTarget(y=y)\n return op(y)\n\n\ndef check_classification_targets(y):\n \"\"\"Ensure that target y is of a non-regression type.\n\n Only the following target types (as defined in type_of_target) are allowed:\n 'binary', 'multiclass', 'multiclass-multioutput',\n 'multilabel-indicator', 'multilabel-sequences'\n\n Parameters\n ----------\n y : array-like\n \"\"\"\n y_type = type_of_target(y)\n y_type.op._checked_targets = ['binary', 'multiclass', 'multiclass-multioutput',\n 'multilabel-indicator', 'multilabel-sequences']\n return y_type\n",
"# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom collections import namedtuple\n\nimport numpy as np\nimport pandas as pd\n\nfrom .... import opcodes\nfrom ....serialize import BoolField, Float64Field\nfrom ..aggregation import BaseDataFrameExpandingAgg\n\n_stage_info = namedtuple('_stage_info', ('map_groups', 'map_sources', 'combine_sources',\n 'combine_columns', 'combine_funcs', 'key_to_funcs',\n 'valid_columns', 'min_periods_func_name'))\n\n_cum_alpha_coeff_func = '_cum_alpha_coeff'\n_cum_square_alpha_coeff_func = '_cum_square_alpha_coeff'\n\n\ndef _add_pred_results(pred_results, local_results, axis=0, alpha=None, order=1,\n alpha_ignore_na=False, pred_exponent=None, alpha_data=None):\n if pred_results[0].ndim == 1:\n df_filler = 0\n else:\n df_filler = pred_results[0].iloc[-1, :].dropna()\n df_filler[:] = 0\n\n new_locals = []\n combine_axis = pred_results[0].ndim - axis - 1\n weight = (1 - alpha) ** order\n pred_coeff = weight ** pred_exponent\n for idx, (pred_result, local_result) in enumerate(zip(pred_results, local_results)):\n local_result.fillna(df_filler, inplace=True)\n pred_result = pred_result.mul(pred_coeff).sum(axis=axis)\n\n if alpha_ignore_na:\n pred_df = pred_result * weight ** alpha_data.notna().cumsum()\n else:\n weights = np.arange(1, len(local_result) + 1)\n if local_result.ndim == 2:\n weights_df = pd.DataFrame(\n np.repeat(weights.reshape((len(local_result), 1)), len(local_result.columns), axis=1),\n columns=local_result.columns, index=local_result.index)\n else:\n weights_df = pd.Series(weights, index=local_result.index)\n weights_df[alpha_data.isna()] = np.nan\n weights_df.ffill(inplace=True)\n weights_df.fillna(0, inplace=True)\n\n weights_df = weight ** weights_df\n pred_df = weights_df.mul(pred_result, axis=combine_axis)\n\n new_locals.append(local_result.add(pred_df, axis=combine_axis))\n return new_locals\n\n\ndef _combine_mean(pred_results, local_results, axis=0, alpha=None, alpha_ignore_na=False,\n pred_exponent=None):\n if pred_results is None:\n return (local_results[0] / local_results[1]).ffill()\n\n alpha_data = local_results[1]\n local_results[0].ffill(inplace=True)\n local_results[1] = alpha_data.ffill()\n\n local_sum_data, local_count_data = local_results\n\n if pred_results is not None:\n local_sum_data, local_count_data = _add_pred_results(\n pred_results, local_results, axis=axis, alpha=alpha, alpha_ignore_na=alpha_ignore_na,\n pred_exponent=pred_exponent, alpha_data=alpha_data\n )\n return local_sum_data / local_count_data\n\n\ndef _combine_var(pred_results, local_results, axis=0, alpha=None, alpha_ignore_na=False,\n pred_exponent=None):\n local_results[0].ffill(inplace=True)\n alpha_data = local_results[1]\n local_results[1] = alpha_data.ffill()\n\n local_results[2].ffill(inplace=True)\n alpha2_data = local_results[3]\n local_results[3] = alpha2_data.ffill()\n\n local_sum_data, local_count_data, local_sum_square, local_count2_data = local_results\n if pred_results is None:\n return 
(local_sum_square - local_sum_data ** 2 / local_count_data) \\\n / (local_count_data - local_count2_data / local_count_data)\n\n pred_sum_data, pred_count_data, pred_sum_square, pred_count2_data = pred_results\n\n local_count2_data, = _add_pred_results(\n [pred_count2_data], [local_count2_data], axis=axis, alpha=alpha, order=2,\n alpha_ignore_na=alpha_ignore_na, pred_exponent=pred_exponent, alpha_data=alpha_data)\n\n local_sum_square, local_sum_data, local_count_data = \\\n _add_pred_results(\n [pred_sum_square, pred_sum_data, pred_count_data],\n [local_sum_square, local_sum_data, local_count_data],\n axis=axis, alpha=alpha, alpha_ignore_na=alpha_ignore_na,\n pred_exponent=pred_exponent, alpha_data=alpha_data\n )\n\n return (local_sum_square - local_sum_data ** 2 / local_count_data) \\\n / (local_count_data - local_count2_data / local_count_data)\n\n\ndef _combine_std(pred_results, local_results, axis=0, alpha=None, alpha_ignore_na=False,\n pred_exponent=None):\n return np.sqrt(_combine_var(\n pred_results, local_results, axis=axis, alpha=alpha, alpha_ignore_na=alpha_ignore_na,\n pred_exponent=pred_exponent))\n\n\ndef _combine_data_count(pred_results, local_results, axis=0, **__):\n if pred_results is None:\n return local_results[0]\n return local_results[0].add(pred_results[0].sum(), axis=pred_results[0].ndim - axis - 1)\n\n\nclass DataFrameEwmAgg(BaseDataFrameExpandingAgg):\n _op_type_ = opcodes.EWM_AGG\n\n _alpha = Float64Field('alpha')\n _adjust = BoolField('adjust')\n _alpha_ignore_na = BoolField('alpha_ignore_na')\n\n _validate_columns = BoolField('_validate_columns')\n\n _exec_cache = dict()\n\n def __init__(self, alpha=None, adjust=None, alpha_ignore_na=None, validate_columns=None, **kw):\n super().__init__(_alpha=alpha, _adjust=adjust, _alpha_ignore_na=alpha_ignore_na,\n _validate_columns=validate_columns, **kw)\n\n @property\n def alpha(self) -> float:\n return self._alpha\n\n @property\n def adjust(self) -> bool:\n return self._adjust\n\n @property\n def alpha_ignore_na(self) -> bool:\n return self._alpha_ignore_na\n\n @property\n def validate_columns(self) -> bool:\n return self._validate_columns\n\n @classmethod\n def _get_stage_functions(cls, op: \"DataFrameEwmAgg\", func):\n if func == '_data_count':\n return ['_data_count'], _combine_data_count\n elif func == 'mean':\n return ['cumsum', _cum_alpha_coeff_func], _combine_mean\n elif func in {'var', 'std'}:\n return ['cumsum', _cum_alpha_coeff_func, 'cumsum2', _cum_square_alpha_coeff_func], \\\n _combine_var if func == 'var' else _combine_std\n else: # pragma: no cover\n raise NotImplementedError\n\n @classmethod\n def _calc_data_alphas(cls, op: \"DataFrameEwmAgg\", in_data, order):\n exec_cache = cls._exec_cache[op.key]\n cache_key = ('_calc_data_alphas', order, id(in_data))\n try:\n return exec_cache[cache_key]\n except KeyError:\n pass\n\n cum_df = in_data.copy()\n cum_df[cum_df.notna()] = 1\n if not op.alpha_ignore_na:\n cum_df.ffill(inplace=True)\n cum_df = cum_df.cumsum(axis=op.axis) - 1\n if not op.alpha_ignore_na:\n cum_df[in_data.isna()] = np.nan\n\n result = exec_cache[cache_key] = (1 - op.alpha) ** (order * cum_df)\n return result\n\n @classmethod\n def _execute_cum_alpha_coeff(cls, op: \"DataFrameEwmAgg\", in_data, order, final=True):\n exec_cache = cls._exec_cache[op.key]\n cache_key = ('cum_alpha_coeff', order, id(in_data))\n summary = None\n\n try:\n result = exec_cache[cache_key]\n except KeyError:\n alphas = cls._calc_data_alphas(op, in_data, order)\n result = alphas.cumsum()\n exec_cache[cache_key] = 
result\n\n if final:\n if op.output_agg:\n summary = result.ffill()[-1:]\n return result, summary\n\n @classmethod\n def _execute_cumsum(cls, op: \"DataFrameEwmAgg\", in_data):\n exec_cache = cls._exec_cache[op.key]\n cache_key = ('cumsum', id(in_data))\n summary = None\n\n try:\n result = exec_cache[cache_key]\n except KeyError:\n min_periods = 1 if op.min_periods > 0 else 0\n\n try:\n data = in_data.ewm(alpha=op.alpha, ignore_na=op.alpha_ignore_na, adjust=op.adjust,\n min_periods=min_periods).mean()\n except ValueError:\n in_data = in_data.copy()\n data = in_data.ewm(alpha=op.alpha, ignore_na=op.alpha_ignore_na, adjust=op.adjust,\n min_periods=min_periods).mean()\n\n alpha_sum, _ = op._execute_cum_alpha_coeff(op, in_data, 1, final=False)\n result = exec_cache[cache_key] = data * alpha_sum\n\n if op.output_agg:\n summary = result.ffill()[-1:]\n return result, summary\n\n @classmethod\n def _execute_cumsum2(cls, op: \"DataFrameEwmAgg\", in_data):\n summary = None\n min_periods = 1 if op.min_periods > 0 else 0\n\n try:\n data = in_data.ewm(alpha=op.alpha, ignore_na=op.alpha_ignore_na, adjust=op.adjust,\n min_periods=min_periods).var(bias=True)\n except ValueError:\n in_data = in_data.copy()\n data = in_data.ewm(alpha=op.alpha, ignore_na=op.alpha_ignore_na, adjust=op.adjust,\n min_periods=min_periods).var(bias=True)\n\n alpha_sum, _ = op._execute_cum_alpha_coeff(op, in_data, 1)\n cumsum, _ = op._execute_cumsum(op, in_data)\n result = alpha_sum * data + cumsum ** 2 / alpha_sum\n\n if op.output_agg:\n summary = result.ffill()[-1:]\n\n return result, summary\n\n @classmethod\n def _execute_map_function(cls, op: \"DataFrameEwmAgg\", func, in_data):\n in_data = in_data._get_numeric_data()\n\n summary = None\n min_periods = 1 if op.min_periods > 0 else 0\n if func == '_data_count':\n result = in_data.expanding(min_periods=min_periods).count()\n elif func in (_cum_alpha_coeff_func, _cum_square_alpha_coeff_func):\n order = 1 if func == _cum_alpha_coeff_func else 2\n result, summary = cls._execute_cum_alpha_coeff(op, in_data, order)\n elif func == 'cumsum':\n result, summary = cls._execute_cumsum(op, in_data)\n elif func == 'cumsum2':\n result, summary = cls._execute_cumsum2(op, in_data)\n else: # pragma: no cover\n raise ValueError('Map function %s not supported')\n\n if op.output_agg:\n summary = summary if summary is not None else result.iloc[-1:]\n else:\n summary = None\n return result, summary\n\n @classmethod\n def _execute_map(cls, ctx, op: \"DataFrameEwmAgg\"):\n try:\n cls._exec_cache[op.key] = dict()\n\n super()._execute_map(ctx, op)\n if op.output_agg:\n in_data = ctx[op.inputs[0].key]\n summaries = list(ctx[op.outputs[1].key])\n\n if op.alpha_ignore_na:\n in_count = in_data.count()\n if not isinstance(in_count, pd.Series):\n in_count = pd.Series([in_count])\n summary = in_count\n if in_data.ndim == 2:\n summary = in_count.to_frame().T\n summary.index = summaries[-1].index\n else:\n remain_counts = in_data.notna()[::-1].to_numpy().argmax(axis=0)\n if in_data.ndim > 1:\n remain_counts = remain_counts.reshape((1, len(in_data.columns)))\n summary = pd.DataFrame(remain_counts, columns=in_data.columns, index=summaries[-1].index)\n else:\n summary = pd.Series(remain_counts, index=summaries[-1].index)\n summaries.insert(-1, summary)\n\n ctx[op.outputs[1].key] = tuple(summaries)\n finally:\n cls._exec_cache.pop(op.key, None)\n\n @classmethod\n def _execute_combine_function(cls, op: \"DataFrameEwmAgg\", func, prev_inputs, local_inputs,\n func_cols):\n exec_cache = cls._exec_cache[op.key]\n 
pred_exponent = exec_cache.get('pred_exponent')\n if func_cols and pred_exponent is not None:\n pred_exponent = pred_exponent[func_cols] if pred_exponent is not None else None\n return func(prev_inputs, local_inputs, axis=op.axis, alpha=op.alpha,\n alpha_ignore_na=op.alpha_ignore_na, pred_exponent=pred_exponent)\n\n @classmethod\n def _execute_combine(cls, ctx, op: \"DataFrameEwmAgg\"):\n try:\n cls._exec_cache[op.key] = dict()\n\n if len(op.inputs) != 1:\n pred_data = ctx[op.inputs[1].key]\n\n if op.alpha_ignore_na:\n pred_exponent = pred_data[-2].shift(-1)[::-1].cumsum()[::-1].fillna(0)\n else:\n succ_counts = pred_data[-1].shift(-1)\n succ_counts.iloc[-1] = 0\n pred_exponent = pred_data[-2].add(succ_counts[::-1].cumsum()[::-1], axis=op.axis)\n\n cls._exec_cache[op.key]['pred_exponent'] = pred_exponent\n\n super()._execute_combine(ctx, op)\n finally:\n cls._exec_cache.pop(op.key, None)\n\n @classmethod\n def _execute_raw_function(cls, op: \"DataFrameEwmAgg\", in_data):\n for _ in range(2):\n ewm = in_data.ewm(alpha=op.alpha, min_periods=op.min_periods, adjust=op.adjust,\n ignore_na=op.alpha_ignore_na)\n try:\n val = ewm.agg(op.func)\n if in_data.ndim == 2 and op.validate_columns \\\n and len(val.columns) != len(op.outputs[0].columns_value.to_pandas()):\n raise ValueError('Columns not consistent')\n return val\n except ValueError:\n in_data = in_data.copy()\n else: # pragma: no cover\n raise ValueError\n",
"# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport operator\nimport unittest\nfrom functools import reduce\n\nimport pandas as pd\nimport numpy as np\n\nfrom mars import opcodes as OperandDef\nfrom mars.operands import OperandStage\nfrom mars.tests.core import TestBase, parameterized\nfrom mars.tensor import Tensor\nfrom mars.dataframe.core import DataFrame, IndexValue, Series, OutputType\nfrom mars.dataframe.reduction import DataFrameSum, DataFrameProd, DataFrameMin, \\\n DataFrameMax, DataFrameCount, DataFrameMean, DataFrameVar, DataFrameCummin, \\\n DataFrameCummax, DataFrameCumprod, DataFrameCumsum, DataFrameNunique\nfrom mars.dataframe.merge import DataFrameConcat\nfrom mars.dataframe.datasource.series import from_pandas as from_pandas_series\nfrom mars.dataframe.datasource.dataframe import from_pandas as from_pandas_df\n\n\nreduction_functions = dict(\n sum=dict(func_name='sum', op=DataFrameSum, has_skipna=True),\n prod=dict(func_name='prod', op=DataFrameProd, has_skipna=True),\n min=dict(func_name='min', op=DataFrameMin, has_skipna=True),\n max=dict(func_name='max', op=DataFrameMax, has_skipna=True),\n count=dict(func_name='count', op=DataFrameCount, has_skipna=False),\n mean=dict(func_name='mean', op=DataFrameMean, has_skipna=True),\n var=dict(func_name='var', op=DataFrameVar, has_skipna=True),\n)\n\n\n@parameterized(**reduction_functions)\nclass TestReduction(TestBase):\n @property\n def op_name(self):\n return getattr(OperandDef, self.func_name.upper())\n\n def testSeriesReductionSerialize(self):\n data = pd.Series(np.random.rand(10), name='a')\n if self.has_skipna:\n kwargs = dict(axis='index', skipna=False)\n else:\n kwargs = dict()\n reduction_df = getattr(from_pandas_series(data), self.func_name)(**kwargs).tiles()\n\n # pb\n chunk = reduction_df.chunks[0]\n serials = self._pb_serial(chunk)\n op, pb = serials[chunk.op, chunk.data]\n\n self.assertEqual(tuple(pb.index), chunk.index)\n self.assertEqual(pb.key, chunk.key)\n self.assertEqual(tuple(pb.shape), chunk.shape)\n self.assertEqual(int(op.type.split('.', 1)[1]), self.op_name)\n\n chunk2 = self._pb_deserial(serials)[chunk.data]\n\n self.assertEqual(chunk.index, chunk2.index)\n self.assertEqual(chunk.key, chunk2.key)\n self.assertEqual(chunk.shape, chunk2.shape)\n self.assertEqual(chunk.op.skipna, chunk2.op.skipna)\n self.assertEqual(chunk.op.axis, chunk2.op.axis)\n\n # json\n chunk = reduction_df.chunks[0]\n serials = self._json_serial(chunk)\n\n chunk2 = self._json_deserial(serials)[chunk.data]\n\n self.assertEqual(chunk.index, chunk2.index)\n self.assertEqual(chunk.key, chunk2.key)\n self.assertEqual(chunk.shape, chunk2.shape)\n self.assertEqual(chunk.op.skipna, chunk2.op.skipna)\n self.assertEqual(chunk.op.axis, chunk2.op.axis)\n\n def testSeriesReduction(self):\n data = pd.Series({'a': list(range(20))}, index=[str(i) for i in range(20)])\n series = getattr(from_pandas_series(data, chunk_size=3), self.func_name)()\n\n self.assertIsInstance(series, Tensor)\n 
self.assertEqual(series.shape, ())\n\n series = series.tiles()\n\n self.assertEqual(len(series.chunks), 1)\n self.assertIsInstance(series.chunks[0].op, self.op)\n self.assertIsInstance(series.chunks[0].inputs[0].op, DataFrameConcat)\n self.assertEqual(len(series.chunks[0].inputs[0].inputs), 2)\n\n data = pd.Series(np.random.rand(25), name='a')\n if self.has_skipna:\n kwargs = dict(axis='index', skipna=False)\n else:\n kwargs = dict()\n series = getattr(from_pandas_series(data, chunk_size=7), self.func_name)(**kwargs)\n\n self.assertIsInstance(series, Tensor)\n self.assertEqual(series.shape, ())\n\n series = series.tiles()\n\n self.assertEqual(len(series.chunks), 1)\n self.assertIsInstance(series.chunks[0].op, self.op)\n self.assertIsInstance(series.chunks[0].inputs[0].op, DataFrameConcat)\n self.assertEqual(len(series.chunks[0].inputs[0].inputs), 4)\n\n def testDataFrameReductionSerialize(self):\n data = pd.DataFrame(np.random.rand(10, 8), columns=[np.random.bytes(10) for _ in range(8)])\n kwargs = dict(axis='index', numeric_only=True)\n if self.has_skipna:\n kwargs['skipna'] = False\n reduction_df = getattr(from_pandas_df(data, chunk_size=3), self.func_name)(**kwargs).tiles()\n\n # pb\n chunk = reduction_df.chunks[0]\n serials = self._pb_serial(chunk)\n op, pb = serials[chunk.op, chunk.data]\n\n self.assertEqual(tuple(pb.index), chunk.index)\n self.assertEqual(pb.key, chunk.key)\n self.assertEqual(tuple(pb.shape), chunk.shape)\n self.assertEqual(int(op.type.split('.', 1)[1]), self.op_name)\n\n chunk2 = self._pb_deserial(serials)[chunk.data]\n\n self.assertEqual(chunk.index, chunk2.index)\n self.assertEqual(chunk.key, chunk2.key)\n self.assertEqual(chunk.shape, chunk2.shape)\n self.assertEqual(chunk.op.skipna, chunk2.op.skipna)\n self.assertEqual(chunk.op.axis, chunk2.op.axis)\n self.assertEqual(chunk.op.numeric_only, chunk2.op.numeric_only)\n pd.testing.assert_index_equal(chunk2.index_value.to_pandas(), chunk.index_value.to_pandas())\n\n # json\n chunk = reduction_df.chunks[0]\n serials = self._json_serial(chunk)\n\n chunk2 = self._json_deserial(serials)[chunk.data]\n\n self.assertEqual(chunk.index, chunk2.index)\n self.assertEqual(chunk.key, chunk2.key)\n self.assertEqual(chunk.shape, chunk2.shape)\n self.assertEqual(chunk.op.skipna, chunk2.op.skipna)\n self.assertEqual(chunk.op.axis, chunk2.op.axis)\n self.assertEqual(chunk.op.numeric_only, chunk2.op.numeric_only)\n pd.testing.assert_index_equal(chunk2.index_value.to_pandas(), chunk.index_value.to_pandas())\n\n def testDataFrameReduction(self):\n data = pd.DataFrame({'a': list(range(20)), 'b': list(range(20, 0, -1))},\n index=[str(i) for i in range(20)])\n reduction_df = getattr(from_pandas_df(data, chunk_size=3), self.func_name)()\n\n self.assertIsInstance(reduction_df, Series)\n self.assertIsInstance(reduction_df.index_value._index_value, IndexValue.Index)\n self.assertEqual(reduction_df.shape, (2,))\n\n reduction_df = reduction_df.tiles()\n\n self.assertEqual(len(reduction_df.chunks), 1)\n self.assertIsInstance(reduction_df.chunks[0].op, self.op)\n self.assertIsInstance(reduction_df.chunks[0].inputs[0].op, DataFrameConcat)\n self.assertEqual(len(reduction_df.chunks[0].inputs[0].inputs), 2)\n\n data = pd.DataFrame(np.random.rand(20, 10))\n reduction_df = getattr(from_pandas_df(data, chunk_size=3), self.func_name)()\n\n self.assertIsInstance(reduction_df, Series)\n self.assertIsInstance(reduction_df.index_value._index_value, IndexValue.RangeIndex)\n self.assertEqual(reduction_df.shape, (10,))\n\n reduction_df = reduction_df.tiles()\n\n 
self.assertEqual(len(reduction_df.chunks), 4)\n self.assertEqual(reduction_df.nsplits, ((3, 3, 3, 1),))\n self.assertIsInstance(reduction_df.chunks[0].op, self.op)\n self.assertIsInstance(reduction_df.chunks[0].inputs[0].op, DataFrameConcat)\n self.assertEqual(len(reduction_df.chunks[0].inputs[0].inputs), 2)\n\n data = pd.DataFrame(np.random.rand(20, 20), index=[str(i) for i in range(20)])\n reduction_df = getattr(from_pandas_df(data, chunk_size=4), self.func_name)(axis='columns')\n\n self.assertEqual(reduction_df.shape, (20,))\n\n reduction_df = reduction_df.tiles()\n\n self.assertEqual(len(reduction_df.chunks), 5)\n self.assertEqual(reduction_df.nsplits, ((4,) * 5,))\n self.assertIsInstance(reduction_df.chunks[0].op, self.op)\n self.assertIsInstance(reduction_df.chunks[0].inputs[0].op, DataFrameConcat)\n self.assertEqual(len(reduction_df.chunks[0].inputs[0].inputs), 2)\n\n\ncum_reduction_functions = dict(\n cummin=dict(func_name='cummin', op=DataFrameCummin, has_skipna=True),\n cummax=dict(func_name='cummax', op=DataFrameCummax, has_skipna=True),\n cumprod=dict(func_name='cumprod', op=DataFrameCumprod, has_skipna=True),\n cumsum=dict(func_name='cumsum', op=DataFrameCumsum, has_skipna=True),\n)\n\n\n@parameterized(**cum_reduction_functions)\nclass TestCumReduction(TestBase):\n @property\n def op_name(self):\n return getattr(OperandDef, self.func_name.upper())\n\n def testSeriesReductionSerialize(self):\n data = pd.Series(np.random.rand(10), name='a')\n if self.has_skipna:\n kwargs = dict(axis='index', skipna=False)\n else:\n kwargs = dict()\n reduction_df = getattr(from_pandas_series(data), self.func_name)(**kwargs).tiles()\n\n # pb\n chunk = reduction_df.chunks[0]\n serials = self._pb_serial(chunk)\n op, pb = serials[chunk.op, chunk.data]\n\n self.assertEqual(tuple(pb.index), chunk.index)\n self.assertEqual(pb.key, chunk.key)\n self.assertEqual(tuple(pb.shape), chunk.shape)\n self.assertEqual(int(op.type.split('.', 1)[1]), self.op_name)\n\n chunk2 = self._pb_deserial(serials)[chunk.data]\n\n self.assertEqual(chunk.index, chunk2.index)\n self.assertEqual(chunk.key, chunk2.key)\n self.assertEqual(chunk.shape, chunk2.shape)\n self.assertEqual(chunk.op.skipna, chunk2.op.skipna)\n self.assertEqual(chunk.op.axis, chunk2.op.axis)\n pd.testing.assert_index_equal(chunk.index_value.to_pandas(), chunk2.index_value.to_pandas())\n\n # json\n chunk = reduction_df.chunks[0]\n serials = self._json_serial(chunk)\n\n chunk2 = self._json_deserial(serials)[chunk.data]\n\n self.assertEqual(chunk.index, chunk2.index)\n self.assertEqual(chunk.key, chunk2.key)\n self.assertEqual(chunk.shape, chunk2.shape)\n self.assertEqual(chunk.op.skipna, chunk2.op.skipna)\n self.assertEqual(chunk.op.axis, chunk2.op.axis)\n pd.testing.assert_index_equal(chunk.index_value.to_pandas(), chunk2.index_value.to_pandas())\n\n def testSeriesReduction(self):\n data = pd.Series({'a': list(range(20))}, index=[str(i) for i in range(20)])\n series = getattr(from_pandas_series(data, chunk_size=3), self.func_name)()\n\n self.assertIsInstance(series, Series)\n self.assertEqual(series.shape, (20,))\n\n series = series.tiles()\n\n self.assertEqual(len(series.chunks), 7)\n self.assertIsInstance(series.chunks[0].op, self.op)\n self.assertEqual(series.chunks[0].op.stage, OperandStage.combine)\n self.assertIsInstance(series.chunks[-1].inputs[-1].op, self.op)\n self.assertEqual(series.chunks[-1].inputs[-1].op.stage, OperandStage.map)\n self.assertEqual(len(series.chunks[-1].inputs), 7)\n\n data = pd.Series(np.random.rand(25), name='a')\n if 
self.has_skipna:\n kwargs = dict(axis='index', skipna=False)\n else:\n kwargs = dict()\n series = getattr(from_pandas_series(data, chunk_size=7), self.func_name)(**kwargs)\n\n self.assertIsInstance(series, Series)\n self.assertEqual(series.shape, (25,))\n\n series = series.tiles()\n\n self.assertEqual(len(series.chunks), 4)\n self.assertIsInstance(series.chunks[0].op, self.op)\n self.assertEqual(series.chunks[0].op.stage, OperandStage.combine)\n self.assertIsInstance(series.chunks[-1].inputs[-1].op, self.op)\n self.assertEqual(series.chunks[-1].inputs[-1].op.stage, OperandStage.map)\n self.assertEqual(len(series.chunks[-1].inputs), 4)\n\n def testDataFrameReductionSerialize(self):\n data = pd.DataFrame(np.random.rand(10, 8), columns=[np.random.bytes(10) for _ in range(8)])\n kwargs = dict(axis='index')\n if self.has_skipna:\n kwargs['skipna'] = False\n reduction_df = getattr(from_pandas_df(data, chunk_size=3), self.func_name)(**kwargs).tiles()\n\n # pb\n chunk = reduction_df.chunks[0]\n serials = self._pb_serial(chunk)\n op, pb = serials[chunk.op, chunk.data]\n\n self.assertEqual(tuple(pb.index), chunk.index)\n self.assertEqual(pb.key, chunk.key)\n self.assertEqual(tuple(pb.shape), chunk.shape)\n self.assertEqual(int(op.type.split('.', 1)[1]), self.op_name)\n\n chunk2 = self._pb_deserial(serials)[chunk.data]\n\n self.assertEqual(chunk.index, chunk2.index)\n self.assertEqual(chunk.key, chunk2.key)\n self.assertEqual(chunk.shape, chunk2.shape)\n self.assertEqual(chunk.op.skipna, chunk2.op.skipna)\n self.assertEqual(chunk.op.axis, chunk2.op.axis)\n pd.testing.assert_index_equal(chunk2.columns_value.to_pandas(), chunk.columns_value.to_pandas())\n pd.testing.assert_index_equal(chunk2.index_value.to_pandas(), chunk.index_value.to_pandas())\n\n # json\n chunk = reduction_df.chunks[0]\n serials = self._json_serial(chunk)\n\n chunk2 = self._json_deserial(serials)[chunk.data]\n\n self.assertEqual(chunk.index, chunk2.index)\n self.assertEqual(chunk.key, chunk2.key)\n self.assertEqual(chunk.shape, chunk2.shape)\n self.assertEqual(chunk.op.skipna, chunk2.op.skipna)\n self.assertEqual(chunk.op.axis, chunk2.op.axis)\n pd.testing.assert_index_equal(chunk2.columns_value.to_pandas(), chunk.columns_value.to_pandas())\n pd.testing.assert_index_equal(chunk2.index_value.to_pandas(), chunk.index_value.to_pandas())\n\n def testDataFrameReduction(self):\n data = pd.DataFrame({'a': list(range(20)), 'b': list(range(20, 0, -1))},\n index=[str(i) for i in range(20)])\n reduction_df = getattr(from_pandas_df(data, chunk_size=3), self.func_name)()\n\n self.assertIsInstance(reduction_df, DataFrame)\n self.assertIsInstance(reduction_df.index_value._index_value, IndexValue.Index)\n self.assertEqual(reduction_df.shape, (20, 2))\n\n reduction_df = reduction_df.tiles()\n\n self.assertEqual(len(reduction_df.chunks), 7)\n self.assertIsInstance(reduction_df.chunks[0].op, self.op)\n self.assertEqual(reduction_df.chunks[0].op.stage, OperandStage.combine)\n self.assertIsInstance(reduction_df.chunks[-1].inputs[-1].op, self.op)\n self.assertEqual(reduction_df.chunks[-1].inputs[-1].op.stage, OperandStage.map)\n self.assertEqual(len(reduction_df.chunks[-1].inputs), 7)\n\n data = pd.DataFrame(np.random.rand(20, 10))\n reduction_df = getattr(from_pandas_df(data, chunk_size=3), self.func_name)()\n\n self.assertIsInstance(reduction_df, DataFrame)\n self.assertIsInstance(reduction_df.index_value._index_value, IndexValue.RangeIndex)\n self.assertEqual(reduction_df.shape, (20, 10))\n\n reduction_df = reduction_df.tiles()\n\n 
self.assertEqual(len(reduction_df.chunks), 28)\n self.assertEqual(reduction_df.nsplits, ((3, 3, 3, 3, 3, 3, 2), (3, 3, 3, 1)))\n self.assertEqual(reduction_df.chunks[0].op.stage, OperandStage.combine)\n self.assertIsInstance(reduction_df.chunks[-1].inputs[-1].op, self.op)\n self.assertEqual(reduction_df.chunks[-1].inputs[-1].op.stage, OperandStage.map)\n self.assertEqual(len(reduction_df.chunks[-1].inputs), 7)\n\n def testNunique(self):\n data = pd.DataFrame(np.random.randint(0, 6, size=(20, 10)),\n columns=['c' + str(i) for i in range(10)])\n df = from_pandas_df(data, chunk_size=3)\n result = df.nunique()\n\n self.assertEqual(result.shape, (10,))\n self.assertEqual(result.op.output_types[0], OutputType.series)\n self.assertIsInstance(result.op, DataFrameNunique)\n\n tiled = result.tiles()\n self.assertEqual(tiled.shape, (10,))\n self.assertEqual(len(tiled.chunks), 4)\n self.assertEqual(tiled.nsplits, ((3, 3, 3, 1,),))\n self.assertEqual(tiled.chunks[0].op.stage, OperandStage.agg)\n self.assertIsInstance(tiled.chunks[0].op, DataFrameNunique)\n\n data2 = data.copy()\n df2 = from_pandas_df(data2, chunk_size=3)\n result2 = df2.nunique(axis=1)\n\n self.assertEqual(result2.shape, (20,))\n self.assertEqual(result2.op.output_types[0], OutputType.series)\n self.assertIsInstance(result2.op, DataFrameNunique)\n\n tiled = result2.tiles()\n self.assertEqual(tiled.shape, (20,))\n self.assertEqual(len(tiled.chunks), 7)\n self.assertEqual(tiled.nsplits, ((3, 3, 3, 3, 3, 3, 2,),))\n self.assertEqual(tiled.chunks[0].op.stage, OperandStage.agg)\n self.assertIsInstance(tiled.chunks[0].op, DataFrameNunique)\n\n\nclass TestAggregate(TestBase):\n def testDataFrameAggregate(self):\n data = pd.DataFrame(np.random.rand(20, 19))\n agg_funcs = ['sum', 'min', 'max', 'mean', 'var', 'std']\n\n df = from_pandas_df(data)\n result = df.agg(agg_funcs).tiles()\n self.assertEqual(len(result.chunks), 1)\n self.assertEqual(result.shape, (6, data.shape[1]))\n self.assertListEqual(list(result.columns_value.to_pandas()), list(range(19)))\n self.assertListEqual(list(result.index_value.to_pandas()), agg_funcs)\n self.assertEqual(result.op.output_types[0], OutputType.dataframe)\n self.assertListEqual(result.op.func, agg_funcs)\n\n df = from_pandas_df(data, chunk_size=(3, 4))\n\n result = df.agg('sum').tiles()\n self.assertEqual(len(result.chunks), 5)\n self.assertEqual(result.shape, (data.shape[1],))\n self.assertListEqual(list(result.index_value.to_pandas()), list(range(data.shape[1])))\n self.assertEqual(result.op.output_types[0], OutputType.series)\n self.assertListEqual(result.op.func, ['sum'])\n agg_chunk = result.chunks[0]\n self.assertEqual(agg_chunk.shape, (4,))\n self.assertListEqual(list(agg_chunk.index_value.to_pandas()), list(range(4)))\n self.assertEqual(agg_chunk.op.stage, OperandStage.agg)\n\n result = df.agg('sum', axis=1).tiles()\n self.assertEqual(len(result.chunks), 7)\n self.assertEqual(result.shape, (data.shape[0],))\n self.assertListEqual(list(result.index_value.to_pandas()), list(range(data.shape[0])))\n self.assertEqual(result.op.output_types[0], OutputType.series)\n agg_chunk = result.chunks[0]\n self.assertEqual(agg_chunk.shape, (3,))\n self.assertListEqual(list(agg_chunk.index_value.to_pandas()), list(range(3)))\n self.assertEqual(agg_chunk.op.stage, OperandStage.agg)\n\n result = df.agg('var', axis=1).tiles()\n self.assertEqual(len(result.chunks), 7)\n self.assertEqual(result.shape, (data.shape[0],))\n self.assertListEqual(list(result.index_value.to_pandas()), list(range(data.shape[0])))\n 
self.assertEqual(result.op.output_types[0], OutputType.series)\n self.assertListEqual(result.op.func, ['var'])\n agg_chunk = result.chunks[0]\n self.assertEqual(agg_chunk.shape, (3,))\n self.assertListEqual(list(agg_chunk.index_value.to_pandas()), list(range(3)))\n self.assertEqual(agg_chunk.op.stage, OperandStage.agg)\n\n result = df.agg(['sum', 'min', 'max', 'mean', 'var', 'std']).tiles()\n self.assertEqual(len(result.chunks), 5)\n self.assertEqual(result.shape, (len(agg_funcs), data.shape[1]))\n self.assertListEqual(list(result.columns_value.to_pandas()), list(range(data.shape[1])))\n self.assertListEqual(list(result.index_value.to_pandas()), agg_funcs)\n self.assertEqual(result.op.output_types[0], OutputType.dataframe)\n self.assertListEqual(result.op.func, agg_funcs)\n agg_chunk = result.chunks[0]\n self.assertEqual(agg_chunk.shape, (len(agg_funcs), 4))\n self.assertListEqual(list(agg_chunk.columns_value.to_pandas()), list(range(4)))\n self.assertListEqual(list(agg_chunk.index_value.to_pandas()), agg_funcs)\n self.assertEqual(agg_chunk.op.stage, OperandStage.agg)\n\n result = df.agg(['sum', 'min', 'max', 'mean', 'var', 'std'], axis=1).tiles()\n self.assertEqual(len(result.chunks), 7)\n self.assertEqual(result.shape, (data.shape[0], len(agg_funcs)))\n self.assertListEqual(list(result.columns_value.to_pandas()), agg_funcs)\n self.assertListEqual(list(result.index_value.to_pandas()), list(range(data.shape[0])))\n self.assertEqual(result.op.output_types[0], OutputType.dataframe)\n self.assertListEqual(result.op.func, agg_funcs)\n agg_chunk = result.chunks[0]\n self.assertEqual(agg_chunk.shape, (3, len(agg_funcs)))\n self.assertListEqual(list(agg_chunk.columns_value.to_pandas()), agg_funcs)\n self.assertListEqual(list(agg_chunk.index_value.to_pandas()), list(range(3)))\n self.assertEqual(agg_chunk.op.stage, OperandStage.agg)\n\n dict_fun = {0: 'sum', 2: ['var', 'max'], 9: ['mean', 'var', 'std']}\n all_cols = set(reduce(operator.add, [[v] if isinstance(v, str) else v for v in dict_fun.values()]))\n result = df.agg(dict_fun).tiles()\n self.assertEqual(len(result.chunks), 2)\n self.assertEqual(result.shape, (len(all_cols), len(dict_fun)))\n self.assertSetEqual(set(result.columns_value.to_pandas()), set(dict_fun.keys()))\n self.assertSetEqual(set(result.index_value.to_pandas()), all_cols)\n self.assertEqual(result.op.output_types[0], OutputType.dataframe)\n self.assertListEqual(result.op.func[0], [dict_fun[0]])\n self.assertListEqual(result.op.func[2], dict_fun[2])\n agg_chunk = result.chunks[0]\n self.assertEqual(agg_chunk.shape, (len(all_cols), 2))\n self.assertListEqual(list(agg_chunk.columns_value.to_pandas()), [0, 2])\n self.assertSetEqual(set(agg_chunk.index_value.to_pandas()), all_cols)\n self.assertEqual(agg_chunk.op.stage, OperandStage.agg)\n\n with self.assertRaises(NotImplementedError):\n df.agg({0: ['sum', 'min', 'var'], 9: ['mean', 'var', 'std']}, axis=1)\n\n def testSeriesAggregate(self):\n data = pd.Series(np.random.rand(20), index=[str(i) for i in range(20)], name='a')\n agg_funcs = ['sum', 'min', 'max', 'mean', 'var', 'std']\n\n series = from_pandas_series(data)\n\n result = series.agg(agg_funcs).tiles()\n self.assertEqual(len(result.chunks), 1)\n self.assertEqual(result.shape, (6,))\n self.assertListEqual(list(result.index_value.to_pandas()), agg_funcs)\n self.assertEqual(result.op.output_types[0], OutputType.series)\n self.assertListEqual(result.op.func, agg_funcs)\n\n series = from_pandas_series(data, chunk_size=3)\n\n result = series.agg('sum').tiles()\n 
self.assertEqual(len(result.chunks), 1)\n self.assertEqual(result.shape, ())\n self.assertEqual(result.op.output_types[0], OutputType.scalar)\n agg_chunk = result.chunks[0]\n self.assertEqual(agg_chunk.shape, ())\n self.assertEqual(agg_chunk.op.stage, OperandStage.agg)\n\n result = series.agg(['sum', 'min', 'max', 'mean', 'var', 'std']).tiles()\n self.assertEqual(len(result.chunks), 1)\n self.assertEqual(result.shape, (len(agg_funcs),))\n self.assertListEqual(list(result.index_value.to_pandas()), agg_funcs)\n self.assertEqual(result.op.output_types[0], OutputType.series)\n self.assertListEqual(result.op.func, agg_funcs)\n agg_chunk = result.chunks[0]\n self.assertEqual(agg_chunk.shape, (len(agg_funcs),))\n self.assertListEqual(list(agg_chunk.index_value.to_pandas()), agg_funcs)\n self.assertEqual(agg_chunk.op.stage, OperandStage.agg)\n\n\nif __name__ == '__main__': # pragma: no cover\n unittest.main()\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\nfrom ... import opcodes as OperandDef\nfrom ..datasource import tensor as astensor\nfrom ..array_utils import as_same_device, device\nfrom .core import TensorReduction, TensorReductionMixin, nannumel\nfrom .mean import TensorMean\n\n\nclass TensorNanMean(TensorReduction, TensorReductionMixin):\n _op_type_ = OperandDef.NANMEAN\n\n def __init__(self, axis=None, dtype=None, keepdims=None, combine_size=None, stage=None, **kw):\n stage = self._rewrite_stage(stage)\n super().__init__(_axis=axis, _dtype=dtype, _keepdims=keepdims,\n _combine_size=combine_size, _stage=stage, **kw)\n\n @classmethod\n def execute_map(cls, ctx, op):\n (in_chunk,), device_id, xp = as_same_device(\n [ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True)\n\n axis = cls.get_axis(op.axis)\n\n with device(device_id):\n chunk_count = nannumel(in_chunk, axis=axis, dtype=np.int64,\n keepdims=bool(op.keepdims))\n chunk_sum = xp.nansum(in_chunk, axis=axis, dtype=op.dtype,\n keepdims=bool(op.keepdims))\n ctx[op.outputs[0].key] = (chunk_sum, chunk_count)\n\n @classmethod\n def execute_agg(cls, ctx, op):\n axis = cls.get_axis(op.axis)\n\n a = ctx[op.inputs[0].key]\n if not isinstance(a, (list, tuple)):\n (inp,), device_id, xp = as_same_device(\n [a], device=op.device, ret_extra=True)\n\n with device(device_id):\n ctx[op.outputs[0].key] = xp.nanmean(inp, axis=axis, dtype=op.dtype,\n keepdims=bool(op.keepdims))\n else:\n (_data, _count), device_id, xp = as_same_device(\n a, device=op.device, ret_extra=True)\n\n with device(device_id):\n chunk_count = xp.sum(_count, axis=axis, dtype=op.dtype,\n keepdims=bool(op.keepdims))\n chunk_sum = xp.sum(_data, axis=axis, dtype=op.dtype,\n keepdims=bool(op.keepdims))\n ctx[op.outputs[0].key] = xp.true_divide(chunk_sum, chunk_count,\n dtype=op.dtype)\n\n @classmethod\n def execute_combine(cls, ctx, op):\n TensorMean.execute_combine(ctx, op)\n\n\ndef nanmean(a, axis=None, dtype=None, out=None, keepdims=None, combine_size=None):\n \"\"\"\n Compute the arithmetic mean along the specified axis, ignoring NaNs.\n\n Returns the average of the tensor elements. The average is taken over\n the flattened tensor by default, otherwise over the specified axis.\n `float64` intermediate and return values are used for integer inputs.\n\n For all-NaN slices, NaN is returned and a `RuntimeWarning` is raised.\n\n Parameters\n ----------\n a : array_like\n Tensor containing numbers whose mean is desired. If `a` is not an\n tensor, a conversion is attempted.\n axis : int, optional\n Axis along which the means are computed. The default is to compute\n the mean of the flattened tensor.\n dtype : data-type, optional\n Type to use in computing the mean. For integer inputs, the default\n is `float64`; for inexact inputs, it is the same as the input\n dtype.\n out : Tensor, optional\n Alternate output tensor in which to place the result. 
The default\n is ``None``; if provided, it must have the same shape as the\n expected output, but the type will be cast if necessary. See\n `doc.ufuncs` for details.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the original `a`.\n\n If the value is anything but the default, then\n `keepdims` will be passed through to the `mean` or `sum` methods\n of sub-classes of `Tensor`. If the sub-classes methods\n does not implement `keepdims` any exceptions will be raised.\n combine_size: int, optional\n The number of chunks to combine.\n\n Returns\n -------\n m : Tensor, see dtype parameter above\n If `out=None`, returns a new array containing the mean values,\n otherwise a reference to the output array is returned. Nan is\n returned for slices that contain only NaNs.\n\n See Also\n --------\n average : Weighted average\n mean : Arithmetic mean taken while not ignoring NaNs\n var, nanvar\n\n Notes\n -----\n The arithmetic mean is the sum of the non-NaN elements along the axis\n divided by the number of non-NaN elements.\n\n Note that for floating-point input, the mean is computed using the same\n precision the input has. Depending on the input data, this can cause\n the results to be inaccurate, especially for `float32`. Specifying a\n higher-precision accumulator using the `dtype` keyword can alleviate\n this issue.\n\n Examples\n --------\n >>> import mars.tensor as mt\n\n >>> a = mt.array([[1, mt.nan], [3, 4]])\n >>> mt.nanmean(a).execute()\n 2.6666666666666665\n >>> mt.nanmean(a, axis=0).execute()\n array([ 2., 4.])\n >>> mt.nanmean(a, axis=1).execute()\n array([ 1., 3.5])\n\n \"\"\"\n a = astensor(a)\n if dtype is None:\n dtype = np.nanmean(np.empty((1,), dtype=a.dtype)).dtype\n op = TensorNanMean(axis=axis, dtype=dtype, keepdims=keepdims, combine_size=combine_size)\n return op(a, out=out)\n"
] |
[
[
"numpy.random.RandomState",
"numpy.dtype"
],
[
"pandas.set_option",
"pandas.concat",
"pandas.reset_option",
"numpy.empty"
],
[
"pandas.concat"
],
[
"numpy.random.random",
"numpy.ones"
],
[
"numpy.random.random",
"numpy.random.randint"
],
[
"numpy.isnan",
"numpy.asarray",
"numpy.dtype"
],
[
"pandas.Series",
"pandas.DataFrame"
],
[
"numpy.random.bytes",
"numpy.random.rand",
"numpy.random.randint"
],
[
"numpy.empty"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
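
The Mars nanmean reduction above computes the result in two stages: a map step that emits each chunk's nansum together with a count of its non-NaN elements (nannumel), and an agg step that pools those pairs and divides sum by count. A minimal NumPy-only sketch of that decomposition, assuming plain arrays stand in for Mars chunks and np.sum(~np.isnan(...)) stands in for nannumel:

    import numpy as np

    def nanmean_from_chunks(chunks, axis=None):
        # Map step: per-chunk nansum and per-chunk non-NaN count,
        # mirroring execute_map's (chunk_sum, chunk_count) pair.
        sums = [np.nansum(c, axis=axis) for c in chunks]
        counts = [np.sum(~np.isnan(c), axis=axis) for c in chunks]
        # Agg step: pool the partial results and divide, as execute_agg does.
        return np.sum(sums, axis=0) / np.sum(counts, axis=0)

    data = np.array([1.0, np.nan, 3.0, 4.0, np.nan, 6.0])
    chunks = np.array_split(data, 3)  # emulate Mars chunking
    assert np.isclose(nanmean_from_chunks(chunks), np.nanmean(data))
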
hcgcarry/usad
|
[
"4e99a6acd43ef109be4d89b80e96978b9ad61c2f"
] |
[
"dataSetPreprocess/WADI.A1_9 Oct 2017/label_normal.py"
] |
[
"import pandas as pd\n\nnormal = pd.read_csv(\"WADI_normal.csv\")\n \nnormal[\"Normal/Attack\"] = \"Normal\"\n\nnormal.to_csv(\"WADI_normal_2.csv\")"
] |
[
[
"pandas.read_csv"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
Pichu123/FRC3636_2017
|
[
"667ae111ea9d710157cffaba72cf7a676a2c5da3"
] |
[
"vision.py"
] |
[
"try:\n import cv2\n import numpy as np\nexcept:\n print(\"Couldn't load cv2\")\nimport math\nimport subprocess\n\njpeg = None\n\ndef cvThread(cX):\n subprocess.call(['v4l2-ctl', '-c', 'exposure_auto=1'])\n subprocess.call(['v4l2-ctl', '-c', 'exposure_absolute=5'])\n subprocess.call(['v4l2-ctl', '-c', 'brightness=10'])\n\n global jpeg\n def findCenter(cnt):\n M = cv2.moments(cnt)\n #print(M[\"m00\"])\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n return (cX, cY)\n\n def expand(rect):\n rect = (rect[0], (abs(rect[1][0]) + 4, abs(rect[1][1]) + 4), rect[2])\n return rect\n\n cap = cv2.VideoCapture(0)\n\n while True:\n ret, frame = cap.read()\n frame = cv2.resize(frame, (160, 120))\n frame2 = cv2.resize(frame, (80, 60))\n scaleY = frame.shape[0] / frame2.shape[0]\n scaleX = frame.shape[1] / frame2.shape[1]\n offsetY = 10 * scaleY\n frame2 = frame2[10:,:]\n lower_green = np.array([50,160,27], np.uint8)\n upper_green = np.array([70,255,255], np.uint8)\n\n #gray = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n hsv_green = cv2.cvtColor(frame2,cv2.COLOR_BGR2HSV)\n thresh= cv2.inRange(hsv_green, lower_green, upper_green)\n\n #ret, thresh = cv2.threshold(hsv_green, 32, 255, cv2.THRESH_BINARY)\n #ret, thresh = cv2.threshold(gray, 32, 255, cv2.THRESH_BINARY)\n im2, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n\n contours = [np.int0(cv2.boxPoints(expand(cv2.minAreaRect(contour)))) for contour in contours]\n contours = [(cnt, cv2.contourArea(cnt), findCenter(cnt)) for cnt in contours]\n #contours = filter(lambda cnt: cv2.contourArea(cnt) > 0.0, contours)\n def key(cnt):\n try:\n return cnt[1]\n except:\n return 0.0\n largest_areas = sorted(contours, key=key)\n #cv2.drawContours(frame, largest_areas[-2:], -1, (0,255,0), 3)\n if len(largest_areas) >= 2:\n try:\n pair = largest_areas[-2:]\n #search for pairs\n # pair = None\n # best = None\n # for i in range(0, len(contours)):\n # for j in range(0, len(contours)):\n # if i != j:\n # try:\n # x1, y1 = contours[i][2]\n # x2, y2 = contours[j][2]\n # a1 = contours[i][1]\n # a2 = contours[j][1]\n # if a2 > 8 and a1 > 8:\n # dx = .5 *(x2 - x1)\n # dy = 3.0 * (y2 - y1)\n # score = 0.2 * (abs(x1 - 40) + abs(x2 - 40)) + abs(a1 - a2) + math.sqrt(dx * dx + dy * dy)\n # if pair is not None:\n # if score < best:\n # best = score\n # pair = (contours[i], contours[j])\n # else:\n # pair = (contours[i], contours[j])\n # best = score\n # except Exception as e:\n # print(e)\n # print(\"best: \" + str(best))\n #cv2.drawContours(frame2, [cnt[0] for cnt in contours], -1, (0,0,255), 1)\n #cv2.drawContours(frame2, [pair[0][0], pair[1][0]], -1, (0,255,0), 1)\n x1, y1 = pair[0][2]\n x2, y2 = pair[1][2]\n cv2.circle(frame, (int(x1 * scaleX), int(y1 * scaleY + offsetY)), 4, (255, 0, 0))\n cv2.circle(frame, (int(x2 * scaleX), int(y2 * scaleY + offsetY)), 4, (255, 0, 0))\n cX.cx = (x1 + x2) / 2\n #print(cX.cx)\n cX.cy = (y1 + y2) / 2\n cv2.circle(frame, (int(cX.cx * scaleX), int(cX.cy * scaleY + offsetY)), 4, (255, 0, 255))\n except Exception as e:\n cX.cx = None\n print(e)\n else:\n cX.cx = None\n print(\"No contours\")\n\n jpeg = frame\n\n #cv2.circle(frame, (cX, cY), 7, (255, 255, 255), -1)\n #cv2.putText(frame, \"center\", (cX - 20, cY - 20),\n # cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)\n\n cap.release()\n\ndef flaskThread():\n from flask import Flask, render_template, Response\n\n app = Flask(__name__)\n\n @app.route('/')\n def index():\n return render_template('index.html')\n\n def gen():\n while 
True:\n if jpeg != None:\n ret, tmp = cv2.imencode('.jpg', jpeg)\n jpg = tmp.tobytes()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + jpg + b'\\r\\n\\r\\n')\n\n @app.route('/video_feed')\n def video_feed():\n return Response(gen(),\n mimetype='multipart/x-mixed-replace; boundary=frame')\n\n print(\"Running\")\n app.run(host='0.0.0.0', port=5800, debug=False)\n"
] |
[
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
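
vision.py above isolates the green vision targets by thresholding in HSV, extracting external contours, and locating each contour's centre from its image moments. A condensed sketch of that pipeline, assuming frame is a BGR image already captured from the camera (the exposure setup, target pairing, and Flask MJPEG streaming are left out):

    import cv2
    import numpy as np

    def find_target_centers(frame):
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        # Same green band the script thresholds on.
        mask = cv2.inRange(hsv, np.array([50, 160, 27], np.uint8),
                                np.array([70, 255, 255], np.uint8))
        # findContours returns 2 values on OpenCV 4.x and 3 on 3.x.
        found = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        contours = found[0] if len(found) == 2 else found[1]
        centers = []
        for cnt in contours:
            m = cv2.moments(cnt)
            if m["m00"] > 0:  # skip degenerate contours instead of dividing by zero
                centers.append((m["m10"] / m["m00"], m["m01"] / m["m00"]))
        return centers
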
orazve/daal4py
|
[
"8530377aac4c629102f9cad62a569f18d33e4458",
"8530377aac4c629102f9cad62a569f18d33e4458",
"8530377aac4c629102f9cad62a569f18d33e4458"
] |
[
"examples/implicit_als_batch.py",
"examples/kmeans_batch.py",
"examples/math_softmax_batch.py"
] |
[
"#*******************************************************************************\n# Copyright 2014-2019 Intel Corporation\n# All Rights Reserved.\n#\n# This software is licensed under the Apache License, Version 2.0 (the\n# \"License\"), the following terms apply:\n#\n# You may not use this file except in compliance with the License. You may\n# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#*******************************************************************************\n\n# daal4py implicit_als example for shared memory systems\n\nimport daal4py as d4p\nimport numpy as np\n\n# let's try to use pandas' fast csv reader\ntry:\n import pandas\n read_csv = lambda f, c=None, t=np.float64: pandas.read_csv(f, usecols=c, delimiter=',', header=None, dtype=t)\nexcept:\n # fall back to numpy loadtxt\n read_csv = lambda f, c=None, t=np.float64: np.loadtxt(f, usecols=c, delimiter=',', ndmin=2)\n\n\ndef main(readcsv=read_csv, method='defaultDense'):\n nFactors = 2\n infile = \"./data/batch/implicit_als_dense.csv\"\n # We load the data\n data = readcsv(infile)\n\n # configure a implicit_als init object\n algo1 = d4p.implicit_als_training_init(nFactors=nFactors, method=method)\n # and compute initial model\n result1 = algo1.compute(data)\n\n # configure a implicit_als training object\n algo2 = d4p.implicit_als_training(nFactors=nFactors, method=method)\n # and compute model using initial model\n result2 = algo2.compute(data, result1.model)\n\n # Now do some prediction; first get prediction algorithm object\n algo3 = d4p.implicit_als_prediction_ratings(nFactors=nFactors)\n # and compute\n result3 = algo3.compute(result2.model)\n\n # implicit als prediction result objects provide prediction\n assert(result3.prediction.shape == data.shape)\n\n return result3\n\n\nif __name__ == \"__main__\":\n res = main()\n print(\"Predicted ratings:\\n\", res.prediction[:10])\n print('All looks good!')\n",
"#*******************************************************************************\n# Copyright 2014-2019 Intel Corporation\n# All Rights Reserved.\n#\n# This software is licensed under the Apache License, Version 2.0 (the\n# \"License\"), the following terms apply:\n#\n# You may not use this file except in compliance with the License. You may\n# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#*******************************************************************************\n\n# daal4py K-Means example for shared memory systems\n\nimport daal4py as d4p\nimport numpy as np\n\n# let's try to use pandas' fast csv reader\ntry:\n import pandas\n read_csv = lambda f, c, t=np.float64: pandas.read_csv(f, usecols=c, delimiter=',', header=None, dtype=t)\nexcept:\n # fall back to numpy loadtxt\n read_csv = lambda f, c, t=np.float64: np.loadtxt(f, usecols=c, delimiter=',', ndmin=2)\n\n\ndef main(readcsv=read_csv, method='defaultDense'):\n infile = \"./data/batch/kmeans_dense.csv\"\n nClusters = 20\n maxIter = 5\n\n initrain_algo = d4p.kmeans_init(nClusters, method=\"randomDense\")\n # Load the data\n data = readcsv(infile, range(20))\n # compute initial centroids\n initrain_result = initrain_algo.compute(data)\n # The results provides the initial centroids\n assert initrain_result.centroids.shape[0] == nClusters\n\n # configure kmeans main object: we also request the cluster assignments\n algo = d4p.kmeans(nClusters, maxIter, assignFlag=True)\n # compute the clusters/centroids\n result = algo.compute(data, initrain_result.centroids)\n \n # Note: we could have done this in just one line:\n # d4p.kmeans(nClusters, maxIter, assignFlag=True).compute(data, d4p.kmeans_init(nClusters, method=\"plusPlusDense\").compute(data).centroids)\n\n # Kmeans result objects provide assignments (if requested), centroids, goalFunction, nIterations and objectiveFunction\n assert result.centroids.shape[0] == nClusters\n assert result.assignments.shape == (data.shape[0], 1)\n assert result.nIterations <= maxIter\n\n return result\n\n\nif __name__ == \"__main__\":\n result = main()\n print(\"\\nFirst 10 cluster assignments:\\n\", result.assignments[0:10])\n print(\"\\nFirst 10 dimensions of centroids:\\n\", result.centroids[:,0:10])\n print(\"\\nObjective function value:\\n\", result.objectiveFunction)\n print('All looks good!')\n",
"#*******************************************************************************\n# Copyright 2014-2019 Intel Corporation\n# All Rights Reserved.\n#\n# This software is licensed under the Apache License, Version 2.0 (the\n# \"License\"), the following terms apply:\n#\n# You may not use this file except in compliance with the License. You may\n# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#*******************************************************************************\n\n# daal4py softMax example for shared memory systems\n\nimport daal4py as d4p\nimport numpy as np\n\n# let's try to use pandas' fast csv reader\ntry:\n import pandasd\n read_csv = lambda f, c=None, t=np.float64: pandas.read_csv(f, usecols=c, delimiter=',', header=None, dtype=t)\nexcept:\n # fall back to numpy loadtxt\n read_csv = lambda f, c=None, t=np.float64: np.loadtxt(f, usecols=c, delimiter=',', ndmin=2)\n\n\ndef main(readcsv=read_csv, method='defaultDense'):\n infile = \"./data/batch/covcormoments_dense.csv\"\n\n # configure a covariance object\n algo = d4p.math_softmax()\n \n # let's provide a file directly, not a table/array\n result1 = algo.compute(infile)\n\n # We can also load the data ourselfs and provide the numpy array\n data = readcsv(infile)\n result2 = algo.compute(data)\n\n # covariance result objects provide correlation, covariance and mean\n assert np.allclose(result1.value, result2.value)\n\n return result1\n\n\nif __name__ == \"__main__\":\n res = main()\n print(\"SoftMax result (first 5 rows):\\n\", res.value[:5])\n print('All looks good!')\n"
] |
[
[
"pandas.read_csv",
"numpy.loadtxt"
],
[
"pandas.read_csv",
"numpy.loadtxt"
],
[
"numpy.allclose",
"numpy.loadtxt"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
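
All three daal4py examples above use the same reader shim: prefer pandas' fast CSV parser, and fall back to numpy.loadtxt when pandas cannot be imported. In math_softmax_batch.py the `import pandasd` typo makes the try branch always fail, which is presumably why only numpy calls appear in that file's extracted API list. A minimal sketch of the intended pattern, with a hypothetical input path and the same keyword arguments as the examples:

    import numpy as np

    try:
        import pandas
        read_csv = lambda f, c=None, t=np.float64: pandas.read_csv(
            f, usecols=c, delimiter=',', header=None, dtype=t)
    except ImportError:
        # numpy fallback, used whenever pandas is unavailable (or mis-imported)
        read_csv = lambda f, c=None, t=np.float64: np.loadtxt(
            f, usecols=c, delimiter=',', ndmin=2)

    data = read_csv("./data/batch/example_dense.csv")  # hypothetical file
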
peerdavid/capsule-network
|
[
"bb62fc90a2022a05dc7a4b206279f34f6f8316fd"
] |
[
"mnist/capsnet.py"
] |
[
"import os\nimport argparse\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\nfrom keras import callbacks, layers, models, optimizers\nfrom keras import backend as K\nfrom keras.utils import to_categorical\nfrom keras.datasets import mnist\nfrom keras.preprocessing.image import ImageDataGenerator\n\nfrom sklearn.metrics import confusion_matrix, f1_score, accuracy_score, recall_score, precision_score\n\nimport utils\nfrom capsule import PrimaryCaps, CapsuleLayer, Length, Mask, margin_loss, reconstruction_loss\n\n\n#\n# Set defaults\n#\nK.set_image_data_format('channels_last')\n\n\n#\n# Main\n#\ndef main(args):\n # Ensure working dirs\n if not os.path.exists(args.save_dir):\n os.makedirs(args.save_dir)\n\n # Save args into file \n with open(args.save_dir+\"/args.txt\", \"w\") as out:\n out.write(str(args) + \"\\n\")\n \n # Load data\n (x_train, y_train), (x_test, y_test) = load_mnist()\n\n # Cut off training samples\n if(args.max_num_samples is not None):\n x_train = x_train[:args.max_num_samples]\n y_train = y_train[:args.max_num_samples]\n print(\"\\nUsing only %d training samples.\\n\" % len(x_train))\n\n # Create model\n model, eval_model, manipulate_model = create_capsnet(input_shape=x_train.shape[1:],\n n_class=len(np.unique(np.argmax(y_train, 1))),\n num_routing=args.num_routing)\n model.summary()\n\n # Run training / testing\n if args.weights is not None and os.path.exists(args.weights):\n model.load_weights(args.weights)\n print(\"Successfully loaded weights file %s\" % args.weights)\n \n if not args.testing:\n print(\"\\n\" + \"=\" * 40 + \" TRAIN \" + \"=\" * 40)\n train(model=model, data=((x_train, y_train), (x_test, y_test)), args=args)\n else:\n print(\"\\n\" + \"=\" * 40 + \" TEST =\" + \"=\" * 40)\n if args.weights is None:\n print('(Warning) No weights are provided, using random initialized weights.')\n\n test(model=eval_model, data=(x_test, y_test), args=args)\n manipulate_latent(manipulate_model, (x_test, y_test), args)\n \n print(\"=\" * 40 + \"=======\" + \"=\" * 40)\n\n\ndef load_mnist():\n # the data, shuffled and split between train and test sets\n \n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n x_train = x_train.reshape(-1, 28, 28, 1).astype('float32') / 255.\n x_test = x_test.reshape(-1, 28, 28, 1).astype('float32') / 255.\n y_train = to_categorical(y_train.astype('float32'))\n y_test = to_categorical(y_test.astype('float32'))\n return (x_train, y_train), (x_test, y_test)\n\n\ndef create_capsnet(input_shape, n_class, num_routing):\n # Create CapsNet\n x = layers.Input(shape=input_shape)\n conv1 = layers.Conv2D(filters=256, kernel_size=9, strides=1, padding='valid', activation='relu', name='conv1')(x)\n primary_caps = PrimaryCaps(layer_input=conv1, name='primary_caps', dim_capsule=8, channels=32, kernel_size=9, strides=2)\n digit_caps = CapsuleLayer(num_capsule=n_class, dim_vector=16, num_routing=num_routing)(primary_caps)\n out_caps = Length(name='capsnet')(digit_caps)\n\n # Create decoder\n y = layers.Input(shape=(n_class,))\n masked_by_y = Mask()([digit_caps, y]) # The true label is used to mask the output of capsule layer for training\n masked = Mask()(digit_caps) # Mask using the capsule with maximal length for prediction\n\n # Shared Decoder model in training and prediction\n decoder = models.Sequential(name='decoder')\n decoder.add(layers.Dense(512, activation='relu', input_dim=16*n_class))\n decoder.add(layers.Dense(1024, activation='relu'))\n decoder.add(layers.Dense(np.prod(input_shape), activation='sigmoid'))\n 
decoder.add(layers.Reshape(target_shape=input_shape, name='decoder_output'))\n\n # Models for training and evaluation (prediction)\n train_model = models.Model([x, y], [out_caps, decoder(masked_by_y)])\n eval_model = models.Model(x, [out_caps, decoder(masked)])\n\n # manipulate model\n noise = layers.Input(shape=(n_class, 16))\n noised_digit_caps = layers.Add()([digit_caps, noise])\n masked_noised_y = Mask()([noised_digit_caps, y])\n manipulate_model = models.Model([x, y, noise], decoder(masked_noised_y))\n\n return train_model, eval_model, manipulate_model\n\n\ndef train(model, data, args):\n # unpacking the data\n (x_train, y_train), (x_test, y_test) = data\n\n # callbacks\n log = callbacks.CSVLogger(args.save_dir + '/log.csv')\n tb = callbacks.TensorBoard(log_dir=args.save_dir + '/tensorboard-logs',\n batch_size=args.batch_size, histogram_freq=int(args.debug))\n checkpoint = callbacks.ModelCheckpoint(args.save_dir + '/weights-{epoch:02d}.hdf5', monitor='val_capsnet_acc',\n save_best_only=True, save_weights_only=True, verbose=1)\n lr_decay = callbacks.LearningRateScheduler(schedule=lambda epoch: args.lr * (args.lr_decay ** epoch))\n\n # compile the model\n model.compile(optimizer=optimizers.Adam(lr=args.lr),\n loss=[margin_loss, reconstruction_loss], # We scale down this reconstruction loss by 0.0005 so that\n loss_weights=[1., args.scale_reconstruction_loss], # ...it does not dominate the margin loss during training.\n metrics={'capsnet': 'accuracy'}) \n\n # Generator with data augmentation as used in [1]\n def train_generator_with_augmentation(x, y, batch_size, shift_fraction=0.):\n train_datagen = ImageDataGenerator(width_shift_range=shift_fraction,\n height_shift_range=shift_fraction) # shift up to 2 pixel for MNIST\n generator = train_datagen.flow(x, y, batch_size=batch_size)\n while 1:\n x_batch, y_batch = generator.next()\n yield ([x_batch, y_batch], [y_batch, x_batch])\n\n generator = train_generator_with_augmentation(x_train, y_train, args.batch_size, args.shift_fraction)\n model.fit_generator(generator=generator,\n steps_per_epoch=int(y_train.shape[0] / args.batch_size),\n epochs=args.epochs,\n validation_data=[[x_test, y_test], [y_test, x_test]], # Note: For the decoder the input is the label and the output the image\n callbacks=[log, tb, checkpoint, lr_decay])\n\n model.save_weights(args.save_dir + '/trained_model.hdf5')\n print('Trained model saved to \\'%s/trained_model.hdf5\\'' % args.save_dir)\n\n utils.plot_log(args.save_dir + '/log.csv', show=True)\n\n return model\n\n\ndef test(model, data, args):\n\n # Create an augmentation function and cache augmented samples\n # to be displayed later\n x_augmented = []\n def test_generator_with_augmentation(x, batch_size, shift_range, rotation_range):\n test_datagen = ImageDataGenerator(width_shift_range=shift_range,\n height_shift_range=shift_range,\n rotation_range=rotation_range)\n generator = test_datagen.flow(x, batch_size=batch_size, shuffle=False)\n while 1:\n x_batch = generator.next()\n x_augmented.extend(x_batch)\n yield (x_batch)\n\n # Run predictions\n test_batch_size = 100\n x_true, y_true = data\n generator = test_generator_with_augmentation(x_true, test_batch_size, args.shift_fraction, args.rotation_range)\n y_pred, x_recon = model.predict_generator(generator=generator, steps=len(x_true) // test_batch_size)\n\n # Print different metrics using the top score\n y_true = np.argmax(y_true, 1)\n y_pred = np.argmax(y_pred, 1)\n\n print('Confusion matrix:\\n', confusion_matrix(y_true, y_pred))\n print('\\nAccuracy: ', 
accuracy_score(y_true, y_pred))\n print('Recall: ', recall_score(y_true, y_pred, average='weighted'))\n print('Precision: ', precision_score(y_true, y_pred, average='weighted'))\n print('F1-Score: ', f1_score(y_true, y_pred, average='weighted'))\n\n img = utils.combine_images(np.concatenate([x_augmented[:50], x_recon[:50]]))\n image = img * 255\n\n print('\\nReconstructed images are saved to %s/real_and_recon.png' % args.save_dir)\n Image.fromarray(image.astype(np.uint8)).save(args.save_dir + \"/real_and_recon.png\")\n plt.imshow(plt.imread(args.save_dir + \"/real_and_recon.png\"))\n plt.show()\n\n\ndef manipulate_latent(model, data, args):\n x_true, y_true = data\n\n index = np.argmax(y_true, 1) == args.digit\n number = np.random.randint(low=0, high=sum(index) - 1)\n x, y = x_true[index][number], y_true[index][number]\n x, y = np.expand_dims(x, 0), np.expand_dims(y, 0)\n noise = np.zeros([1, 10, 16])\n x_recons = []\n\n # Change params of vect in 0.05 steps. See also [1]\n for dim in range(16):\n for r in [-0.25, -0.2, -0.15, -0.1, -0.05, 0, 0.05, 0.1, 0.15, 0.2, 0.25]:\n tmp = np.copy(noise)\n tmp[:,:,dim] = r\n x_recon = model.predict([x, y, tmp])\n x_recons.append(x_recon)\n\n x_recons = np.concatenate(x_recons)\n\n img = utils.combine_images(x_recons, height=16)\n image = img*255\n Image.fromarray(image.astype(np.uint8)).save(args.save_dir + '/manipulate-%d.png' % args.digit)\n print('Manipulated result saved to %s/manipulate-%d.png' % (args.save_dir, args.digit))\n\n\n#\n# Main\n#\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Capsule Network on MNIST.\")\n parser.add_argument('--epochs', default=50, type=int)\n\n parser.add_argument('--batch_size', default=128, type=int)\n\n parser.add_argument('--max_num_samples', default=None, type=int,\n help=\"Max. number of training examples to use. -1 to use all\")\n\n parser.add_argument('--lr', default=0.001, type=float,\n help=\"Initial learning rate\")\n\n parser.add_argument('--lr_decay', default=0.9, type=float,\n help=\"The value multiplied by lr at each epoch. Set a larger value for larger epochs\")\n\n parser.add_argument('--scale_reconstruction_loss', default=0.0005, type=float,\n help=\"The coefficient for the loss of decoder\")\n\n parser.add_argument('-r', '--num_routing', default=3, type=int,\n help=\"Number of iterations used in routing algorithm. should > 0\")\n\n parser.add_argument('--shift_fraction', default=0.1, type=float,\n help=\"Fraction of pixels to shift at most in each direction.\")\n\n parser.add_argument('--debug', action='store_true',\n help=\"Save weights by TensorBoard\")\n\n parser.add_argument('--save_dir', default='./result-capsnet')\n\n parser.add_argument('-t', '--testing', action='store_true',\n help=\"Test the trained model on testing dataset\")\n \n parser.add_argument('--rotation_range', default=0.0, type=float,\n help=\"(TestOnly) Rotate the test dataset randomly in the given range in degrees.\")\n\n parser.add_argument('--digit', default=5, type=int,\n help=\"Digit to manipulate\")\n\n parser.add_argument('-w', '--weights', default=None,\n help=\"The path of the saved weights. Should be specified when testing\")\n args = parser.parse_args()\n\n main(args)"
] |
[
[
"sklearn.metrics.recall_score",
"numpy.expand_dims",
"matplotlib.pyplot.imread",
"sklearn.metrics.precision_score",
"sklearn.metrics.confusion_matrix",
"numpy.concatenate",
"numpy.copy",
"numpy.argmax",
"numpy.prod",
"sklearn.metrics.f1_score",
"matplotlib.pyplot.show",
"numpy.zeros",
"sklearn.metrics.accuracy_score"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
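
capsnet.py above compiles the training model with loss=[margin_loss, reconstruction_loss] and down-weights the reconstruction term by 0.0005 so that it does not dominate. The imported margin_loss is not shown in this row; a hedged NumPy sketch of the margin loss from the CapsNet paper (Sabour et al., 2017), which the repo's capsule.margin_loss presumably follows, using the paper's constants m_plus=0.9, m_minus=0.1, lam=0.5:

    import numpy as np

    def capsule_margin_loss(y_true, lengths, m_plus=0.9, m_minus=0.1, lam=0.5):
        # y_true: one-hot labels; lengths: per-class capsule lengths; both (batch, n_class)
        present = y_true * np.maximum(0.0, m_plus - lengths) ** 2
        absent = lam * (1.0 - y_true) * np.maximum(0.0, lengths - m_minus) ** 2
        return float(np.mean(np.sum(present + absent, axis=1)))
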
ariasjose/nn4nlp-code
|
[
"7327ea3e93161afbc8c008e287b646daa802be4d"
] |
[
"06-rnn/sentiment-rnn.py"
] |
[
"from collections import defaultdict\r\nimport time\r\nimport random\r\nimport dynet as dy\r\nimport numpy as np\r\n\r\n# Functions to read in the corpus\r\nw2i = defaultdict(lambda: len(w2i))\r\nt2i = defaultdict(lambda: len(t2i))\r\nUNK = w2i[\"<unk>\"]\r\n\r\n\r\ndef read_dataset(filename):\r\n with open(filename, \"r\") as f:\r\n for line in f:\r\n tag, words = line.lower().strip().split(\" ||| \")\r\n yield ([w2i[x] for x in words.split(\" \")], t2i[tag])\r\n\r\n\r\n# Read in the data\r\ntrain = list(read_dataset(\"../data/classes/train.txt\"))\r\nw2i = defaultdict(lambda: UNK, w2i)\r\ndev = list(read_dataset(\"../data/classes/test.txt\"))\r\nnwords = len(w2i)\r\nntags = len(t2i)\r\n\r\n# Start DyNet and defin trainer\r\nmodel = dy.ParameterCollection()\r\ntrainer = dy.AdamTrainer(model)\r\n\r\n# Define the model\r\nEMB_SIZE = 64\r\nHID_SIZE = 64\r\nW_emb = model.add_lookup_parameters((nwords, EMB_SIZE)) # Word embeddings\r\nfwdLSTM = dy.SimpleRNNBuilder(1, EMB_SIZE, HID_SIZE, model) # Forward LSTM\r\nbwdLSTM = dy.SimpleRNNBuilder(1, EMB_SIZE, HID_SIZE, model) # Backward LSTM\r\nW_sm = model.add_parameters((ntags, 2 * HID_SIZE)) # Softmax weights\r\nb_sm = model.add_parameters((ntags)) # Softmax bias\r\n\r\n\r\n# A function to calculate scores for one value\r\ndef calc_scores(words):\r\n dy.renew_cg()\r\n word_embs = [dy.lookup(W_emb, x) for x in words]\r\n fwd_init = fwdLSTM.initial_state()\r\n fwd_embs = fwd_init.transduce(word_embs)\r\n bwd_init = bwdLSTM.initial_state()\r\n bwd_embs = bwd_init.transduce(reversed(word_embs))\r\n return W_sm * dy.concatenate([fwd_embs[-1], bwd_embs[-1]]) + b_sm\r\n\r\n\r\nfor ITER in range(100):\r\n # Perform training\r\n random.shuffle(train)\r\n train_loss = 0.0\r\n start = time.time()\r\n for words, tag in train:\r\n my_loss = dy.pickneglogsoftmax(calc_scores(words), tag)\r\n train_loss += my_loss.value()\r\n my_loss.backward()\r\n trainer.update()\r\n print(\"iter %r: train loss/sent=%.4f, time=%.2fs\" % (ITER, train_loss / len(train), time.time() - start))\r\n # Perform training\r\n test_correct = 0.0\r\n for words, tag in dev:\r\n scores = calc_scores(words).npvalue()\r\n predict = np.argmax(scores)\r\n if predict == tag:\r\n test_correct += 1\r\n print(\"iter %r: test acc=%.4f\" % (ITER, test_correct / len(dev)))\r\n"
] |
[
[
"numpy.argmax"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
qbarthelemy/geomstats
|
[
"efe8f0215df802f4dd2816fcac42bd81b7a900d7",
"efe8f0215df802f4dd2816fcac42bd81b7a900d7",
"efe8f0215df802f4dd2816fcac42bd81b7a900d7"
] |
[
"tests/tests_geomstats/test_beta.py",
"tests/tests_geomstats/test_visualization.py",
"tests/tests_geomstats/test_examples.py"
] |
[
"\"\"\"Unit tests for the beta manifold.\"\"\"\n\nimport warnings\n\nfrom scipy.stats import beta\n\nimport geomstats.backend as gs\nimport geomstats.tests\nfrom geomstats.information_geometry.beta import BetaDistributions, BetaMetric\n\n\nclass TestBetaDistributions(geomstats.tests.TestCase):\n \"\"\"Class defining the beta distributions tests.\"\"\"\n\n def setUp(self):\n \"\"\"Define the parameters of the tests.\"\"\"\n warnings.simplefilter(\"ignore\", category=UserWarning)\n self.beta = BetaDistributions()\n self.metric = BetaMetric()\n self.n_samples = 10\n self.dim = self.beta.dim\n\n def test_random_uniform_and_belongs(self):\n \"\"\"Test random_uniform and belongs.\n\n Test that the random uniform method samples\n on the beta distribution space.\n \"\"\"\n point = self.beta.random_point()\n result = self.beta.belongs(point)\n expected = True\n self.assertAllClose(expected, result)\n\n def test_random_uniform_and_belongs_vectorization(self):\n \"\"\"Test random_uniform and belongs.\n\n Test that the random uniform method samples\n on the beta distribution space.\n \"\"\"\n n_samples = self.n_samples\n point = self.beta.random_point(n_samples)\n result = self.beta.belongs(point)\n expected = gs.array([True] * n_samples)\n self.assertAllClose(expected, result)\n\n def test_random_uniform(self):\n \"\"\"Test random_uniform.\n\n Test that the random uniform method samples points of the right shape\n \"\"\"\n point = self.beta.random_point(self.n_samples)\n self.assertAllClose(gs.shape(point), (self.n_samples, self.dim))\n\n def test_sample(self):\n \"\"\"Test samples.\n\n Test that the sample method samples variates from beta distributions\n with the specified parameters, using the law of large numbers\n \"\"\"\n n_samples = self.n_samples\n tol = (n_samples * 10) ** (-0.5)\n point = self.beta.random_point(n_samples)\n samples = self.beta.sample(point, n_samples * 10)\n result = gs.mean(samples, axis=1)\n expected = point[:, 0] / gs.sum(point, axis=1)\n self.assertAllClose(result, expected, rtol=tol, atol=tol)\n\n def test_maximum_likelihood_fit(self):\n \"\"\"Test maximum likelihood.\n\n Test that the maximum likelihood fit method recovers\n parameters of beta distribution.\n \"\"\"\n n_samples = self.n_samples\n point = self.beta.random_point(n_samples)\n samples = self.beta.sample(point, n_samples * 10)\n fits = self.beta.maximum_likelihood_fit(samples)\n expected = self.beta.belongs(fits)\n result = gs.array([True] * n_samples)\n self.assertAllClose(result, expected)\n\n @geomstats.tests.np_and_autograd_only\n def test_exp(self):\n \"\"\"Test Exp.\n\n Test that the Riemannian exponential at points on the first\n bisector computed in the direction of the first bisector stays\n on the first bisector.\n \"\"\"\n gs.random.seed(123)\n n_samples = self.n_samples\n points = self.beta.random_point(n_samples)\n vectors = self.beta.random_point(n_samples)\n initial_vectors = gs.array([[vec_x, vec_x] for vec_x in vectors[:, 0]])\n points = gs.array([[param_a, param_a] for param_a in points[:, 0]])\n result_points = self.metric.exp(initial_vectors, points)\n result = gs.isclose(result_points[:, 0], result_points[:, 1]).all()\n expected = gs.array([True] * n_samples)\n self.assertAllClose(expected, result)\n\n @geomstats.tests.np_and_autograd_only\n def test_log_and_exp(self):\n \"\"\"Test Log and Exp.\n\n Test that the Riemannian exponential\n and the Riemannian logarithm are inverse.\n\n Expect their composition to give the identity function.\n \"\"\"\n n_samples = self.n_samples\n 
gs.random.seed(123)\n base_point = self.beta.random_point(n_samples=n_samples, bound=5)\n point = self.beta.random_point(n_samples=n_samples, bound=5)\n log = self.metric.log(point, base_point, n_steps=500)\n expected = point\n result = self.metric.exp(tangent_vec=log, base_point=base_point)\n self.assertAllClose(result, expected, rtol=1e-2)\n\n @geomstats.tests.np_and_autograd_only\n def test_exp_vectorization(self):\n \"\"\"Test vectorization of Exp.\n\n Test the case with one initial point and several tangent vectors.\n \"\"\"\n point = self.beta.random_point()\n tangent_vec = gs.array([1.0, 2.0])\n n_tangent_vecs = 10\n t = gs.linspace(0.0, 1.0, n_tangent_vecs)\n tangent_vecs = gs.einsum(\"i,...k->...ik\", t, tangent_vec)\n end_points = self.metric.exp(tangent_vec=tangent_vecs, base_point=point)\n result = end_points.shape\n expected = (n_tangent_vecs, 2)\n self.assertAllClose(result, expected)\n\n @geomstats.tests.np_and_autograd_only\n def test_log_vectorization(self):\n \"\"\"Test vectorization of Log.\n\n Test the case with several base points and one end point.\n \"\"\"\n n_points = 10\n base_points = self.beta.random_point(n_samples=n_points)\n point = self.beta.random_point()\n tangent_vecs = self.metric.log(base_point=base_points, point=point)\n result = tangent_vecs.shape\n expected = (n_points, 2)\n self.assertAllClose(result, expected)\n\n @geomstats.tests.np_autograd_and_tf_only\n def test_christoffels_vectorization(self):\n \"\"\"Test Christoffel synbols.\n\n Check vectorization of Christoffel symbols.\n \"\"\"\n points = self.beta.random_point(self.n_samples)\n christoffel = self.metric.christoffels(points)\n result = christoffel.shape\n expected = gs.array([self.n_samples, self.dim, self.dim, self.dim])\n self.assertAllClose(result, expected)\n\n def test_metric_matrix(self):\n \"\"\"Test metric matrix.\n\n Check the value of the metric matrix for a particular\n point in the space of beta distributions.\"\"\"\n point = gs.array([1.0, 1.0])\n result = self.beta.metric.metric_matrix(point)\n expected = gs.array([[1.0, -0.644934066], [-0.644934066, 1.0]])\n self.assertAllClose(result, expected)\n self.assertRaises(ValueError, self.beta.metric.metric_matrix)\n\n def test_point_to_pdf(self):\n \"\"\"Test point_to_pdf.\n\n Check vectorization of the computation of the pdf.\n \"\"\"\n point = self.beta.random_point(n_samples=2)\n pdf = self.beta.point_to_pdf(point)\n x = gs.linspace(0.0, 1.0, 10)\n result = pdf(x)\n pdf1 = beta.pdf(x, a=point[0, 0], b=point[0, 1])\n pdf2 = beta.pdf(x, a=point[1, 0], b=point[1, 1])\n expected = gs.stack([gs.array(pdf1), gs.array(pdf2)], axis=1)\n self.assertAllClose(result, expected)\n",
"\"\"\"Unit tests for visualization.\"\"\"\n\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nimport geomstats.backend as gs\nimport geomstats.tests\nimport geomstats.visualization as visualization\nfrom geomstats.geometry.hyperbolic import Hyperbolic\nfrom geomstats.geometry.hypersphere import Hypersphere\nfrom geomstats.geometry.matrices import Matrices\nfrom geomstats.geometry.poincare_half_space import PoincareHalfSpace\nfrom geomstats.geometry.pre_shape import PreShapeSpace\nfrom geomstats.geometry.special_euclidean import SpecialEuclidean\nfrom geomstats.geometry.special_orthogonal import (\n SpecialOrthogonal,\n _SpecialOrthogonalMatrices,\n)\n\nmatplotlib.use(\"Agg\") # NOQA\n\n\nclass TestVisualization(geomstats.tests.TestCase):\n def setUp(self):\n self.n_samples = 10\n self.SO3_GROUP = SpecialOrthogonal(n=3, point_type=\"vector\")\n self.SE3_GROUP = SpecialEuclidean(n=3, point_type=\"vector\")\n self.S1 = Hypersphere(dim=1)\n self.S2 = Hypersphere(dim=2)\n self.H2 = Hyperbolic(dim=2)\n self.H2_half_plane = PoincareHalfSpace(dim=2)\n self.M32 = Matrices(m=3, n=2)\n self.S32 = PreShapeSpace(k_landmarks=3, m_ambient=2)\n self.KS = visualization.KendallSphere()\n self.M33 = Matrices(m=3, n=3)\n self.S33 = PreShapeSpace(k_landmarks=3, m_ambient=3)\n self.KD = visualization.KendallDisk()\n\n plt.figure()\n\n @staticmethod\n def test_tutorial_matplotlib():\n visualization.tutorial_matplotlib()\n\n def test_plot_points_so3(self):\n points = self.SO3_GROUP.random_uniform(self.n_samples)\n visualization.plot(points, space=\"SO3_GROUP\")\n\n def test_plot_points_se3(self):\n points = self.SE3_GROUP.random_point(self.n_samples)\n visualization.plot(points, space=\"SE3_GROUP\")\n\n def test_draw_pre_shape_2d(self):\n self.KS.draw()\n\n def test_draw_points_pre_shape_2d(self):\n points = self.S32.random_point(self.n_samples)\n visualization.plot(points, space=\"S32\")\n points = self.M32.random_point(self.n_samples)\n visualization.plot(points, space=\"M32\")\n self.KS.clear_points()\n\n def test_draw_curve_pre_shape_2d(self):\n self.KS.draw()\n base_point = self.S32.random_point()\n vec = self.S32.random_point()\n tangent_vec = self.S32.to_tangent(vec, base_point)\n times = gs.linspace(0.0, 1.0, 1000)\n speeds = gs.array([-t * tangent_vec for t in times])\n points = self.S32.ambient_metric.exp(speeds, base_point)\n self.KS.add_points(points)\n self.KS.draw_curve()\n self.KS.clear_points()\n\n def test_draw_vector_pre_shape_2d(self):\n self.KS.draw()\n base_point = self.S32.random_point()\n vec = self.S32.random_point()\n tangent_vec = self.S32.to_tangent(vec, base_point)\n self.KS.draw_vector(tangent_vec, base_point)\n\n def test_convert_to_spherical_coordinates_pre_shape_2d(self):\n points = self.S32.random_point(self.n_samples)\n coords = self.KS.convert_to_spherical_coordinates(points)\n x = coords[:, 0]\n y = coords[:, 1]\n z = coords[:, 2]\n result = x ** 2 + y ** 2 + z ** 2\n expected = 0.25 * gs.ones(self.n_samples)\n self.assertAllClose(result, expected)\n\n def test_rotation_pre_shape_2d(self):\n theta = gs.random.rand(1)[0]\n phi = gs.random.rand(1)[0]\n rot = self.KS.rotation(theta, phi)\n result = _SpecialOrthogonalMatrices(3).belongs(rot)\n expected = True\n self.assertAllClose(result, expected)\n\n def test_draw_pre_shape_3d(self):\n self.KD.draw()\n\n def test_draw_points_pre_shape_3d(self):\n points = self.S33.random_point(self.n_samples)\n visualization.plot(points, space=\"S33\")\n points = self.M33.random_point(self.n_samples)\n visualization.plot(points, 
space=\"M33\")\n self.KD.clear_points()\n\n def test_draw_curve_pre_shape_3d(self):\n self.KD.draw()\n base_point = self.S33.random_point()\n vec = self.S33.random_point()\n tangent_vec = self.S33.to_tangent(vec, base_point)\n tangent_vec = 0.5 * tangent_vec / self.S33.ambient_metric.norm(tangent_vec)\n times = gs.linspace(0.0, 1.0, 1000)\n speeds = gs.array([-t * tangent_vec for t in times])\n points = self.S33.ambient_metric.exp(speeds, base_point)\n self.KD.add_points(points)\n self.KD.draw_curve()\n self.KD.clear_points()\n\n def test_draw_vector_pre_shape_3d(self):\n self.KS.draw()\n base_point = self.S32.random_point()\n vec = self.S32.random_point()\n tangent_vec = self.S32.to_tangent(vec, base_point)\n self.KS.draw_vector(tangent_vec, base_point)\n\n def test_convert_to_planar_coordinates_pre_shape_3d(self):\n points = self.S33.random_point(self.n_samples)\n coords = self.KD.convert_to_planar_coordinates(points)\n x = coords[:, 0]\n y = coords[:, 1]\n radius = x ** 2 + y ** 2\n result = [r <= 1.0 for r in radius]\n self.assertTrue(gs.all(result))\n\n @geomstats.tests.np_autograd_and_torch_only\n def test_plot_points_s1(self):\n points = self.S1.random_uniform(self.n_samples)\n visualization.plot(points, space=\"S1\")\n\n def test_plot_points_s2(self):\n points = self.S2.random_uniform(self.n_samples)\n visualization.plot(points, space=\"S2\")\n\n def test_plot_points_h2_poincare_disk(self):\n points = self.H2.random_point(self.n_samples)\n visualization.plot(points, space=\"H2_poincare_disk\")\n\n def test_plot_points_h2_poincare_half_plane_ext(self):\n points = self.H2.random_point(self.n_samples)\n visualization.plot(\n points, space=\"H2_poincare_half_plane\", point_type=\"extrinsic\"\n )\n\n def test_plot_points_h2_poincare_half_plane_none(self):\n points = self.H2_half_plane.random_point(self.n_samples)\n visualization.plot(points, space=\"H2_poincare_half_plane\")\n\n def test_plot_points_h2_poincare_half_plane_hs(self):\n points = self.H2_half_plane.random_point(self.n_samples)\n visualization.plot(\n points, space=\"H2_poincare_half_plane\", point_type=\"half_space\"\n )\n\n def test_plot_points_h2_klein_disk(self):\n points = self.H2.random_point(self.n_samples)\n visualization.plot(points, space=\"H2_klein_disk\")\n\n @staticmethod\n def test_plot_points_se2():\n points = SpecialEuclidean(n=2, point_type=\"vector\").random_point(4)\n visu = visualization.SpecialEuclidean2(points, point_type=\"vector\")\n ax = visu.set_ax()\n visu.draw_points(ax)\n",
"\"\"\"Unit tests for the examples.\"\"\"\n\nimport logging\nimport os\nimport sys\nimport warnings\n\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nimport examples.empirical_frechet_mean_uncertainty_sn as empirical_frechet_mean_uncertainty_sn # NOQA\nimport examples.geodesic_regression_hypersphere as geodesic_regression_hypersphere # NOQA\nimport examples.geodesic_regression_se2 as geodesic_regression_se2\nimport examples.gradient_descent_s2 as gradient_descent_s2\nimport examples.kalman_filter as kalman_filter\nimport examples.learning_graph_structured_data_h2 as learning_gsd_h2\nimport examples.loss_and_gradient_se3 as loss_and_gradient_se3\nimport examples.loss_and_gradient_so3 as loss_and_gradient_so3\nimport examples.plot_bch_so3 as plot_bch_so3\nimport examples.plot_expectation_maximization_ball as plot_em_manifolds\nimport examples.plot_geodesics_h2 as plot_geodesics_h2\nimport examples.plot_geodesics_poincare_polydisk as plot_geodesics_poincare_polydisk # NOQA\nimport examples.plot_geodesics_s2 as plot_geodesics_s2\nimport examples.plot_geodesics_se2 as plot_geodesics_se2\nimport examples.plot_geodesics_se3 as plot_geodesics_se3\nimport examples.plot_geodesics_so3 as plot_geodesics_so3\nimport examples.plot_grid_h2 as plot_grid_h2\nimport examples.plot_kernel_density_estimation_classifier_s2 as plot_kernel_density_estimation_classifier_s2 # NOQA\nimport examples.plot_kmeans_manifolds as plot_kmeans_manifolds\nimport examples.plot_kmedoids_manifolds as plot_kmedoids_manifolds\nimport examples.plot_knn_s2 as plot_knn_s2\nimport examples.plot_online_kmeans_s1 as plot_online_kmeans_s1\nimport examples.plot_online_kmeans_s2 as plot_online_kmeans_s2\nimport examples.plot_pole_ladder_s2 as plot_pole_ladder_s2\nimport examples.plot_square_h2_klein_disk as plot_square_h2_klein_disk\nimport examples.plot_square_h2_poincare_disk as plot_square_h2_poincare_disk\nimport examples.plot_square_h2_poincare_half_plane as plot_square_h2_poincare_half_plane # NOQA\nimport examples.tangent_pca_h2 as tangent_pca_h2\nimport examples.tangent_pca_s2 as tangent_pca_s2\nimport examples.tangent_pca_so3 as tangent_pca_so3\nimport geomstats.backend as gs\nimport geomstats.tests\n\nmatplotlib.use(\"Agg\") # NOQA\n\n\nclass TestExamples(geomstats.tests.TestCase):\n @classmethod\n def setUpClass(cls):\n sys.stdout = open(os.devnull, \"w\")\n\n @staticmethod\n def setUp():\n gs.random.seed(1234)\n logger = logging.getLogger()\n logger.disabled = True\n warnings.simplefilter(\"ignore\", category=ImportWarning)\n warnings.simplefilter(\"ignore\", category=UserWarning)\n plt.rcParams.update({\"figure.max_open_warning\": 0})\n plt.figure()\n\n @staticmethod\n @geomstats.tests.np_and_autograd_only\n def test_empirical_frechet_mean_uncertainty_sn():\n empirical_frechet_mean_uncertainty_sn.main()\n\n @staticmethod\n @geomstats.tests.autograd_tf_and_torch_only\n def test_geodesic_regression_hypersphere():\n geodesic_regression_hypersphere.main()\n\n @staticmethod\n @geomstats.tests.autograd_and_tf_only\n def test_geodesic_regression_se2():\n geodesic_regression_se2.main()\n\n @staticmethod\n @geomstats.tests.np_and_autograd_only\n def test_gradient_descent_s2():\n gradient_descent_s2.main(max_iter=64, output_file=None)\n\n @staticmethod\n def test_loss_and_gradient_so3():\n loss_and_gradient_so3.main()\n\n @staticmethod\n def test_loss_and_gradient_se3():\n loss_and_gradient_se3.main()\n\n @staticmethod\n def test_kalman_filter():\n kalman_filter.main()\n\n @staticmethod\n @geomstats.tests.np_and_autograd_only\n 
def test_learning_graph_structured_data_h2():\n learning_gsd_h2.main()\n\n @staticmethod\n @geomstats.tests.np_and_autograd_only\n def test_plot_bch_so3():\n plot_bch_so3.main()\n\n @staticmethod\n def test_plot_geodesics_h2():\n plot_geodesics_h2.main()\n\n @staticmethod\n def test_plot_geodesics_poincare_polydisk():\n plot_geodesics_poincare_polydisk.main()\n\n @staticmethod\n def test_plot_geodesics_s2():\n plot_geodesics_s2.main()\n\n @staticmethod\n def test_plot_geodesics_se3():\n plot_geodesics_se3.main()\n\n @staticmethod\n def test_plot_geodesics_so3():\n plot_geodesics_so3.main()\n\n @staticmethod\n def test_plot_grid_h2():\n plot_grid_h2.main()\n\n @staticmethod\n def test_plot_square_h2_square_poincare_disk():\n plot_square_h2_poincare_disk.main()\n\n @staticmethod\n def test_plot_square_h2_square_poincare_half_plane():\n plot_square_h2_poincare_half_plane.main()\n\n @staticmethod\n def test_plot_square_h2_square_klein_disk():\n plot_square_h2_klein_disk.main()\n\n @staticmethod\n @geomstats.tests.np_and_autograd_only\n def test_tangent_pca_s2():\n tangent_pca_h2.main()\n\n @staticmethod\n @geomstats.tests.np_and_autograd_only\n def test_tangent_pca_h2():\n tangent_pca_s2.main()\n\n @staticmethod\n @geomstats.tests.np_and_autograd_only\n def test_tangent_pca_so3():\n tangent_pca_so3.main()\n\n @staticmethod\n @geomstats.tests.np_and_autograd_only\n def test_plot_kernel_density_estimation_classifier_s2():\n plot_kernel_density_estimation_classifier_s2.main()\n\n @staticmethod\n @geomstats.tests.np_and_autograd_only\n def test_plot_kmeans_manifolds():\n plot_kmeans_manifolds.main()\n\n @staticmethod\n @geomstats.tests.np_and_autograd_only\n def test_plot_kmedoids_manifolds():\n plot_kmedoids_manifolds.main()\n\n @staticmethod\n @geomstats.tests.np_and_autograd_only\n def test_plot_em_manifolds():\n plot_em_manifolds.main()\n\n @staticmethod\n @geomstats.tests.np_and_autograd_only\n def test_plot_knn_s2():\n plot_knn_s2.main()\n\n @staticmethod\n @geomstats.tests.np_and_autograd_only\n def test_plot_online_kmeans_s1():\n plot_online_kmeans_s1.main()\n\n @staticmethod\n @geomstats.tests.np_and_autograd_only\n def test_plot_online_kmeans_s2():\n plot_online_kmeans_s2.main()\n\n @staticmethod\n def test_plot_pole_ladder_s2():\n plot_pole_ladder_s2.main()\n\n @staticmethod\n def test_plot_geodesics_se2():\n plot_geodesics_se2.main()\n"
] |
[
[
"scipy.stats.beta.pdf"
],
[
"matplotlib.use",
"matplotlib.pyplot.figure"
],
[
"matplotlib.use",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
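The geomstats test row above repeatedly follows one pattern: sample random points on a manifold, then hand them to visualization.plot with a space name. A minimal standalone sketch of that pattern (assuming geomstats and matplotlib are installed; import paths are the ones these tests themselves use):

    import matplotlib
    matplotlib.use("Agg")  # headless backend, as in the test suite

    import geomstats.backend as gs
    import geomstats.visualization as visualization
    from geomstats.geometry.hypersphere import Hypersphere

    gs.random.seed(1234)
    sphere = Hypersphere(dim=2)           # the S2 manifold
    points = sphere.random_uniform(10)    # 10 random points on the sphere
    visualization.plot(points, space="S2")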
EmmaUnimelb/open_spiel
|
[
"680c712b4a2e4812348758407a54210a7110b747"
] |
[
"open_spiel/python/algorithms/tabular_qlearner.py"
] |
[
"# Copyright 2019 DeepMind Technologies Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tabular Q-learning agent.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport numpy as np\n\nfrom open_spiel.python import rl_agent\nfrom open_spiel.python import rl_tools\n\ndef valuedict():\n return collections.defaultdict(float)\n\nclass QLearner(rl_agent.AbstractAgent):\n \"\"\"Tabular Q-Learning agent.\n\n See open_spiel/python/examples/tic_tac_toe_qlearner.py for an usage example.\n \"\"\"\n\n def __init__(self,\n player_id,\n num_actions,\n step_size=0.1,\n epsilon_schedule=rl_tools.ConstantSchedule(0.2),\n discount_factor=1.0,\n centralized=False):\n \"\"\"Initialize the Q-Learning agent.\"\"\"\n self._player_id = player_id\n self._num_actions = num_actions\n self._step_size = step_size\n self._epsilon_schedule = epsilon_schedule\n self._epsilon = epsilon_schedule.value\n self._discount_factor = discount_factor\n self._centralized = centralized\n self._q_values = collections.defaultdict(valuedict)\n self._prev_info_state = None\n self._last_loss_value = None\n\n def _epsilon_greedy(self, info_state, legal_actions, epsilon):\n \"\"\"Returns a valid epsilon-greedy action and valid action probs.\n\n If the agent has not been to `info_state`, a valid random action is chosen.\n\n Args:\n info_state: hashable representation of the information state.\n legal_actions: list of actions at `info_state`.\n epsilon: float, prob of taking an exploratory action.\n\n Returns:\n A valid epsilon-greedy action and valid action probabilities.\n \"\"\"\n probs = np.zeros(self._num_actions)\n greedy_q = max([self._q_values[info_state][a] for a in legal_actions])\n greedy_actions = [\n a for a in legal_actions if self._q_values[info_state][a] == greedy_q\n ]\n probs[legal_actions] = epsilon / len(legal_actions)\n probs[greedy_actions] += (1 - epsilon) / len(greedy_actions)\n action = np.random.choice(range(self._num_actions), p=probs)\n return action, probs\n\n def step(self, time_step, is_evaluation=False):\n \"\"\"Returns the action to be taken and updates the Q-values if needed.\n\n Args:\n time_step: an instance of rl_environment.TimeStep.\n is_evaluation: bool, whether this is a training or evaluation call.\n\n Returns:\n A `rl_agent.StepOutput` containing the action probs and chosen action.\n \"\"\"\n if self._centralized:\n info_state = str(time_step.observations[\"info_state\"])\n else:\n info_state = str(time_step.observations[\"info_state\"][self._player_id])\n legal_actions = time_step.observations[\"legal_actions\"][self._player_id]\n\n # Prevent undefined errors if this agent never plays until terminal step\n action, probs = None, None\n\n # Act step: don't act at terminal states.\n if not time_step.last():\n epsilon = 0.0 if is_evaluation else self._epsilon\n action, probs = self._epsilon_greedy(\n info_state, legal_actions, epsilon=epsilon)\n\n # Learn step: don't learn 
during evaluation or at first agent steps.\n if self._prev_info_state and not is_evaluation:\n target = time_step.rewards[self._player_id]\n if not time_step.last(): # Q values are zero for terminal.\n target += self._discount_factor * max(\n [self._q_values[info_state][a] for a in legal_actions])\n\n prev_q_value = self._q_values[self._prev_info_state][self._prev_action]\n self._last_loss_value = target - prev_q_value\n self._q_values[self._prev_info_state][self._prev_action] += (\n self._step_size * self._last_loss_value)\n\n # Decay epsilon, if necessary.\n self._epsilon = self._epsilon_schedule.step()\n\n if time_step.last(): # prepare for the next episode.\n self._prev_info_state = None\n return\n\n # Don't mess up with the state during evaluation.\n if not is_evaluation:\n self._prev_info_state = info_state\n self._prev_action = action\n return rl_agent.StepOutput(action=action, probs=probs)\n\n @property\n def loss(self):\n return self._last_loss_value\n"
] |
[
[
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
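As a gloss on the tabular_qlearner.py row above: the core of _epsilon_greedy is a probability vector that spreads epsilon mass uniformly over the legal actions and the remaining mass over the greedy ones. A detached, numpy-only sketch (function and variable names here are illustrative, not OpenSpiel API):

    import numpy as np

    def epsilon_greedy_probs(q_values, legal_actions, num_actions, epsilon):
        # q_values: dict action -> Q estimate; legal_actions: list of valid action ids
        probs = np.zeros(num_actions)
        greedy_q = max(q_values[a] for a in legal_actions)
        greedy_actions = [a for a in legal_actions if q_values[a] == greedy_q]
        probs[legal_actions] = epsilon / len(legal_actions)
        probs[greedy_actions] += (1 - epsilon) / len(greedy_actions)
        return probs

    # Example: 4 actions, actions 1 and 3 legal, action 3 greedy.
    print(epsilon_greedy_probs({1: 0.0, 3: 1.0}, [1, 3], 4, epsilon=0.2))
    # -> [0.  0.1 0.  0.9]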
jnyborg/timematch
|
[
"a652df95282de9a3fc12d2fd204f438ff4ccb122"
] |
[
"timematch.py"
] |
[
"from torch.utils.data.sampler import WeightedRandomSampler\nimport sklearn.metrics\nfrom collections import Counter\nfrom copy import deepcopy\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils import data\nfrom torchvision import transforms\nfrom tqdm import tqdm\n\nfrom dataset import PixelSetData\nfrom evaluation import validation\nfrom transforms import (\n Normalize,\n RandomSamplePixels,\n RandomSampleTimeSteps,\n ToTensor,\n RandomTemporalShift,\n Identity,\n)\nfrom utils.focal_loss import FocalLoss\nfrom utils.train_utils import AverageMeter, to_cuda, cycle\n\n\n\ndef train_timematch(student, config, writer, val_loader, device, best_model_path, fold_num, splits):\n source_loader, target_loader_no_aug, target_loader = get_data_loaders(splits, config, config.balance_source)\n\n # Setup model\n pretrained_path = f\"{config.weights}/fold_{fold_num}\"\n pretrained_weights = torch.load(f\"{pretrained_path}/model.pt\")[\"state_dict\"]\n student.load_state_dict(pretrained_weights)\n teacher = deepcopy(student)\n student.to(device)\n teacher.to(device)\n\n # Training setup\n global_step, best_f1 = 0, 0\n if config.use_focal_loss:\n criterion = FocalLoss(gamma=config.focal_loss_gamma)\n else:\n criterion = torch.nn.CrossEntropyLoss()\n\n steps_per_epoch = config.steps_per_epoch\n\n optimizer = torch.optim.Adam(student.parameters(), lr=config.lr, weight_decay=config.weight_decay)\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=config.epochs * steps_per_epoch, eta_min=0)\n\n source_iter = iter(cycle(source_loader))\n target_iter = iter(cycle(target_loader))\n min_shift, max_shift = -config.max_temporal_shift, config.max_temporal_shift\n target_to_source_shift = 0\n\n # To evaluate how well we estimate class distribution\n target_labels = target_loader_no_aug.dataset.get_labels()\n actual_class_distr = estimate_class_distribution(target_labels, config.num_classes)\n\n # estimate an initial guess for shift using Inception Score\n if config.estimate_shift:\n shift_estimator = 'IS' if config.shift_estimator == 'AM' else config.shift_estimator\n target_to_source_shift = estimate_temporal_shift(teacher, target_loader_no_aug, device, min_shift=min_shift, max_shift=max_shift, sample_size=config.sample_size, shift_estimator=shift_estimator)\n if target_to_source_shift >= 0:\n min_shift = 0\n else:\n max_shift = 0\n\n # Use estimated shift to get initial pseudo labels\n pseudo_softmaxes = get_pseudo_labels(teacher, target_loader_no_aug, device, target_to_source_shift, n=None)\n all_pseudo_labels = torch.max(pseudo_softmaxes, dim=1)[1]\n\n source_to_target_shift = 0\n for epoch in range(config.epochs):\n progress_bar = tqdm(range(steps_per_epoch), desc=f\"TimeMatch Epoch {epoch + 1}/{config.epochs}\")\n loss_meter = AverageMeter()\n\n if config.estimate_shift:\n estimated_class_distr = estimate_class_distribution(all_pseudo_labels, config.num_classes)\n writer.add_scalar(\"train/kl_d\", kl_divergence(actual_class_distr, estimated_class_distr), epoch)\n target_to_source_shift = estimate_temporal_shift(teacher,\n target_loader_no_aug, device, estimated_class_distr,\n min_shift=min_shift, max_shift=max_shift, sample_size=config.sample_size,\n shift_estimator=config.shift_estimator)\n if epoch == 0:\n if config.shift_source:\n source_to_target_shift = -target_to_source_shift\n else:\n source_to_target_shift = 0\n min_shift, max_shift = min(target_to_source_shift, 0), max(0, target_to_source_shift)\n writer.add_scalar(\"train/temporal_shift\", 
target_to_source_shift, epoch)\n\n student.train()\n teacher.eval() # don't update BN or use dropout for teacher\n\n all_labels, all_pseudo_labels, all_pseudo_mask = [], [], []\n for step in progress_bar:\n sample_source, (sample_target_weak, sample_target_strong) = next(source_iter), next(target_iter)\n\n # Get pseudo labels from teacher\n pixels_t_weak, mask_t_weak, position_t_weak, extra_t_weak = to_cuda(sample_target_weak, device)\n with torch.no_grad():\n teacher_preds = F.softmax(teacher.forward(pixels_t_weak, mask_t_weak, position_t_weak + target_to_source_shift, extra_t_weak), dim=1)\n pseudo_conf, pseudo_targets = torch.max(teacher_preds, dim=1)\n pseudo_mask = pseudo_conf > config.pseudo_threshold\n\n # Update student on shifted source data and pseudo-labeled target data\n pixels_s, mask_s, position_s, extra_s = to_cuda(sample_source, device)\n source_labels = sample_source['label'].cuda(device, non_blocking=True)\n pixels_t, mask_t, position_t, extra_t = to_cuda(sample_target_strong, device)\n logits_target = None\n loss_target = 0.0\n if config.domain_specific_bn:\n logits_source = student.forward(pixels_s, mask_s, position_s + source_to_target_shift, extra_s)\n if len(torch.nonzero(pseudo_mask)) >= 2: # at least 2 examples required for BN\n logits_target = student.forward(pixels_t[pseudo_mask], mask_t[pseudo_mask], position_t[pseudo_mask], extra_t[pseudo_mask])\n else:\n pixels = torch.cat([pixels_s, pixels_t[pseudo_mask]])\n mask = torch.cat([mask_s, mask_t[pseudo_mask]])\n position = torch.cat([position_s + source_to_target_shift, position_t[pseudo_mask]])\n extra = torch.cat([extra_s, extra_t[pseudo_mask]])\n logits = student.forward(pixels, mask, position, extra)\n logits_source, logits_target = logits[:config.batch_size], logits[config.batch_size:]\n\n loss_source = criterion(logits_source, source_labels)\n if logits_target is not None:\n loss_target = criterion(logits_target, pseudo_targets[pseudo_mask])\n loss = loss_source + config.trade_off * loss_target\n\n # compute loss and backprop\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if scheduler is not None:\n scheduler.step()\n update_ema_variables(student, teacher, config.ema_decay)\n\n # Metrics\n loss_meter.update(loss.item())\n progress_bar.set_postfix(loss=f\"{loss_meter.avg:.3f}\")\n all_labels.extend(sample_target_weak['label'].tolist())\n all_pseudo_labels.extend(pseudo_targets.tolist())\n all_pseudo_mask.extend(pseudo_mask.tolist())\n\n if step % config.log_step == 0:\n writer.add_scalar(\"train/loss\", loss_meter.val, global_step)\n writer.add_scalar(\"train/lr\", optimizer.param_groups[0][\"lr\"], global_step)\n writer.add_scalar(\"train/target_updates\", len(torch.nonzero(pseudo_mask)), global_step)\n\n global_step += 1\n\n progress_bar.close()\n\n # Evaluate pseudo labels\n all_labels, all_pseudo_labels, all_pseudo_mask = np.array(all_labels), np.array(all_pseudo_labels), np.array(all_pseudo_mask)\n pseudo_count = all_pseudo_mask.sum()\n conf_pseudo_f1 = sklearn.metrics.f1_score(all_labels[all_pseudo_mask], all_pseudo_labels[all_pseudo_mask], average='macro', zero_division=0)\n print(f\"Teacher pseudo label F1 {conf_pseudo_f1:.3f} (n={pseudo_count})\")\n writer.add_scalar(\"train/pseudo_f1\", conf_pseudo_f1, epoch)\n writer.add_scalar(\"train/pseudo_count\", pseudo_count, epoch)\n\n writer.add_scalar(\"train/pseudo_f1\", conf_pseudo_f1, epoch)\n writer.add_scalar(\"train/pseudo_count\", pseudo_count, epoch)\n\n if config.run_validation:\n if config.output_student:\n student.eval()\n 
best_f1 = validation(best_f1, None, config, criterion, device, epoch, student, val_loader, writer)\n else:\n teacher.eval()\n best_f1 = validation(best_f1, None, config, criterion, device, epoch, teacher, val_loader, writer)\n\n # Save model final model \n if config.output_student:\n torch.save({'state_dict': student.state_dict()}, best_model_path)\n else:\n torch.save({'state_dict': teacher.state_dict()}, best_model_path)\n\ndef estimate_class_distribution(labels, num_classes):\n return np.bincount(labels, minlength=num_classes) / len(labels)\n\ndef kl_divergence(actual, estimated):\n return np.sum(actual * (np.log(actual + 1e-5) - np.log(estimated + 1e-5)))\n\[email protected]_grad()\ndef update_ema_variables(model, ema, decay=0.99):\n for ema_v, model_v in zip(ema.state_dict().values(), model.state_dict().values()):\n ema_v.copy_(decay * ema_v + (1. - decay) * model_v)\n\n\ndef get_data_loaders(splits, config, balance_source=True):\n weak_aug = transforms.Compose([\n RandomSamplePixels(config.num_pixels),\n Normalize(),\n ToTensor(),\n ])\n\n strong_aug = transforms.Compose([\n RandomSamplePixels(config.num_pixels),\n RandomSampleTimeSteps(config.seq_length),\n RandomTemporalShift(max_shift=config.max_shift_aug, p=config.shift_aug_p) if config.with_shift_aug else Identity(),\n Normalize(),\n ToTensor(),\n ])\n\n source_dataset = PixelSetData(config.data_root, config.source,\n config.classes, strong_aug,\n indices=splits[config.source]['train'],)\n\n if balance_source:\n source_labels = source_dataset.get_labels()\n freq = Counter(source_labels)\n class_weight = {x: 1.0 / freq[x] for x in freq}\n source_weights = [class_weight[x] for x in source_labels]\n sampler = WeightedRandomSampler(source_weights, len(source_labels))\n print(\"using balanced loader for source\")\n source_loader = data.DataLoader(\n source_dataset,\n num_workers=config.num_workers,\n pin_memory=True,\n sampler=sampler,\n batch_size=config.batch_size,\n drop_last=True,\n )\n else:\n source_loader = data.DataLoader(\n source_dataset,\n num_workers=config.num_workers,\n pin_memory=True,\n batch_size=config.batch_size,\n shuffle=True,\n drop_last=True,\n )\n\n target_dataset = PixelSetData(config.data_root, config.target,\n config.classes, None,\n indices=splits[config.target]['train'])\n\n strong_dataset = deepcopy(target_dataset)\n strong_dataset.transform = strong_aug\n weak_dataset = deepcopy(target_dataset)\n weak_dataset.transform = weak_aug\n target_dataset_weak_strong = TupleDataset(weak_dataset, strong_dataset)\n\n no_aug_dataset = deepcopy(target_dataset)\n no_aug_dataset.transform = weak_aug\n # For shift estimation\n target_loader_no_aug = data.DataLoader(\n no_aug_dataset,\n num_workers=config.num_workers,\n batch_size=config.batch_size,\n shuffle=True,\n )\n\n # For mean teacher training\n target_loader_weak_strong = data.DataLoader(\n target_dataset_weak_strong,\n num_workers=config.num_workers,\n batch_size=config.batch_size,\n shuffle=True,\n pin_memory=True,\n drop_last=True,\n )\n\n print(f'size of source dataset: {len(source_dataset)} ({len(source_loader)} batches)')\n print(f'size of target dataset: {len(target_dataset)} ({len(target_loader_weak_strong)} batches)')\n\n return source_loader, target_loader_no_aug, target_loader_weak_strong\n\n\nclass TupleDataset(data.Dataset):\n def __init__(self, dataset1, dataset2):\n super().__init__()\n self.weak = dataset1\n self.strong = dataset2\n assert len(dataset1) == len(dataset2)\n self.len = len(dataset1)\n\n def __len__(self):\n return self.len\n\n def 
__getitem__(self, index):\n return (self.weak[index], self.strong[index])\n\n\[email protected]_grad()\ndef estimate_temporal_shift(model, target_loader, device, class_distribution=None, min_shift=-60, max_shift=60, sample_size=100, shift_estimator='IS'):\n shifts = list(range(min_shift, max_shift + 1))\n model.eval()\n if sample_size is None:\n sample_size = len(target_loader)\n\n target_iter = iter(target_loader)\n shift_softmaxes, labels = [], []\n for _ in tqdm(range(sample_size), desc=f'Estimating shift between [{min_shift}, {max_shift}]'):\n sample = next(target_iter)\n labels.extend(sample['label'].tolist())\n pixels, valid_pixels, positions, extra = to_cuda(sample, device)\n spatial_feats = model.spatial_encoder.forward(pixels, valid_pixels, extra)\n shift_logits = torch.stack([model.decoder(model.temporal_encoder(spatial_feats, positions + shift)) for shift in shifts], dim=1)\n shift_probs = F.softmax(shift_logits, dim=2)\n shift_softmaxes.append(shift_probs)\n shift_softmaxes = torch.cat(shift_softmaxes).cpu().numpy() # (N, n_shifts, n_classes)\n labels = np.array(labels)\n shift_predictions = np.argmax(shift_softmaxes, axis=2) # (N, n_shifts)\n\n # shift_f1_scores = [f1_score(labels, shift_predictions, num_classes) for shift_predictions in all_shift_predictions]\n shift_acc_scores = [(labels == predictions).mean() for predictions in np.moveaxis(shift_predictions, 0, 1)]\n print(f\"Most accurate shift {shifts[np.argmax(shift_acc_scores)]} with {np.max(shift_acc_scores):.3f}\")\n\n p_yx = shift_softmaxes # (N, n_shifts, n_classes)\n p_y = shift_softmaxes.mean(axis=0) # (n_shifts, n_classes)\n\n\n if shift_estimator == 'IS':\n inception_score = np.mean(np.sum(p_yx * (np.log(p_yx + 1e-5) - np.log(p_y[np.newaxis] + 1e-5)), axis=2), axis=0) # (n_shifts)\n\n shift_indices_ranked = np.argsort(inception_score)[::-1] # max is best\n best_shift_idx = shift_indices_ranked[0]\n best_shift = shifts[best_shift_idx]\n print(f\"Best Inception Score shift {best_shift} with accuracy {shift_acc_scores[best_shift_idx]:.3f}\")\n return best_shift\n\n elif shift_estimator == 'ENT':\n entropy_score = -np.mean(np.sum(p_yx * np.log(p_yx + 1e-5), axis=2), axis=0) # (n_shifts)\n shift_indices_ranked = np.argsort(entropy_score) # min is best\n best_shift_idx = shift_indices_ranked[0]\n best_shift = shifts[best_shift_idx]\n print(f\"Best Entropy Score shift {best_shift} with accuracy {shift_acc_scores[best_shift_idx]:.3f}\")\n return best_shift\n\n elif shift_estimator == 'AM':\n assert class_distribution is not None, 'Target class distribution required to compute AM score'\n\n # estimate class distribution\n one_hot_p_y = np.zeros_like(p_y)\n for i in range(len(shifts)):\n one_hot = np.zeros((shift_softmaxes.shape[0], shift_softmaxes.shape[-1])) # (n, classes)\n one_hot[np.arange(one_hot.shape[0]), shift_predictions[:, i]] = 1\n one_hot_p_y[i] = one_hot.mean(axis=0)\n\n c_train = class_distribution\n # kl_d = np.sum(c_train * (np.log(c_train + 1e-5) - np.log(p_y + 1e-5)), axis=1) # soft class distr\n kl_d = np.sum(c_train * (np.log(c_train + 1e-5) - np.log(one_hot_p_y + 1e-5)), axis=1)\n entropy = np.mean(np.sum(-p_yx * np.log(p_yx + 1e-5), axis=2), axis=0)\n am = kl_d + entropy\n shift_indices_ranked = np.argsort(am) # min is best\n best_shift_idx = shift_indices_ranked[0]\n best_shift = shifts[best_shift_idx]\n print(f\"Best AM Score shift {best_shift} with accuracy {shift_acc_scores[best_shift_idx]:.3f}\")\n\n return best_shift\n elif shift_estimator == 'ACC': # for upperbound comparison\n 
shift_indices_ranked = np.argsort(shift_acc_scores)[::-1] # max is best\n return shifts[np.argmax(shift_acc_scores)]\n else:\n raise NotImplementedError\n\n\n\n\[email protected]_grad()\ndef get_pseudo_labels(model, data_loader, device, best_shift, n=500):\n model.eval()\n pseudo_softmaxes = []\n indices = []\n for i, sample in enumerate(tqdm(data_loader, \"computing pseudo labels\")):\n if n is not None and i == n:\n break\n indices.extend(sample[\"index\"].tolist())\n\n pixels, valid_pixels, positions, extra = to_cuda(sample, device)\n logits = model.forward(pixels, valid_pixels, positions + best_shift, extra)\n probs = F.softmax(logits, dim=1).cpu()\n pseudo_softmaxes.extend(probs.tolist())\n\n indices = torch.as_tensor(indices)\n pseudo_softmaxes = torch.as_tensor(pseudo_softmaxes)\n pseudo_softmaxes = pseudo_softmaxes[torch.argsort(indices)]\n\n return pseudo_softmaxes\n"
] |
[
[
"torch.nn.functional.softmax",
"torch.max",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.load",
"torch.cat",
"torch.utils.data.DataLoader",
"numpy.max",
"torch.no_grad",
"numpy.zeros_like",
"numpy.moveaxis",
"torch.nn.CrossEntropyLoss",
"numpy.arange",
"numpy.argmax",
"torch.nonzero",
"torch.argsort",
"numpy.zeros",
"numpy.log",
"numpy.argsort",
"numpy.array",
"torch.as_tensor",
"numpy.bincount"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
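In the timematch.py row above, the 'IS' branch of estimate_temporal_shift scores each candidate shift with an Inception-Score-style mean KL between per-sample softmaxes and their marginal, then keeps the argmax. A self-contained numpy sketch of just that scoring step, with randomly generated softmaxes standing in for model outputs:

    import numpy as np

    rng = np.random.default_rng(0)
    n, n_shifts, n_classes = 8, 5, 3
    shifts = list(range(-2, 3))                    # candidate shifts, len == n_shifts
    logits = rng.normal(size=(n, n_shifts, n_classes))
    p_yx = np.exp(logits) / np.exp(logits).sum(axis=2, keepdims=True)  # per-sample softmax
    p_y = p_yx.mean(axis=0)                        # marginal class distribution per shift

    # mean over samples of KL(p(y|x) || p(y)), one score per shift; higher is better
    inception_score = np.mean(
        np.sum(p_yx * (np.log(p_yx + 1e-5) - np.log(p_y[np.newaxis] + 1e-5)), axis=2),
        axis=0,
    )
    best_shift = shifts[int(np.argmax(inception_score))]
    print(best_shift)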
verryshadow/ExpDesign_SetRank
|
[
"3e3d22e52bb551bb5d80cac499ba43751cd18d7e"
] |
[
"code/SetRank/autoSetRank_ESR.py"
] |
[
"import argparse\nimport sys\nfrom collections import Counter\nfrom collections import defaultdict\nimport itertools\nimport time\nimport numpy as np\nimport math\nimport pickle\nfrom scipy import stats\n\nimport setRank_ESR\n\ndef string2dict(s):\n d = {ele.split(\":\")[0]: float(ele.split(\":\")[1]) for ele in s.split(\",\")}\n return d\n\ndef dict2string(d):\n s = \",\".join(str(k)+\":\"+str(d[k]) for k in d)\n return s\n\ndef multiSetRank(query_words_string, query_entities_string, kb, params_set, DEBUG=False):\n bulk = []\n for params in params_set:\n retrieval_query = setRank_ESR.generate_retrieval_query(\n query_string=query_words_string, entity_string=query_entities_string, field_weights=params, DEBUG=DEBUG\n )\n rescore_query = setRank_ESR.generate_rescore_query(\n query_string=query_words_string, entity_string=query_entities_string, kb=kb, params=params, DEBUG=DEBUG\n )\n search_body = {\n \"size\": 20,\n \"_source\": [\"docno\"],\n \"query\": retrieval_query,\n \"rescore\": {\n \"window_size\": 1000,\n \"query\": {\n \"rescore_query\": rescore_query,\n \"query_weight\": 0,\n \"rescore_query_weight\": 1\n }\n }\n }\n op_dict = {\"index\": setRank_ESR.FLAGS_INDEX_NAME, \"type\": setRank_ESR.FLAGS_TYPE_NAME}\n bulk.append(op_dict)\n bulk.append(search_body)\n\n start = time.time()\n resp = setRank_ESR.es.msearch(body=bulk, request_timeout=600)[\"responses\"]\n end = time.time()\n print(\"Finish retrieve %s pre-rankers' results using %s seconds\" % (len(bulk)/2, (end-start)))\n\n rankings = []\n for res in resp:\n ranking = [hit[\"_source\"][\"docno\"] for hit in res[\"hits\"][\"hits\"]]\n rankings.append(ranking)\n\n return rankings\n\ndef rankAggregate(doc_rankings, maxIters=10, distanceMetric='KT', checkConverge=False, DEBUG=False):\n ## Step 1: Construct the document pool\n docCounter = sorted(Counter(itertools.chain(*doc_rankings)).items(), key=lambda x: -x[1])\n docno2docid = {} # docid aligns with the frequency of docno in all ranking list\n docid2docno = {}\n for idx, ele in enumerate(docCounter): # notice: a small docid indicates most frequent documents\n docno2docid[ele[0]] = idx\n docid2docno[idx] = ele[0]\n rankings = []\n docid2positions = defaultdict(list) # docid -> [(position in rank list, len of rank list)]\n for i, doc_ranking in enumerate(doc_rankings):\n ranking = []\n k = len(doc_ranking) # current rank list i is of length k\n for j, docno in enumerate(doc_ranking):\n docid = docno2docid[docno]\n ranking.append(docid)\n docid2positions[docid].append((i, j, k)) # current document is at position j of rank list i which is of size k\n rankings.append(ranking)\n\n p = len(doc_rankings)\n K = len(docno2docid)\n\n if DEBUG:\n print(\"Number of ranker p = %s\" % p)\n print(\"Size of document pool K = %s\" % K)\n for _, r in enumerate(rankings):\n print(\"Ranking list %s : \\n \\t\\t%s\" % (_, r))\n for j in sorted(docid2positions.keys()):\n print(j, docid2positions[j])\n for docid in docid2docno:\n print(docid, \"=>\", docid2docno[docid])\n\n ## Step 2: Iteratively apply weighted rank aggregation\n alphas = np.ones(p) / p\n prev_aggregated_rank = None\n convergedFlag = False\n for iter in range(maxIters):\n ## weighted Borda Counting\n docid2scores = defaultdict(float)\n for docid in docid2positions:\n score = 0.0\n for pos in docid2positions[docid]:\n score += (alphas[pos[0]] * (pos[2]-pos[1]))\n docid2scores[docid] = score\n\n aggregated_rank = [ele[0] for ele in sorted(docid2scores.items(), key = lambda x:-x[1])]\n docid2rank = {docid:r for r, docid in 
enumerate(aggregated_rank)}\n if DEBUG:\n print(\"Iteration: %s, aggregated list: %s\" % (iter, aggregated_rank))\n print(\"Iteration: %s, docid2rank: %s\" % (iter, docid2rank))\n if aggregated_rank == prev_aggregated_rank:\n print(\"Converged at iteration %s\" % iter)\n convergedFlag = True\n break\n else:\n if DEBUG and prev_aggregated_rank:\n # print(\"alpha:\", alphas)\n differences = [] # (docno, prev_rank, current_rank)\n for i in range(len(prev_aggregated_rank)):\n if docid2rank[prev_aggregated_rank[i]] != i:\n differences.append((docid2docno[prev_aggregated_rank[i]], i, docid2rank[prev_aggregated_rank[i]]))\n # for ele in differences:\n # print(\"Position changed doc:\", ele)\n prev_aggregated_rank = aggregated_rank\n\n ## confidence score alignment\n positions2discouts = {}\n consider_not_appeared_docs = False\n rank_inversion_cnt = 0\n for r_id, r in enumerate(rankings):\n k = len(r)\n distance = 0.0\n ## Include influence of those not appeared documents\n if consider_not_appeared_docs:\n not_appeared_docs = set(docid2rank.keys()) - set(r) # set of docids that are not appeared in current rank list\n for a in range(k-1):\n for b in range(a+1,k) :\n pi_a = docid2rank[r[a]]\n pi_b = docid2rank[r[b]]\n if pi_a > pi_b: # a position inversion\n if distanceMetric == \"dKT\": # discounted KT distance\n if (pi_a, pi_b) in positions2discouts:\n discount = positions2discouts[(pi_a, pi_b)]\n else:\n # change zero-index to one-index\n discount = (1.0 / math.log(1+pi_b+1, 2)) - (1.0 / math.log(1+pi_a+1, 2))\n positions2discouts[(pi_a, pi_b)] = discount\n distance += (discount * 1.0)\n rank_inversion_cnt += 1\n elif distanceMetric == 'KT': # normal KT distance\n distance += 1.0\n rank_inversion_cnt += 1\n else:\n print(\"[ERROR] Unsupported distanceMetric: %s\" % distanceMetric)\n if consider_not_appeared_docs:\n for not_appeared_doc in not_appeared_docs:\n pi_appear = docid2rank[r[a]]\n pi_not_appeared_doc = docid2rank[not_appeared_doc]\n if pi_not_appeared_doc > pi_appear:\n if distanceMetric == \"dKT\": # discounted KT distance\n if (pi_not_appeared_doc, pi_appear) in positions2discouts:\n discount = positions2discouts[(pi_not_appeared_doc, pi_appear)]\n else:\n # change zero-index to one-index\n discount = (1.0 / math.log(1 + pi_appear + 1, 2)) - (1.0 / math.log(1 + pi_not_appeared_doc + 1, 2))\n positions2discouts[(pi_not_appeared_doc, pi_appear)] = discount\n distance += (discount * 1.0)\n rank_inversion_cnt += 1\n elif distanceMetric == 'KT': # normal KT distance\n distance += 1.0\n rank_inversion_cnt += 1\n else:\n print(\"[ERROR] Unsupported distanceMetric: %s\" % distanceMetric)\n\n alphas[r_id] = math.exp(-1.0 * distance)\n\n Z = sum(alphas)\n alphas = alphas / Z\n uniform_dist = np.ones(p) / p\n kl = stats.entropy(pk=alphas, qk=uniform_dist)\n print(\"Iteration: %s, confidence scores normalizer = %s\" % (iter, Z))\n print(\"Iteration: %s, kl to uniform = %s\" % (iter, kl))\n print(\"Iteration: %s, total rank inversion = %s\" % (iter, rank_inversion_cnt))\n print(\"Iteration: %s, confidence scores: %s\" % (iter, alphas))\n\n if not convergedFlag:\n print(\"Not converged after %s iterations\" % maxIters)\n aggregated_rank_docno = [docid2docno[docid] for docid in aggregated_rank]\n return (alphas, aggregated_rank_docno)\n\ndef rankAggregateCorpus(corpus_doc_rankings, maxIters=10, distanceMetric=\"KT\", checkVonverge=False, DEBUG=False):\n p = len(corpus_doc_rankings[0]) # number of distinct rankers\n if DEBUG:\n print(\"Number of ranker p = %s\" % p)\n\n alphas = np.ones(p) / p\n for 
iter in range(maxIters):\n if DEBUG:\n print(\"Iteration: %s\" % iter)\n print(\"Alphas: %s\" % alphas)\n\n alpha_distances = np.zeros(p)\n ## go through the query set\n for qid, doc_rankings in enumerate(corpus_doc_rankings):\n ## obtain the docid\n docCounter = sorted(Counter(itertools.chain(*doc_rankings)).items(), key=lambda x: -x[1])\n docno2docid = {}\n docid2docno = {}\n for idx, ele in enumerate(docCounter): # notice: a small docid indicates most frequent documents\n docno2docid[ele[0]] = idx\n docid2docno[idx] = ele[0]\n rankings = []\n docid2positions = defaultdict(list) # docid -> [(position in rank list, len of rank list)]\n for i, doc_ranking in enumerate(doc_rankings):\n ranking = []\n k = len(doc_ranking) # current rank list i is of length k\n for j, docno in enumerate(doc_ranking):\n docid = docno2docid[docno]\n ranking.append(docid)\n docid2positions[docid].append((i, j, k)) # current document is at position j of rank list i which is of size k\n rankings.append(ranking)\n\n ## weighted Borda Counting\n docid2scores = defaultdict(float)\n for docid in docid2positions:\n score = 0.0\n for pos in docid2positions[docid]:\n score += (alphas[pos[0]] * (pos[2] - pos[1]))\n docid2scores[docid] = score\n\n aggregated_rank = [ele[0] for ele in sorted(docid2scores.items(), key=lambda x: -x[1])]\n docid2rank = {docid: r for r, docid in enumerate(aggregated_rank)}\n\n ## accumlate each parameter's dKT\n positions2discouts = {}\n query_distance_sum = 0\n for r_id, r in enumerate(rankings): # r_id is the index of its corresponding parameter\n k = len(r)\n distance = 0.0\n for a in range(k - 1):\n for b in range(a + 1, k):\n pi_a = docid2rank[r[a]]\n pi_b = docid2rank[r[b]]\n if pi_a > pi_b: # a position inversion\n if distanceMetric == \"dKT\": # discounted KT distance\n if (pi_a, pi_b) in positions2discouts:\n discount = positions2discouts[(pi_a, pi_b)]\n else:\n # change zero-index to one-index\n discount = (1.0 / math.log(1 + pi_b + 1, 2)) - (1.0 / math.log(1 + pi_a + 1, 2))\n positions2discouts[(pi_a, pi_b)] = discount\n distance += (discount * 1.0)\n query_distance_sum += (discount * 1.0)\n elif distanceMetric == 'KT': # normal KT distance\n distance += 1.0\n query_distance_sum += 1.0\n else:\n print(\"[ERROR] Unsupported distanceMetric: %s\" % distanceMetric)\n # accumlate the distance\n alpha_distances[r_id] += distance\n if DEBUG:\n print(\"query_distance_sum for query %s = %s\" % (qid,query_distance_sum))\n\n if DEBUG:\n Z_distance = sum(alpha_distances)\n print(\"Sum of distances at iteration %s = %s\" % (iter, Z_distance))\n print(\"Distances at iteration %s = %s\" % (iter, alpha_distances))\n\n ## Adjust confidence score\n # alpha_distances = np.exp(-1.0 * alpha_distances)\n alpha_distances = 1.0 / alpha_distances\n Z = sum(alpha_distances)\n alphas = alpha_distances / Z\n\n return alphas\n\ndef main(args):\n queries = setRank_ESR.load_query(args)\n kb = setRank_ESR.load_kb(args)\n result_all = []\n\n ## Step 1: determine the anchor parameter and the parameters that we want to tune\n anchor_params = {\n 'title': 20.0, 'abstract': 5.0, 'keyphrase': 16.0,\n 'title_ana': 20.0, 'abstract_ana': 5.0, 'keyphrase_ana': 16.0, 'bodytext_ana': 1.0,\n 'title_mu': 1000.0, 'abstract_mu': 1100.0, 'keyphrase_mu': 1000.0,\n 'title_ana_mu': 1000.0, 'abstract_ana_mu': 1000.0, 'keyphrase_ana_mu': 1000.0, 'bodytext_ana_mu': 1000.0,\n \"entity_lambda\": 0.5, \"type_interaction\": 1.0,\n \"consider_entity_set\": 1.0, \"consider_word_set\" : 1.0, \"consider_type\":1.0, \"word_dependency\":1.0\n }\n 
params_names = [\"title\", \"abstract\", \"title_mu\", \"abstract_mu\", \"entity_lambda\"]\n\n ## Step 2: fix the parameter set that we want to tune, based on the mode\n if args.mode == \"tune\" or args.mode == \"tune-best-rank\":\n params_values = [\n [5.0, 10.0, 15.0, 20.0], # -> title\n [1.0, 3.0, 5.0, 7.0], # -> abstract\n [500, 1000, 1500, 2000], # -> title_mu\n [500, 1000, 1500, 2000], # -> abstract_mu\n [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8], # -> entity_lambda\n ]\n all_combinations = list(itertools.product(*params_values))\n params_set = []\n for ele in all_combinations:\n tmp_params = anchor_params.copy()\n for param_index, param_value in enumerate(ele):\n tmp_params[params_names[param_index]] = param_value\n params_set.append(tmp_params)\n elif args.mode == \"rank\":\n params_values = [\n [15.0, 3.0, 15.0, 0.3, 1.0],\n [15.0, 3.0, 15.0, 0.3, 0.5],\n [15.0, 3.0, 15.0, 0.3, 1.5],\n [20.0, 3.0, 15.0, 0.3, 1.5],\n [15.0, 3.0, 15.0, 0.4, 0.5]\n ]\n params_set = []\n for ele in params_values:\n print(\"ele:\", ele)\n tmp_params = anchor_params.copy()\n for param_index, param_value in enumerate(ele):\n print(\"param_index\", param_index, \"param_value\", param_value)\n tmp_params[params_names[param_index]] = param_value\n params_set.append(tmp_params)\n else:\n print(\"Unsupported mode: %s\" % args.mode)\n return\n\n ## Step 3: auto model selection over either query or corpus level\n if args.agglevel == \"query\":\n saved_result = (int(args.load_pre_saved_rankings) == 1) ## load results from query\n if saved_result:\n print(\"=== Loading pre-saved ranking results ===\")\n with open(args.pre_saved_rankings, \"rb\") as fin:\n all_docno_rankings = pickle.load(fin) # a list of docno_rankings\n else:\n print(\"=== Cannot load pre-saved ranking results, generate rankings from scratch ===\")\n all_docno_rankings = {} # query_id -> docno_rankings\n\n confidence_over_all_queries = np.zeros(len(params_set))\n for query in queries:\n query_id = query[0]\n query_string = query[1]\n query_entities_list = []\n for k, v in query[2].items():\n for i in range(v):\n query_entities_list.append(k)\n query_entities_string = \" \".join(query_entities_list)\n\n print(\"=== Running query: %s (id = %s) ===\" % (query_string, query_id))\n if saved_result:\n rankings = all_docno_rankings[query_id]\n else:\n rankings = multiSetRank(query_string, query_entities_string, kb, params_set, DEBUG=False)\n all_docno_rankings[query_id] = rankings\n (confidences, aggregated_rank) = rankAggregate(rankings, DEBUG=True)\n confidence_over_all_queries += confidences\n\n if args.mode == \"tune-best-rank\": # use the best parameter to rank this query again\n best_parameter = params_set[np.argmax(confidences)]\n print(\"Best parameters for query %s: %s\" % (query_id, best_parameter))\n res = setRank_ESR.setRank(query_string, query_entities_string, kb, best_parameter)\n rank = 1\n for hit in res['hits']['hits']:\n result_all.append([query_id, \"Q0\", hit[\"_source\"][\"docno\"], str(rank), str(hit[\"_score\"]), \"autoSetRank\"])\n rank += 1\n else:\n rank = 1\n for docno in aggregated_rank:\n result_all.append([query_id, \"Q0\", docno, str(rank), str(100-rank), \"autoSetRank\"])\n rank += 1\n\n ## save results\n if not saved_result:\n with open(args.pre_saved_rankings, \"wb\") as fout:\n print(\"=== Save rankings for next time's usage ===\")\n pickle.dump(all_docno_rankings, fout, protocol=pickle.HIGHEST_PROTOCOL)\n\n elif args.agglevel == \"corpus\": ## corpus level aggregation\n load_data_from_pickle = False\n if 
load_data_from_pickle:\n with open(\"all_rankings.pickle\", \"rb\") as fin:\n all_docno_rankings = pickle.load(fin)\n else:\n ## step 1: obtain all query\n all_docno_rankings = []\n for query in queries:\n query_id = query[0]\n query_string = query[1]\n query_entities_list = []\n for k, v in query[2].items():\n for i in range(v):\n query_entities_list.append(k)\n query_entities_string = \" \".join(query_entities_list)\n\n print(\"=== Running query %s (id = %s) ===\" % (query_string, query_id))\n rankings = multiSetRank(query_string, query_entities_string, kb, params_set, DEBUG=False)\n all_docno_rankings.append(rankings)\n\n with open(args.pre_saved_rankings, \"wb\") as fout:\n pickle.dump(all_docno_rankings, fout, protocol=pickle.HIGHEST_PROTOCOL)\n\n ## step 2: rank aggregation\n confidence_over_all_queries = rankAggregateCorpus(all_docno_rankings, DEBUG=True)\n else:\n print(\"[ERROR] Unsupported agglevel configuration: %s\" % args.agglevel)\n return\n\n params2confidence = [(params, confidence_over_all_queries[i]) for i, params in enumerate(params_set)]\n for ele in sorted(params2confidence, key = lambda x:-x[1])[0:10]:\n print(\"Confidence = %s, parameters = %s\" % (ele[1], ele[0]))\n\n if args.mode == \"query\": # save results only for query level aggregation\n setRank_ESR.save_results(args, result_all)\n print(\"Finish saving results to path: %s\" % args.output)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(prog='autoSetRank_ESR.py',\n description='Use rank aggregation to automatically learn parameters in setRank'\n 'algorithm on S2-CS dataset.')\n parser.add_argument('-query', required=False, default=\"../../data/S2-CS/s2_query.json\",\n help='File name of test queries.')\n parser.add_argument('-output', required=False, default=\"../../results/s2/auto-tune.run\",\n help='File name of output results.')\n parser.add_argument('-kb', required=False, default=\"../../data/S2-CS/s2_entity_type.tsv\")\n parser.add_argument('-mode', required=False, default=\"tune\",\n help=\"mode can be 'tune', 'rank', 'tune-best-rank'.\"\n \"tune: aggregate over all candidate parameters and save the aggregated rank list \"\n \" based on all the candidate rank lists,\"\n \"rank: use topK (selected) parameters to obtain pre-ranked list and aggregate them\"\n \"tune-best-rank: first tune the query level best parameters and return the rank list for that\"\n \" query using the parameter suits it best, only works for aggLevel=query\")\n parser.add_argument('-agglevel', required=False, default=\"query\",\n help=\"agglevel can be 'query' or 'corpus', and it represents the level of rank aggregation\")\n parser.add_argument('-pre_saved_rankings', required=False, default=\"\",\n help=\"name of (previously saved OR about to be saved) ranking results\")\n parser.add_argument('-load_pre_saved_rankings', required=False, default=\"0\",\n help=\"set load_pre_saved_rankings to True if using presaved rankings\")\n args = parser.parse_args()\n sys.exit(main(args))\n\n"
] |
[
[
"scipy.stats.entropy",
"numpy.zeros",
"numpy.argmax",
"numpy.ones"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
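The rank aggregation in the autoSetRank_ESR.py row above alternates weighted Borda counting with confidence re-estimation. The Borda step on its own is compact enough to sketch standalone; the document ids and weights below are made up for illustration:

    from collections import defaultdict

    def weighted_borda(rankings, alphas):
        # each ranker i contributes alphas[i] * (list_length - position) to a doc's score
        scores = defaultdict(float)
        for alpha, ranking in zip(alphas, rankings):
            k = len(ranking)
            for pos, doc in enumerate(ranking):
                scores[doc] += alpha * (k - pos)
        return [doc for doc, _ in sorted(scores.items(), key=lambda x: -x[1])]

    rankings = [["d1", "d2", "d3"], ["d2", "d1", "d3"], ["d2", "d3", "d1"]]
    alphas = [1 / 3, 1 / 3, 1 / 3]
    print(weighted_borda(rankings, alphas))  # ['d2', 'd1', 'd3']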
rutgerhartog/apocrypha
|
[
"29e475219a5e0d510899a512cfc7be030a32c758"
] |
[
"modules/chisquare.py"
] |
[
"from scipy.stats import chisquare as chi2\n\n\ndef calculate_chisquare(text: bytes) -> float:\n return chi2(text).statistics\n"
] |
[
[
"scipy.stats.chisquare"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
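On the chisquare row above: scipy.stats.chisquare returns a result object whose fields are statistic and pvalue, which is why the snippet reads the chi-square value from the statistic attribute. A minimal usage example:

    from scipy.stats import chisquare

    counts = [18, 21, 16, 25, 20]          # observed frequencies
    result = chisquare(counts)             # uniform expected frequencies by default
    print(result.statistic, result.pvalue)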
billpsomas/ibot
|
[
"c6fbce7e2a59780f39ad7304ed9a8b1acf038d2d",
"c6fbce7e2a59780f39ad7304ed9a8b1acf038d2d"
] |
[
"analysis/backgrounds_challenge/tools/model_utils.py",
"analysis/combine_ckpt.py"
] |
[
"# Copyright (c) ByteDance, Inc. and its affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\n\"\"\"\nCopy-paste from backgrounds_challenge library:\nhttps://github.com/MadryLab/backgrounds_challenge\n\"\"\"\n\nimport os\nimport timm\nimport torch as ch\n\nfrom tqdm import tqdm as tqdm\n\nclass InputNormalize(ch.nn.Module):\n '''\n A module (custom layer) for normalizing the input to have a fixed \n mean and standard deviation (user-specified).\n '''\n def __init__(self, new_mean, new_std):\n super(InputNormalize, self).__init__()\n new_std = new_std[..., None, None]\n new_mean = new_mean[..., None, None]\n\n self.register_buffer(\"new_mean\", new_mean)\n self.register_buffer(\"new_std\", new_std)\n\n def forward(self, x):\n x = ch.clamp(x, 0, 1)\n x_normalized = (x - self.new_mean)/self.new_std\n return x_normalized\n\nclass NormalizedModel(ch.nn.Module):\n \"\"\"\n \"\"\"\n def __init__(self, model, dataset):\n super(NormalizedModel, self).__init__()\n self.normalizer = InputNormalize(dataset.mean, dataset.std)\n self.model = model\n\n def forward(self, inp):\n \"\"\"\n \"\"\"\n normalized_inp = self.normalizer(inp)\n output = self.model(normalized_inp)\n return output\n\ndef make_and_restore_model(*_, arch, dataset, resume_path=None,\n parallel=True, pytorch_pretrained=False, use_normalization=True):\n \"\"\"\n \"\"\"\n if pytorch_pretrained:\n classifier_model = timm.create_model(arch, pretrained=pytorch_pretrained)\n else:\n classifier_model = dataset.get_model(arch, pytorch_pretrained) if \\\n isinstance(arch, str) else arch\n if use_normalization:\n # Normalize by dataset mean and std, as is standard.\n model = NormalizedModel(classifier_model, dataset)\n else:\n model = classifier_model\n\n # optionally resume from a checkpoint\n checkpoint = None\n if resume_path:\n if os.path.isfile(resume_path):\n print(\"=> loading checkpoint '{}'\".format(resume_path))\n checkpoint = ch.load(resume_path)\n \n # Makes us able to load models saved with legacy versions\n state_dict_path = 'model'\n if not ('model' in checkpoint):\n state_dict_path = 'state_dict'\n\n sd = checkpoint[state_dict_path]\n sd = {k[len('module.'):] if ('module.' 
in k) else k: v for k, v in sd.items()}\n # sd = {k.replace('backbone.', ''):v for k, v in sd.items()}\n # To deal with some compatability issues\n # model_dict = model.state_dict()\n # sd = {k: v for k, v in sd.items() if k in model_dict}\n # model_dict.update(sd)\n if use_normalization:\n msg = model.model.load_state_dict(sd, strict=False)\n print(msg)\n else:\n msg = model.load_state_dict(sd, strict=False)\n print(msg)\n \n if parallel:\n model = ch.nn.DataParallel(model)\n model = model.cuda()\n \n print(\"=> loaded checkpoint '{}' (epoch {})\".format(resume_path, checkpoint['epoch']))\n else:\n error_msg = \"=> no checkpoint found at '{}'\".format(resume_path)\n raise ValueError(error_msg)\n\n return model, checkpoint\n\ndef eval_model(loader, model, map_to_in9, map_in_to_in9=True):\n \"\"\"\n *Internal function*\n Args:\n loader (iterable) : an iterable loader of the form \n `(image_batch, label_batch)`\n model: model to evaluate\n map_in_to_in9: whether or not to map model outputs from\n ImageNet class labels to ImageNet9 class labels\n Returns:\n The average top1 accuracy across the epoch.\n \"\"\"\n\n model = model.eval()\n\n iterator = tqdm(enumerate(loader), total=len(loader))\n correct = 0\n for i, (inp, target) in iterator:\n output = model(inp)\n \n _, pred = output.topk(1, 1, True, True)\n pred = pred.cpu().detach()[:, 0]\n if map_in_to_in9:\n if map_to_in9 is None:\n raise ValueError('Need to pass in mapping from IN to IN9')\n pred_list = list(pred.numpy())\n pred = ch.LongTensor([map_to_in9[str(x)] for x in pred_list])\n correct += (pred==target).sum().item()\n \n return correct/len(loader.dataset)\n\ndef adv_bgs_eval_model(bg_loader, model, im, mask, fg_class, batch_size, map_to_in9, map_in_to_in9=True):\n \"\"\"\n *Internal function*\n Args:\n loader (iterable) : an iterable loader of the form \n `(image_batch, label_batch)`\n model: model to evaluate\n use_mapping: whether or not to map model outputs from\n ImageNet class labels to ImageNet9 class labels\n Returns:\n The average top1 accuracy across the epoch.\n \"\"\"\n\n model = model.eval()\n\n big_im = im.repeat(batch_size, 1, 1, 1)\n big_mask = mask.repeat(batch_size, 1, 1, 1)\n \n # iterator = tqdm(enumerate(bg_loader), total=len(bg_loader))\n for i, (inp, target) in enumerate(bg_loader):\n # for i, (inp, target) in iterator:\n if inp.shape[0] != batch_size: # For handling the last batch\n big_im = im.repeat(inp.shape[0], 1, 1, 1)\n big_mask = mask.repeat(inp.shape[0], 1, 1, 1)\n combined = inp * (1 - big_mask) + big_mask * big_im\n # import matplotlib\n # matplotlib.use('Agg')\n # import matplotlib.pyplot as plt\n # from torchvision import transforms\n # for_viz = transforms.ToPILImage()(combined[0])\n # plt.imshow(for_viz)\n # plt.savefig('bg_challenge.png')\n \n output = model(combined)\n\n _, pred = output.topk(1, 1, True, True)\n pred = pred.cpu().detach()[:, 0]\n if map_in_to_in9:\n pred_list = list(pred.numpy())\n pred = ch.LongTensor([map_to_in9[str(x)] for x in pred_list])\n \n has_adversarial = (pred != fg_class).any().item()\n if has_adversarial:\n return True\n return False\n\n",
"import torch\nfrom argparse import ArgumentParser\n\nparser = ArgumentParser()\nparser.add_argument('--checkpoint_backbone', required=True, type=str)\nparser.add_argument('--checkpoint_linear', required=True, type=str)\nparser.add_argument('--output_file', required=True, type=str)\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n backbone = torch.load(args.checkpoint_backbone)['teacher']\n model = torch.load(args.checkpoint_linear)\n linear = model['state_dict']\n head_index = model['epoch']\n\n new_linear = {}\n for key, val in linear.items():\n splits = key.split('.')\n new_linear['.'.join(splits[1:])] = val\n backbone.update(new_linear)\n backbone = {k.replace('backbone.', ''):v for k, v in backbone.items()}\n model['state_dict'] = backbone\n \n print(f\"save {head_index}th head with acc {model['best_acc']}\")\n torch.save(model, args.output_file)"
] |
[
[
"torch.clamp",
"torch.nn.DataParallel",
"torch.load"
],
[
"torch.save",
"torch.load"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
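In the model_utils.py row above, checkpoint loading hinges on stripping the 'module.' prefix that torch.nn.DataParallel adds to state-dict keys. The key-renaming idiom in isolation, on a toy dict so no torch is needed:

    sd = {"module.encoder.weight": 1.0, "head.bias": 2.0}
    # same comprehension shape as in make_and_restore_model
    sd = {k[len("module."):] if ("module." in k) else k: v for k, v in sd.items()}
    print(sd)  # {'encoder.weight': 1.0, 'head.bias': 2.0}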
jjepsuomi/Bayesian-maximum-variance-inclusion
|
[
"d1e8bf1464a26ff4d313bb5c4ba2800e75609c22"
] |
[
"bmvi_toolbox.py"
] |
[
"#**************************************************************\n#**************************************************************\n#**************************************************************\n# \n# - This Python file contains the functions required for \n# demonstrating the functionality of Bayesian maximum variance\n# inclusion (BMVI) sampling method. Implementations for BMVI, LPM and\n# SRS are given. The prediction method used with all samplers\n# is the standard regularized least squares (RLS) method with \n# a linear kernel. Prediction performance and hyperparameter \n# selection is implemented via 10-fold cross-validation. \n# \n# If one wishes to implement BMVI via other prediction methods\n# then the respective Hessian and gradient (see the corresponding\n# article) needs to be edited. \n#\n#**************************************************************\n#**************************************************************\n#**************************************************************\n\nimport numpy as np\n\n###############################################################\n#\n# - DESCRIPTION: This function implements the Bayesian maximum\n# variance inclusion (BMVI) sampling for a given data set. \n# \n# - INPUTS: \n # 'X' X contains the input data with first column assumed to \n# be all ones (constant term). Each row corresponds to one\n# observation.\n# 'y' corresponds to the vector of output values.\n# 'n_samples' integer, the number of data points to be sampled. \n#\n# - OUTPUTS: \n# 'inclusion_index_set' a list of data indexes sampled.\n# 'generalization_error_list' a list of estimated predicton \n# errors as a function of number of sampled data points. \n#\n###############################################################\ndef BMVI(X, y, n_samples):\n unsampled_data_set_indexes = np.array(range(0, len(y)))\n inclusion_index_set = []\n generalization_error_list = []\n posterior_variance_list = None\n if n_samples >= len(y):\n n_samples = len(y)-1\n for sample_ind in range(0, n_samples):\n if np.mod(sample_ind, 100) == 0:\n print('BMVI sampling ' + str(sample_ind+1) + 'th data point (' + str(n_samples+1) + ' in total)')\n if sample_ind == 0: # First sample, random start\n start_ind = np.random.randint(0, len(y), 2) # We need to have two initial samples because of cross-validation\n inclusion_index_set.append(unsampled_data_set_indexes[start_ind][:])\n inclusion_index_set = inclusion_index_set[0]\n inclusion_index_set = inclusion_index_set.tolist()\n unsampled_data_set_indexes = np.delete(unsampled_data_set_indexes, start_ind)\n else: # \n # Do BMVI selection\n if len(posterior_variance_list) > 0:\n max_var_ind = np.where(posterior_variance_list == np.max(posterior_variance_list))[0][0]\n inclusion_index_set.append(unsampled_data_set_indexes[max_var_ind])\n unsampled_data_set_indexes = np.delete(unsampled_data_set_indexes, max_var_ind) \n X_sampled = X[inclusion_index_set, :]\n y_sampled = y[inclusion_index_set]\n X_unsampled = X[unsampled_data_set_indexes, :]\n y_unsampled = y[unsampled_data_set_indexes]\n # Train a RLS prediction model on the currently sampled data \n w_mp, Hessian = solveRLS(X_sampled, y_sampled)\n y_predict = X_unsampled@w_mp\n posterior_variance_list = []\n for i in range(0, X_unsampled.shape[0]):\n posterior_variance_list.append(X_unsampled[i,:]@np.linalg.pinv(Hessian)@np.transpose(X_unsampled[i,:]))\n generalization_error_list.append(np.mean(np.abs(y_predict-y_unsampled)))\n print(\"\\n\")\n return inclusion_index_set, 
generalization_error_list\n\n###############################################################\n#\n# - DESCRIPTION: This function implements the local pivotal\n# method (LPM) for a given data set. \n# \n# - INPUTS: \n # 'X' X contains the input data with first column assumed to \n# be all ones (constant term). Each row corresponds to one\n# observation.\n# 'y' corresponds to the vector of output values.\n# 'n_samples' integer, the number of data points to be sampled. \n# 'p' initial inclusion probability. This determines how many data \n# points LPM chooses to incluse. \n#\n# - OUTPUTS: \n# 'inclusion_index_set' a list of data indexes sampled.\n# 'generalization_error_list' a list of estimated predicton \n# errors as a function of number of sampled data points. \n#\n###############################################################\ndef LPM(X, y, n_samples, p):\n unsampled_data_set_indexes = np.array(range(0, len(y)))\n inclusion_index_set = []\n generalization_error_list = []\n # index_probs contains information about selection order and inclusion probabilities. \n index_probs = np.array([range(0, len(y)), np.ones((len(y)))*p, np.zeros((len(y)))])\n # Temp is used for updating the above variable. \n temp = index_probs[:, (np.where(index_probs[1,:] > 0) and np.where(index_probs[1,:] < 1))[0]]\n cnt = 0\n # With LPM we first determine the sampling design and the do prediction estimation. \n while temp.shape[1] > 1 and np.sum(temp[1,:]) >= 1: # Latter condition is required to ensure the loop does not continue indefinitely. \n rand_ind = np.random.randint(0, temp.shape[1])\n sample_ind = int(temp[0, rand_ind])\n temp = np.delete(temp, rand_ind, axis=1)\n p1 = index_probs[1, sample_ind]\n sample_X = X[sample_ind, :]\n neighbors_X = X[temp[0,:].astype(int), :]\n dist_mat = np.sqrt(np.sum(np.power(neighbors_X-sample_X, 2), axis=1))\n min_dist_index = np.where(dist_mat == np.min(dist_mat))[0][0]\n neighbor_ind = int(temp[0, min_dist_index])\n p2 = temp[1, min_dist_index]\n randNum = np.random.uniform()\n if p1+p2 < 1:\n if randNum < p1/float((p1+p2)):\n p1 = p1+p2\n p2 = 0\n else:\n p1 = 0\n p2 = p1+p2\n elif p1+p2 >= 1:\n if randNum < (1-p1)/float((2-p1-p2)):\n p1 = p1+p2-1\n p2 = 1\n else:\n p1 = 1\n p2 = p1+p2-1\n index_probs[1, sample_ind] = p1\n index_probs[1, neighbor_ind] = p2\n if p1 == 1:\n index_probs[2, sample_ind] = cnt\n cnt += 1\n elif p2 == 1:\n index_probs[2, neighbor_ind] = cnt\n cnt += 1\n temp = index_probs[:, (np.where(index_probs[1,:] > 0) and (np.where(index_probs[1,:] < 1)))[0]]\n # Sample the last indexes last\n unsampled_inds = np.where(index_probs[1,:] < 1)[0]\n cnt = np.max(index_probs[2,:]) + 1\n if unsampled_inds.size > 0:\n cnt = np.max(index_probs[2,:]) + 1\n for ind in unsampled_inds:\n index_probs[1,ind] = 1\n index_probs[2,ind] = cnt\n cnt += 1\n # Sort the data indexes based on selection order. \n index_probs = index_probs[:, np.argsort(index_probs[2,:])]\n inclusion_index_set = index_probs[0,:]\n # Implement cross-validation and prediction. \n for i in range(2, len(inclusion_index_set)-1): # We can't sample all data and do prediction estimation. Minumum 2 data points required. 
\n if np.mod(i, 100) == 0:\n print('LPM sampling ' + str(i+1) + 'th data point (' + str(len(inclusion_index_set)) + ' in total)')\n X_sampled = X[inclusion_index_set[0:i].astype(int), :]\n y_sampled = y[inclusion_index_set[0:i].astype(int)]\n X_unsampled = X[inclusion_index_set[i:len(inclusion_index_set)].astype(int), :]\n y_unsampled = y[inclusion_index_set[i:len(inclusion_index_set)].astype(int)]\n # Train a RLS prediction model on the currently sampled data \n w_mp, Hessian = solveRLS(X_sampled, y_sampled)\n y_predict = X_unsampled@w_mp\n generalization_error_list.append(np.mean(np.abs(y_predict-y_unsampled)))\n print(\"\\n\")\n return inclusion_index_set, generalization_error_list\n\n###############################################################\n#\n# - DESCRIPTION: This function implements the simple random \n# sampling (SRS) method for a given data set. \n# \n# - INPUTS: \n # 'X' X contains the input data with first column assumed to \n# be all ones (constant term). Each row corresponds to one\n# observation.\n# 'y' corresponds to the vector of output values.\n# 'n_samples' integer, the number of data points to be sampled. \n#\n# - OUTPUTS: \n# 'inclusion_index_set' a list of data indexes sampled.\n# 'generalization_error_list' a list of estimated predicton \n# errors as a function of number of sampled data points. \n#\n###############################################################\ndef SRS(X, y, n_samples):\n unsampled_data_set_indexes = np.array(range(0, len(y)))\n inclusion_index_set = []\n generalization_error_list = []\n if n_samples >= len(y):\n n_samples = len(y)-1\n for sample_ind in range(0, n_samples):\n if np.mod(sample_ind, 100) == 0:\n print('SRS sampling ' + str(sample_ind+1) + 'th data point (' + str(n_samples+1) + ' in total)')\n if sample_ind == 0: # First sample, random start\n start_ind = np.random.randint(0, len(y), 2) # We need to have two initial samples because of cross-validation\n inclusion_index_set.append(unsampled_data_set_indexes[start_ind][:])\n inclusion_index_set = inclusion_index_set[0]\n inclusion_index_set = inclusion_index_set.tolist()\n unsampled_data_set_indexes = np.delete(unsampled_data_set_indexes, start_ind)\n else:\n rand_ind = np.random.randint(0, len(unsampled_data_set_indexes))\n inclusion_index_set.append(unsampled_data_set_indexes[rand_ind])\n unsampled_data_set_indexes = np.delete(unsampled_data_set_indexes, rand_ind)\n X_sampled = X[inclusion_index_set, :]\n y_sampled = y[inclusion_index_set]\n X_unsampled = X[unsampled_data_set_indexes, :]\n y_unsampled = y[unsampled_data_set_indexes]\n # Train a RLS prediction model on the currently sampled data \n w_mp, Hessian = solveRLS(X_sampled, y_sampled)\n y_predict = X_unsampled@w_mp\n generalization_error_list.append(np.mean(np.abs(y_predict-y_unsampled)))\n print(\"\\n\")\n return inclusion_index_set, generalization_error_list\n\n###############################################################\n#\n# - DESCRIPTION: This function produces a cross-validation \n# fold partitioning. \n# \n# - INPUTS: \n# 'n_samples' integer, the number of data points. \n# 'n_folds' integer, the number of folds.\n#\n# - OUTPUTS: \n# 'folds' a list of integer lists containing fold indices. 
\n#\n###############################################################\ndef makeFolds(n_samples, n_folds):\n folds = []\n index_list = np.random.permutation(n_samples)\n # Check that the number of data points is larger than required number of folds\n if n_samples > n_folds:\n fold_size = np.floor(n_samples/float(n_folds))\n for fold in range(0, n_folds):\n start_ind = int(fold_size*fold)\n end_ind = int(fold_size*(fold+1))\n if fold < n_folds-1:\n folds.append(index_list[start_ind:end_ind].tolist())\n else:\n folds.append(index_list[start_ind:].tolist())\n return folds\n # Otherwise, we create a leave-one-out fold partitioning. \n else:\n for i in range(0, len(index_list)):\n folds.append([index_list[i]])\n return folds\n \n###############################################################\n#\n# - DESCRIPTION: This function solves the maximum likelihood (ML)\n# regularized least squares model. Hyperparameter selection \n# is conducted using 10-fold cross-validation. \n# \n# - INPUTS: \n # 'X' X contains the input data with first column assumed to \n# be all ones (constant term). Each row corresponds to one\n# observation.\n# 'y' corresponds to the vector of output values.\n#\n# - OUTPUTS: \n# 'optimal_w_mp' the hyperparameter tuned ML weight vector for \n# RLS model.\n# 'Hessian' the optimal Hessian matrix corresponding to \n# matrix A of function S(w) in equation (8) in the article. \n#\n###############################################################\ndef solveRLS(X, y):\n # Choose alpha/beta hyperparameters from an exponential\n # grid.\n alpha_grid = float(2)**np.arange(-7,7)\n beta_grid = float(2)**np.arange(-7,7)\n # Optimal hyperparameters and auxiliary variables\n optimal_alpha = None\n optimal_beta = None\n optimal_error = np.inf\n alphabeta_matrix = np.zeros((X.shape[1], X.shape[1]))\n alphabeta_matrix[1:,1:] = np.eye(X.shape[1]-1)\n # Loop through the hyperparameter grid \n folds = makeFolds(X.shape[0], 10)\n for alpha in alpha_grid:\n for beta in beta_grid: \n prediction_error_list = []\n # Choose best hyperparameters via cross-validation\n for fold_ind in range(0, len(folds)):\n ind = folds[fold_ind]\n X_train = X\n y_train = y\n X_train = np.delete(X_train, ind, 0)\n y_train = np.delete(y_train, ind, 0)\n X_test = X[ind,:]\n y_test = y[ind]\n X_train_T = np.transpose(X_train)\n # Solve the maximum likelihood model\n w_mp = np.linalg.inv(X_train_T@X_train + alpha/float(beta)*alphabeta_matrix)@X_train_T@y_train\n # Make prediction to validation data\n y_pred = X_test@w_mp\n prediction_error_list.append(np.sum(np.abs(y_pred-y_test)))\n # Evaluate the prediction error on evaluation data and save the best found parameters\n if np.sum(prediction_error_list) < optimal_error:\n optimal_error = np.sum(prediction_error_list)\n optimal_alpha = alpha\n optimal_beta = beta\n # Get the optimal parameters and return to caller\n X_T = np.transpose(X)\n optimal_w_mp = np.linalg.inv(X_T@X + optimal_alpha/float(optimal_beta)*alphabeta_matrix)@X_T@y\n Hessian = optimal_beta*X_T@X + optimal_alpha*alphabeta_matrix\n return optimal_w_mp, Hessian\n\n"
] |
[
[
"numpy.argsort",
"numpy.sum",
"numpy.abs",
"numpy.power",
"numpy.min",
"numpy.arange",
"numpy.eye",
"numpy.max",
"numpy.delete",
"numpy.random.permutation",
"numpy.linalg.pinv",
"numpy.transpose",
"numpy.mod",
"numpy.random.uniform",
"numpy.zeros",
"numpy.where",
"numpy.random.randint"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
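The record above pairs a local pivotal method (LPM) sampler with a regularized least-squares prediction model. As a companion, here is a minimal, self-contained sketch of the pairwise inclusion-probability update that LPM is built on. Everything in it is illustrative: `lpm_sample`, the equal starting probability `p`, the Euclidean nearest-neighbour rule, and the toy data are assumptions, not the record's exact code.

```python
# Sketch of the local pivotal method's pairwise update (illustrative only).
import numpy as np

def lpm_sample(X, p=0.5, seed=0):
    """Return indices selected by a basic, non-optimized local pivotal method."""
    rng = np.random.default_rng(seed)
    probs = np.full(X.shape[0], float(p))
    eps = 1e-12

    def open_points():
        # Points whose inclusion probability is still strictly between 0 and 1.
        return np.where((probs > eps) & (probs < 1 - eps))[0]

    active = open_points()
    while active.size > 1:
        i = rng.choice(active)
        others = active[active != i]
        # Nearest still-open neighbour of i in Euclidean distance.
        j = others[np.argmin(np.linalg.norm(X[others] - X[i], axis=1))]
        p1, p2 = probs[i], probs[j]
        s = p1 + p2
        if s < 1:
            # One point of the pair absorbs the other's probability.
            if rng.uniform() < p1 / s:
                probs[i], probs[j] = s, 0.0
            else:
                probs[i], probs[j] = 0.0, s
        else:
            # One point is pushed to certain inclusion; the other keeps the rest.
            if rng.uniform() < (1.0 - p1) / (2.0 - s):
                probs[i], probs[j] = s - 1.0, 1.0
            else:
                probs[i], probs[j] = 1.0, s - 1.0
        active = open_points()
    if active.size == 1:
        # Resolve a possible leftover fractional probability with one coin flip.
        k = active[0]
        probs[k] = 1.0 if rng.uniform() < probs[k] else 0.0
    return np.where(probs > 1 - eps)[0]

# Toy usage: a spatially balanced sample of roughly 10 of 20 points in the plane.
X_demo = np.random.default_rng(1).normal(size=(20, 2))
print(lpm_sample(X_demo, p=0.5))
```

One deliberate design choice in the sketch: the pair's new probabilities are assigned simultaneously as a tuple, so the update does not depend on the order in which the two values are written back.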
Corey-Zumar/clipper-db-queries
|
[
"e60f8d8b11c0ccc5f0287b63fe5cb86d128b72f0"
] |
[
"bench/bench_init.py"
] |
[
"import sys\nimport os\nimport errno\ncur_dir = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(os.path.abspath(\"%s/../management\" % cur_dir))\nsys.path.append(os.path.abspath(\"%s/../examples\" % cur_dir))\n# sys.path.insert(0, os.path.abspath('%s/../containers/python/' % cur_dir))\n\nimport clipper_manager\nfrom tutorial import cifar_utils\nfrom sklearn import linear_model as lm\nfrom sklearn.externals import joblib\nfrom fabric.api import *\n\nAPP_NAME = \"bench\"\nBASE_MODEL_PATH = \"model/\"\nSKLEARN_MODEL_FILE = \"bench_sk_model.pkl\"\nSKLEARN_MODEL_NAME = \"bench_sklearn_cifar\"\n\nCOLOR_WHITE = '\\033[0m'\nCOLOR_GREEN = '\\033[32m'\n\n\nclass BenchSetup():\n def __init__(self, host, cifar_dir_path):\n self.host = host\n self.cifar_dir_path = cifar_dir_path\n\n def print_green(self, text):\n print(COLOR_GREEN)\n print(text)\n print(COLOR_WHITE)\n\n def run(self):\n self.print_green(\"Loading Sklearn Model...\")\n self.train_sklearn_model()\n\n def get_cifar_data(self):\n train_x, train_y = cifar_utils.filter_data(*cifar_utils.load_cifar(\n self.cifar_dir_path, cifar_filename=\"cifar_train.data\",\n norm=False))\n test_x, test_y = cifar_utils.filter_data(*cifar_utils.load_cifar(\n self.cifar_dir_path, cifar_filename=\"cifar_test.data\", norm=False))\n\n return test_x, test_y, train_x, train_y\n\n def create_model_directory_if_necessary(self):\n try:\n os.makedirs(BASE_MODEL_PATH)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise\n\n def train_sklearn_model(self):\n self.create_model_directory_if_necessary()\n model_location = os.path.join(BASE_MODEL_PATH, SKLEARN_MODEL_FILE)\n if os.path.isfile(model_location):\n model = joblib.load(model_location)\n print(\"Found and loaded model!\")\n else:\n print(\"Loading CIFAR data...\")\n test_x, test_y, train_x, train_y = self.get_cifar_data()\n print(\"Training model...\")\n model = lm.LogisticRegression()\n model.fit(train_x, train_y)\n joblib.dump(model, model_location)\n print(\"Model trained!\")\n print(\"Logistic Regression test score: %f\" % model.score(\n test_x, test_y))\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n print(\n \"Correct usage is 'python bench_init.py <path_to_CIFAR_data_set>\")\n raise\n cifar_dir_path = sys.argv[1]\n setup = BenchSetup(\"localhost\", cifar_dir_path)\n setup.run()\n"
] |
[
[
"sklearn.externals.joblib.dump",
"sklearn.externals.joblib.load",
"sklearn.linear_model.LogisticRegression"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
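The `bench_init.py` record above caches a scikit-learn `LogisticRegression` to disk and reloads it on later runs via `sklearn.externals.joblib`. Newer scikit-learn releases no longer ship `sklearn.externals.joblib`, so the sketch below expresses the same cache-or-train pattern with the standalone `joblib` package; the model path and toy data are illustrative assumptions, not the benchmark's real inputs.

```python
# Cache-or-train pattern with standalone joblib (illustrative sketch).
import os

import joblib
import numpy as np
from sklearn.linear_model import LogisticRegression

MODEL_PATH = "model/bench_sk_model.pkl"  # hypothetical cache location

def load_or_train(X, y, model_path=MODEL_PATH):
    """Reuse a cached model if present, otherwise train and persist one."""
    if os.path.isfile(model_path):
        return joblib.load(model_path)
    os.makedirs(os.path.dirname(model_path), exist_ok=True)
    model = LogisticRegression(max_iter=1000).fit(X, y)
    joblib.dump(model, model_path)
    return model

# Toy data standing in for the CIFAR features used by the benchmark script.
rng = np.random.default_rng(0)
X = rng.normal(size=(200, 5))
y = (X[:, 0] > 0).astype(int)
clf = load_or_train(X, y)
print("train accuracy: %.3f" % clf.score(X, y))
```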
xiaoyuchai/scikit-learn
|
[
"246795f214ec31874aa1d1e89c90c7007ab60642"
] |
[
"sklearn/compose/tests/test_column_transformer.py"
] |
[
"\"\"\"\nTest the ColumnTransformer.\n\"\"\"\nimport re\nimport pickle\n\nimport warnings\nimport numpy as np\nfrom scipy import sparse\nimport pytest\n\nfrom numpy.testing import assert_allclose\nfrom sklearn.utils._testing import assert_raise_message\nfrom sklearn.utils._testing import assert_array_equal\nfrom sklearn.utils._testing import assert_allclose_dense_sparse\nfrom sklearn.utils._testing import assert_almost_equal\n\nfrom sklearn.base import BaseEstimator\nfrom sklearn.compose import (\n ColumnTransformer, make_column_transformer, make_column_selector\n)\nfrom sklearn.exceptions import NotFittedError\nfrom sklearn.preprocessing import FunctionTransformer\nfrom sklearn.preprocessing import StandardScaler, Normalizer, OneHotEncoder\nfrom sklearn.feature_extraction import DictVectorizer\n\n\nclass Trans(BaseEstimator):\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n # 1D Series -> 2D DataFrame\n if hasattr(X, 'to_frame'):\n return X.to_frame()\n # 1D array -> 2D array\n if X.ndim == 1:\n return np.atleast_2d(X).T\n return X\n\n\nclass DoubleTrans(BaseEstimator):\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n return 2*X\n\n\nclass SparseMatrixTrans(BaseEstimator):\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n n_samples = len(X)\n return sparse.eye(n_samples, n_samples).tocsr()\n\n\nclass TransNo2D(BaseEstimator):\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n return X\n\n\nclass TransRaise(BaseEstimator):\n\n def fit(self, X, y=None):\n raise ValueError(\"specific message\")\n\n def transform(self, X, y=None):\n raise ValueError(\"specific message\")\n\n\ndef test_column_transformer():\n X_array = np.array([[0, 1, 2], [2, 4, 6]]).T\n\n X_res_first1D = np.array([0, 1, 2])\n X_res_second1D = np.array([2, 4, 6])\n X_res_first = X_res_first1D.reshape(-1, 1)\n X_res_both = X_array\n\n cases = [\n # single column 1D / 2D\n (0, X_res_first),\n ([0], X_res_first),\n # list-like\n ([0, 1], X_res_both),\n (np.array([0, 1]), X_res_both),\n # slice\n (slice(0, 1), X_res_first),\n (slice(0, 2), X_res_both),\n # boolean mask\n (np.array([True, False]), X_res_first),\n ([True, False], X_res_first),\n (np.array([True, True]), X_res_both),\n ([True, True], X_res_both),\n ]\n\n for selection, res in cases:\n ct = ColumnTransformer([('trans', Trans(), selection)],\n remainder='drop')\n assert_array_equal(ct.fit_transform(X_array), res)\n assert_array_equal(ct.fit(X_array).transform(X_array), res)\n\n # callable that returns any of the allowed specifiers\n ct = ColumnTransformer([('trans', Trans(), lambda x: selection)],\n remainder='drop')\n assert_array_equal(ct.fit_transform(X_array), res)\n assert_array_equal(ct.fit(X_array).transform(X_array), res)\n\n ct = ColumnTransformer([('trans1', Trans(), [0]),\n ('trans2', Trans(), [1])])\n assert_array_equal(ct.fit_transform(X_array), X_res_both)\n assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both)\n assert len(ct.transformers_) == 2\n\n # test with transformer_weights\n transformer_weights = {'trans1': .1, 'trans2': 10}\n both = ColumnTransformer([('trans1', Trans(), [0]),\n ('trans2', Trans(), [1])],\n transformer_weights=transformer_weights)\n res = np.vstack([transformer_weights['trans1'] * X_res_first1D,\n transformer_weights['trans2'] * X_res_second1D]).T\n assert_array_equal(both.fit_transform(X_array), res)\n assert_array_equal(both.fit(X_array).transform(X_array), res)\n assert len(both.transformers_) == 
2\n\n both = ColumnTransformer([('trans', Trans(), [0, 1])],\n transformer_weights={'trans': .1})\n assert_array_equal(both.fit_transform(X_array), 0.1 * X_res_both)\n assert_array_equal(both.fit(X_array).transform(X_array), 0.1 * X_res_both)\n assert len(both.transformers_) == 1\n\n\ndef test_column_transformer_dataframe():\n pd = pytest.importorskip('pandas')\n\n X_array = np.array([[0, 1, 2], [2, 4, 6]]).T\n X_df = pd.DataFrame(X_array, columns=['first', 'second'])\n\n X_res_first = np.array([0, 1, 2]).reshape(-1, 1)\n X_res_both = X_array\n\n cases = [\n # String keys: label based\n\n # scalar\n ('first', X_res_first),\n # list\n (['first'], X_res_first),\n (['first', 'second'], X_res_both),\n # slice\n (slice('first', 'second'), X_res_both),\n\n # int keys: positional\n\n # scalar\n (0, X_res_first),\n # list\n ([0], X_res_first),\n ([0, 1], X_res_both),\n (np.array([0, 1]), X_res_both),\n # slice\n (slice(0, 1), X_res_first),\n (slice(0, 2), X_res_both),\n\n # boolean mask\n (np.array([True, False]), X_res_first),\n (pd.Series([True, False], index=['first', 'second']), X_res_first),\n ([True, False], X_res_first),\n ]\n\n for selection, res in cases:\n ct = ColumnTransformer([('trans', Trans(), selection)],\n remainder='drop')\n assert_array_equal(ct.fit_transform(X_df), res)\n assert_array_equal(ct.fit(X_df).transform(X_df), res)\n\n # callable that returns any of the allowed specifiers\n ct = ColumnTransformer([('trans', Trans(), lambda X: selection)],\n remainder='drop')\n assert_array_equal(ct.fit_transform(X_df), res)\n assert_array_equal(ct.fit(X_df).transform(X_df), res)\n\n ct = ColumnTransformer([('trans1', Trans(), ['first']),\n ('trans2', Trans(), ['second'])])\n assert_array_equal(ct.fit_transform(X_df), X_res_both)\n assert_array_equal(ct.fit(X_df).transform(X_df), X_res_both)\n assert len(ct.transformers_) == 2\n assert ct.transformers_[-1][0] != 'remainder'\n\n ct = ColumnTransformer([('trans1', Trans(), [0]),\n ('trans2', Trans(), [1])])\n assert_array_equal(ct.fit_transform(X_df), X_res_both)\n assert_array_equal(ct.fit(X_df).transform(X_df), X_res_both)\n assert len(ct.transformers_) == 2\n assert ct.transformers_[-1][0] != 'remainder'\n\n # test with transformer_weights\n transformer_weights = {'trans1': .1, 'trans2': 10}\n both = ColumnTransformer([('trans1', Trans(), ['first']),\n ('trans2', Trans(), ['second'])],\n transformer_weights=transformer_weights)\n res = np.vstack([transformer_weights['trans1'] * X_df['first'],\n transformer_weights['trans2'] * X_df['second']]).T\n assert_array_equal(both.fit_transform(X_df), res)\n assert_array_equal(both.fit(X_df).transform(X_df), res)\n assert len(both.transformers_) == 2\n assert both.transformers_[-1][0] != 'remainder'\n\n # test multiple columns\n both = ColumnTransformer([('trans', Trans(), ['first', 'second'])],\n transformer_weights={'trans': .1})\n assert_array_equal(both.fit_transform(X_df), 0.1 * X_res_both)\n assert_array_equal(both.fit(X_df).transform(X_df), 0.1 * X_res_both)\n assert len(both.transformers_) == 1\n assert both.transformers_[-1][0] != 'remainder'\n\n both = ColumnTransformer([('trans', Trans(), [0, 1])],\n transformer_weights={'trans': .1})\n assert_array_equal(both.fit_transform(X_df), 0.1 * X_res_both)\n assert_array_equal(both.fit(X_df).transform(X_df), 0.1 * X_res_both)\n assert len(both.transformers_) == 1\n assert both.transformers_[-1][0] != 'remainder'\n\n # ensure pandas object is passed through\n\n class TransAssert(BaseEstimator):\n\n def fit(self, X, y=None):\n return self\n\n 
def transform(self, X, y=None):\n assert isinstance(X, (pd.DataFrame, pd.Series))\n if isinstance(X, pd.Series):\n X = X.to_frame()\n return X\n\n ct = ColumnTransformer([('trans', TransAssert(), 'first')],\n remainder='drop')\n ct.fit_transform(X_df)\n ct = ColumnTransformer([('trans', TransAssert(), ['first', 'second'])])\n ct.fit_transform(X_df)\n\n # integer column spec + integer column names -> still use positional\n X_df2 = X_df.copy()\n X_df2.columns = [1, 0]\n ct = ColumnTransformer([('trans', Trans(), 0)], remainder='drop')\n assert_array_equal(ct.fit_transform(X_df2), X_res_first)\n assert_array_equal(ct.fit(X_df2).transform(X_df2), X_res_first)\n\n assert len(ct.transformers_) == 2\n assert ct.transformers_[-1][0] == 'remainder'\n assert ct.transformers_[-1][1] == 'drop'\n assert_array_equal(ct.transformers_[-1][2], [1])\n\n\[email protected](\"pandas\", [True, False], ids=['pandas', 'numpy'])\[email protected](\"column_selection\", [[], np.array([False, False]),\n [False, False]],\n ids=['list', 'bool', 'bool_int'])\[email protected](\"callable_column\", [False, True])\ndef test_column_transformer_empty_columns(pandas, column_selection,\n callable_column):\n # test case that ensures that the column transformer does also work when\n # a given transformer doesn't have any columns to work on\n X_array = np.array([[0, 1, 2], [2, 4, 6]]).T\n X_res_both = X_array\n\n if pandas:\n pd = pytest.importorskip('pandas')\n X = pd.DataFrame(X_array, columns=['first', 'second'])\n else:\n X = X_array\n\n if callable_column:\n column = lambda X: column_selection # noqa\n else:\n column = column_selection\n\n ct = ColumnTransformer([('trans1', Trans(), [0, 1]),\n ('trans2', TransRaise(), column)])\n assert_array_equal(ct.fit_transform(X), X_res_both)\n assert_array_equal(ct.fit(X).transform(X), X_res_both)\n assert len(ct.transformers_) == 2\n assert isinstance(ct.transformers_[1][1], TransRaise)\n\n ct = ColumnTransformer([('trans1', TransRaise(), column),\n ('trans2', Trans(), [0, 1])])\n assert_array_equal(ct.fit_transform(X), X_res_both)\n assert_array_equal(ct.fit(X).transform(X), X_res_both)\n assert len(ct.transformers_) == 2\n assert isinstance(ct.transformers_[0][1], TransRaise)\n\n ct = ColumnTransformer([('trans', TransRaise(), column)],\n remainder='passthrough')\n assert_array_equal(ct.fit_transform(X), X_res_both)\n assert_array_equal(ct.fit(X).transform(X), X_res_both)\n assert len(ct.transformers_) == 2 # including remainder\n assert isinstance(ct.transformers_[0][1], TransRaise)\n\n fixture = np.array([[], [], []])\n ct = ColumnTransformer([('trans', TransRaise(), column)],\n remainder='drop')\n assert_array_equal(ct.fit_transform(X), fixture)\n assert_array_equal(ct.fit(X).transform(X), fixture)\n assert len(ct.transformers_) == 2 # including remainder\n assert isinstance(ct.transformers_[0][1], TransRaise)\n\n\ndef test_column_transformer_output_indices():\n # Checks for the output_indices_ attribute\n X_array = np.arange(6).reshape(3, 2)\n\n ct = ColumnTransformer([('trans1', Trans(), [0]),\n ('trans2', Trans(), [1])])\n X_trans = ct.fit_transform(X_array)\n assert ct.output_indices_ == {'trans1': slice(0, 1),\n 'trans2': slice(1, 2),\n 'remainder': slice(0, 0)}\n assert_array_equal(X_trans[:, [0]],\n X_trans[:, ct.output_indices_['trans1']])\n assert_array_equal(X_trans[:, [1]],\n X_trans[:, ct.output_indices_['trans2']])\n\n # test with transformer_weights and multiple columns\n ct = ColumnTransformer([('trans', Trans(), [0, 1])],\n transformer_weights={'trans': .1})\n 
X_trans = ct.fit_transform(X_array)\n assert ct.output_indices_ == {'trans': slice(0, 2),\n 'remainder': slice(0, 0)}\n assert_array_equal(X_trans[:, [0, 1]],\n X_trans[:, ct.output_indices_['trans']])\n assert_array_equal(X_trans[:, []],\n X_trans[:, ct.output_indices_['remainder']])\n\n # test case that ensures that the attribute does also work when\n # a given transformer doesn't have any columns to work on\n ct = ColumnTransformer([('trans1', Trans(), [0, 1]),\n ('trans2', TransRaise(), [])])\n X_trans = ct.fit_transform(X_array)\n assert ct.output_indices_ == {'trans1': slice(0, 2),\n 'trans2': slice(0, 0),\n 'remainder': slice(0, 0)}\n assert_array_equal(X_trans[:, [0, 1]],\n X_trans[:, ct.output_indices_['trans1']])\n assert_array_equal(X_trans[:, []],\n X_trans[:, ct.output_indices_['trans2']])\n assert_array_equal(X_trans[:, []],\n X_trans[:, ct.output_indices_['remainder']])\n\n ct = ColumnTransformer([('trans', TransRaise(), [])],\n remainder='passthrough')\n X_trans = ct.fit_transform(X_array)\n assert ct.output_indices_ == {'trans': slice(0, 0),\n 'remainder': slice(0, 2)}\n assert_array_equal(X_trans[:, []],\n X_trans[:, ct.output_indices_['trans']])\n assert_array_equal(X_trans[:, [0, 1]],\n X_trans[:, ct.output_indices_['remainder']])\n\n\ndef test_column_transformer_output_indices_df():\n # Checks for the output_indices_ attribute with data frames\n pd = pytest.importorskip('pandas')\n\n X_df = pd.DataFrame(np.arange(6).reshape(3, 2),\n columns=['first', 'second'])\n\n ct = ColumnTransformer([('trans1', Trans(), ['first']),\n ('trans2', Trans(), ['second'])])\n X_trans = ct.fit_transform(X_df)\n assert ct.output_indices_ == {'trans1': slice(0, 1),\n 'trans2': slice(1, 2),\n 'remainder': slice(0, 0)}\n assert_array_equal(X_trans[:, [0]],\n X_trans[:, ct.output_indices_['trans1']])\n assert_array_equal(X_trans[:, [1]],\n X_trans[:, ct.output_indices_['trans2']])\n assert_array_equal(X_trans[:, []],\n X_trans[:, ct.output_indices_['remainder']])\n\n ct = ColumnTransformer([('trans1', Trans(), [0]),\n ('trans2', Trans(), [1])])\n X_trans = ct.fit_transform(X_df)\n assert ct.output_indices_ == {'trans1': slice(0, 1),\n 'trans2': slice(1, 2),\n 'remainder': slice(0, 0)}\n assert_array_equal(X_trans[:, [0]],\n X_trans[:, ct.output_indices_['trans1']])\n assert_array_equal(X_trans[:, [1]],\n X_trans[:, ct.output_indices_['trans2']])\n assert_array_equal(X_trans[:, []],\n X_trans[:, ct.output_indices_['remainder']])\n\n\ndef test_column_transformer_sparse_array():\n X_sparse = sparse.eye(3, 2).tocsr()\n\n # no distinction between 1D and 2D\n X_res_first = X_sparse[:, 0]\n X_res_both = X_sparse\n\n for col in [0, [0], slice(0, 1)]:\n for remainder, res in [('drop', X_res_first),\n ('passthrough', X_res_both)]:\n ct = ColumnTransformer([('trans', Trans(), col)],\n remainder=remainder,\n sparse_threshold=0.8)\n assert sparse.issparse(ct.fit_transform(X_sparse))\n assert_allclose_dense_sparse(ct.fit_transform(X_sparse), res)\n assert_allclose_dense_sparse(ct.fit(X_sparse).transform(X_sparse),\n res)\n\n for col in [[0, 1], slice(0, 2)]:\n ct = ColumnTransformer([('trans', Trans(), col)],\n sparse_threshold=0.8)\n assert sparse.issparse(ct.fit_transform(X_sparse))\n assert_allclose_dense_sparse(ct.fit_transform(X_sparse), X_res_both)\n assert_allclose_dense_sparse(ct.fit(X_sparse).transform(X_sparse),\n X_res_both)\n\n\ndef test_column_transformer_list():\n X_list = [\n [1, float('nan'), 'a'],\n [0, 0, 'b']\n ]\n expected_result = np.array([\n [1, float('nan'), 1, 0],\n [-1, 0, 0, 1],\n 
])\n\n ct = ColumnTransformer([\n ('numerical', StandardScaler(), [0, 1]),\n ('categorical', OneHotEncoder(), [2]),\n ])\n\n assert_array_equal(ct.fit_transform(X_list), expected_result)\n assert_array_equal(ct.fit(X_list).transform(X_list), expected_result)\n\n\ndef test_column_transformer_sparse_stacking():\n X_array = np.array([[0, 1, 2], [2, 4, 6]]).T\n col_trans = ColumnTransformer([('trans1', Trans(), [0]),\n ('trans2', SparseMatrixTrans(), 1)],\n sparse_threshold=0.8)\n col_trans.fit(X_array)\n X_trans = col_trans.transform(X_array)\n assert sparse.issparse(X_trans)\n assert X_trans.shape == (X_trans.shape[0], X_trans.shape[0] + 1)\n assert_array_equal(X_trans.toarray()[:, 1:], np.eye(X_trans.shape[0]))\n assert len(col_trans.transformers_) == 2\n assert col_trans.transformers_[-1][0] != 'remainder'\n\n col_trans = ColumnTransformer([('trans1', Trans(), [0]),\n ('trans2', SparseMatrixTrans(), 1)],\n sparse_threshold=0.1)\n col_trans.fit(X_array)\n X_trans = col_trans.transform(X_array)\n assert not sparse.issparse(X_trans)\n assert X_trans.shape == (X_trans.shape[0], X_trans.shape[0] + 1)\n assert_array_equal(X_trans[:, 1:], np.eye(X_trans.shape[0]))\n\n\ndef test_column_transformer_mixed_cols_sparse():\n df = np.array([['a', 1, True],\n ['b', 2, False]],\n dtype='O')\n\n ct = make_column_transformer(\n (OneHotEncoder(), [0]),\n ('passthrough', [1, 2]),\n sparse_threshold=1.0\n )\n\n # this shouldn't fail, since boolean can be coerced into a numeric\n # See: https://github.com/scikit-learn/scikit-learn/issues/11912\n X_trans = ct.fit_transform(df)\n assert X_trans.getformat() == 'csr'\n assert_array_equal(X_trans.toarray(), np.array([[1, 0, 1, 1],\n [0, 1, 2, 0]]))\n\n ct = make_column_transformer(\n (OneHotEncoder(), [0]),\n ('passthrough', [0]),\n sparse_threshold=1.0\n )\n with pytest.raises(ValueError,\n match=\"For a sparse output, all columns should\"):\n # this fails since strings `a` and `b` cannot be\n # coerced into a numeric.\n ct.fit_transform(df)\n\n\ndef test_column_transformer_sparse_threshold():\n X_array = np.array([['a', 'b'], ['A', 'B']], dtype=object).T\n # above data has sparsity of 4 / 8 = 0.5\n\n # apply threshold even if all sparse\n col_trans = ColumnTransformer([('trans1', OneHotEncoder(), [0]),\n ('trans2', OneHotEncoder(), [1])],\n sparse_threshold=0.2)\n res = col_trans.fit_transform(X_array)\n assert not sparse.issparse(res)\n assert not col_trans.sparse_output_\n\n # mixed -> sparsity of (4 + 2) / 8 = 0.75\n for thres in [0.75001, 1]:\n col_trans = ColumnTransformer(\n [('trans1', OneHotEncoder(sparse=True), [0]),\n ('trans2', OneHotEncoder(sparse=False), [1])],\n sparse_threshold=thres)\n res = col_trans.fit_transform(X_array)\n assert sparse.issparse(res)\n assert col_trans.sparse_output_\n\n for thres in [0.75, 0]:\n col_trans = ColumnTransformer(\n [('trans1', OneHotEncoder(sparse=True), [0]),\n ('trans2', OneHotEncoder(sparse=False), [1])],\n sparse_threshold=thres)\n res = col_trans.fit_transform(X_array)\n assert not sparse.issparse(res)\n assert not col_trans.sparse_output_\n\n # if nothing is sparse -> no sparse\n for thres in [0.33, 0, 1]:\n col_trans = ColumnTransformer(\n [('trans1', OneHotEncoder(sparse=False), [0]),\n ('trans2', OneHotEncoder(sparse=False), [1])],\n sparse_threshold=thres)\n res = col_trans.fit_transform(X_array)\n assert not sparse.issparse(res)\n assert not col_trans.sparse_output_\n\n\ndef test_column_transformer_error_msg_1D():\n X_array = np.array([[0., 1., 2.], [2., 4., 6.]]).T\n\n col_trans = 
ColumnTransformer([('trans', StandardScaler(), 0)])\n assert_raise_message(ValueError, \"1D data passed to a transformer\",\n col_trans.fit, X_array)\n assert_raise_message(ValueError, \"1D data passed to a transformer\",\n col_trans.fit_transform, X_array)\n\n col_trans = ColumnTransformer([('trans', TransRaise(), 0)])\n for func in [col_trans.fit, col_trans.fit_transform]:\n assert_raise_message(ValueError, \"specific message\", func, X_array)\n\n\ndef test_2D_transformer_output():\n X_array = np.array([[0, 1, 2], [2, 4, 6]]).T\n\n # if one transformer is dropped, test that name is still correct\n ct = ColumnTransformer([('trans1', 'drop', 0),\n ('trans2', TransNo2D(), 1)])\n assert_raise_message(ValueError, \"the 'trans2' transformer should be 2D\",\n ct.fit_transform, X_array)\n # because fit is also doing transform, this raises already on fit\n assert_raise_message(ValueError, \"the 'trans2' transformer should be 2D\",\n ct.fit, X_array)\n\n\ndef test_2D_transformer_output_pandas():\n pd = pytest.importorskip('pandas')\n\n X_array = np.array([[0, 1, 2], [2, 4, 6]]).T\n X_df = pd.DataFrame(X_array, columns=['col1', 'col2'])\n\n # if one transformer is dropped, test that name is still correct\n ct = ColumnTransformer([('trans1', TransNo2D(), 'col1')])\n assert_raise_message(ValueError, \"the 'trans1' transformer should be 2D\",\n ct.fit_transform, X_df)\n # because fit is also doing transform, this raises already on fit\n assert_raise_message(ValueError, \"the 'trans1' transformer should be 2D\",\n ct.fit, X_df)\n\n\[email protected](\"remainder\", ['drop', 'passthrough'])\ndef test_column_transformer_invalid_columns(remainder):\n X_array = np.array([[0, 1, 2], [2, 4, 6]]).T\n\n # general invalid\n for col in [1.5, ['string', 1], slice(1, 's'), np.array([1.])]:\n ct = ColumnTransformer([('trans', Trans(), col)], remainder=remainder)\n assert_raise_message(ValueError, \"No valid specification\",\n ct.fit, X_array)\n\n # invalid for arrays\n for col in ['string', ['string', 'other'], slice('a', 'b')]:\n ct = ColumnTransformer([('trans', Trans(), col)], remainder=remainder)\n assert_raise_message(ValueError, \"Specifying the columns\",\n ct.fit, X_array)\n\n # transformed n_features does not match fitted n_features\n col = [0, 1]\n ct = ColumnTransformer([('trans', Trans(), col)], remainder=remainder)\n ct.fit(X_array)\n X_array_more = np.array([[0, 1, 2], [2, 4, 6], [3, 6, 9]]).T\n msg = (\"X has 3 features, but ColumnTransformer is expecting 2 features \"\n \"as input.\")\n with pytest.raises(ValueError, match=msg):\n ct.transform(X_array_more)\n X_array_fewer = np.array([[0, 1, 2], ]).T\n err_msg = (\"X has 1 features, but ColumnTransformer is expecting 2 \"\n \"features as input.\")\n with pytest.raises(ValueError, match=err_msg):\n ct.transform(X_array_fewer)\n\n\ndef test_column_transformer_invalid_transformer():\n\n class NoTrans(BaseEstimator):\n def fit(self, X, y=None):\n return self\n\n def predict(self, X):\n return X\n\n X_array = np.array([[0, 1, 2], [2, 4, 6]]).T\n ct = ColumnTransformer([('trans', NoTrans(), [0])])\n assert_raise_message(TypeError,\n \"All estimators should implement fit and transform\",\n ct.fit, X_array)\n\n\ndef test_make_column_transformer():\n scaler = StandardScaler()\n norm = Normalizer()\n ct = make_column_transformer((scaler, 'first'), (norm, ['second']))\n names, transformers, columns = zip(*ct.transformers)\n assert names == (\"standardscaler\", \"normalizer\")\n assert transformers == (scaler, norm)\n assert columns == ('first', 
['second'])\n\n\ndef test_make_column_transformer_pandas():\n pd = pytest.importorskip('pandas')\n X_array = np.array([[0, 1, 2], [2, 4, 6]]).T\n X_df = pd.DataFrame(X_array, columns=['first', 'second'])\n norm = Normalizer()\n ct1 = ColumnTransformer([('norm', Normalizer(), X_df.columns)])\n ct2 = make_column_transformer((norm, X_df.columns))\n assert_almost_equal(ct1.fit_transform(X_df),\n ct2.fit_transform(X_df))\n\n\ndef test_make_column_transformer_kwargs():\n scaler = StandardScaler()\n norm = Normalizer()\n ct = make_column_transformer((scaler, 'first'), (norm, ['second']),\n n_jobs=3, remainder='drop',\n sparse_threshold=0.5)\n assert ct.transformers == make_column_transformer(\n (scaler, 'first'), (norm, ['second'])).transformers\n assert ct.n_jobs == 3\n assert ct.remainder == 'drop'\n assert ct.sparse_threshold == 0.5\n # invalid keyword parameters should raise an error message\n assert_raise_message(\n TypeError,\n \"make_column_transformer() got an unexpected \"\n \"keyword argument 'transformer_weights'\",\n make_column_transformer, (scaler, 'first'), (norm, ['second']),\n transformer_weights={'pca': 10, 'Transf': 1}\n )\n\n\ndef test_make_column_transformer_remainder_transformer():\n scaler = StandardScaler()\n norm = Normalizer()\n remainder = StandardScaler()\n ct = make_column_transformer((scaler, 'first'), (norm, ['second']),\n remainder=remainder)\n assert ct.remainder == remainder\n\n\ndef test_column_transformer_get_set_params():\n ct = ColumnTransformer([('trans1', StandardScaler(), [0]),\n ('trans2', StandardScaler(), [1])])\n\n exp = {'n_jobs': None,\n 'remainder': 'drop',\n 'sparse_threshold': 0.3,\n 'trans1': ct.transformers[0][1],\n 'trans1__copy': True,\n 'trans1__with_mean': True,\n 'trans1__with_std': True,\n 'trans2': ct.transformers[1][1],\n 'trans2__copy': True,\n 'trans2__with_mean': True,\n 'trans2__with_std': True,\n 'transformers': ct.transformers,\n 'transformer_weights': None,\n 'verbose': False}\n\n assert ct.get_params() == exp\n\n ct.set_params(trans1__with_mean=False)\n assert not ct.get_params()['trans1__with_mean']\n\n ct.set_params(trans1='passthrough')\n exp = {'n_jobs': None,\n 'remainder': 'drop',\n 'sparse_threshold': 0.3,\n 'trans1': 'passthrough',\n 'trans2': ct.transformers[1][1],\n 'trans2__copy': True,\n 'trans2__with_mean': True,\n 'trans2__with_std': True,\n 'transformers': ct.transformers,\n 'transformer_weights': None,\n 'verbose': False}\n\n assert ct.get_params() == exp\n\n\ndef test_column_transformer_named_estimators():\n X_array = np.array([[0., 1., 2.], [2., 4., 6.]]).T\n ct = ColumnTransformer([('trans1', StandardScaler(), [0]),\n ('trans2', StandardScaler(with_std=False), [1])])\n assert not hasattr(ct, 'transformers_')\n ct.fit(X_array)\n assert hasattr(ct, 'transformers_')\n assert isinstance(ct.named_transformers_['trans1'], StandardScaler)\n assert isinstance(ct.named_transformers_.trans1, StandardScaler)\n assert isinstance(ct.named_transformers_['trans2'], StandardScaler)\n assert isinstance(ct.named_transformers_.trans2, StandardScaler)\n assert not ct.named_transformers_.trans2.with_std\n # check it are fitted transformers\n assert ct.named_transformers_.trans1.mean_ == 1.\n\n\ndef test_column_transformer_cloning():\n X_array = np.array([[0., 1., 2.], [2., 4., 6.]]).T\n\n ct = ColumnTransformer([('trans', StandardScaler(), [0])])\n ct.fit(X_array)\n assert not hasattr(ct.transformers[0][1], 'mean_')\n assert hasattr(ct.transformers_[0][1], 'mean_')\n\n ct = ColumnTransformer([('trans', StandardScaler(), [0])])\n 
ct.fit_transform(X_array)\n assert not hasattr(ct.transformers[0][1], 'mean_')\n assert hasattr(ct.transformers_[0][1], 'mean_')\n\n\ndef test_column_transformer_get_feature_names():\n X_array = np.array([[0., 1., 2.], [2., 4., 6.]]).T\n ct = ColumnTransformer([('trans', Trans(), [0, 1])])\n # raise correct error when not fitted\n with pytest.raises(NotFittedError):\n ct.get_feature_names()\n # raise correct error when no feature names are available\n ct.fit(X_array)\n assert_raise_message(AttributeError,\n \"Transformer trans (type Trans) does not provide \"\n \"get_feature_names\", ct.get_feature_names)\n\n # working example\n X = np.array([[{'a': 1, 'b': 2}, {'a': 3, 'b': 4}],\n [{'c': 5}, {'c': 6}]], dtype=object).T\n ct = ColumnTransformer(\n [('col' + str(i), DictVectorizer(), i) for i in range(2)])\n ct.fit(X)\n assert ct.get_feature_names() == ['col0__a', 'col0__b', 'col1__c']\n\n # drop transformer\n ct = ColumnTransformer(\n [('col0', DictVectorizer(), 0), ('col1', 'drop', 1)])\n ct.fit(X)\n assert ct.get_feature_names() == ['col0__a', 'col0__b']\n\n # passthrough transformer\n ct = ColumnTransformer([('trans', 'passthrough', [0, 1])])\n ct.fit(X)\n assert ct.get_feature_names() == ['x0', 'x1']\n\n ct = ColumnTransformer([('trans', DictVectorizer(), 0)],\n remainder='passthrough')\n ct.fit(X)\n assert ct.get_feature_names() == ['trans__a', 'trans__b', 'x1']\n\n ct = ColumnTransformer([('trans', 'passthrough', [1])],\n remainder='passthrough')\n ct.fit(X)\n assert ct.get_feature_names() == ['x1', 'x0']\n\n ct = ColumnTransformer([('trans', 'passthrough', lambda x: [1])],\n remainder='passthrough')\n ct.fit(X)\n assert ct.get_feature_names() == ['x1', 'x0']\n\n ct = ColumnTransformer([('trans', 'passthrough', np.array([False, True]))],\n remainder='passthrough')\n ct.fit(X)\n assert ct.get_feature_names() == ['x1', 'x0']\n\n ct = ColumnTransformer([('trans', 'passthrough', slice(1, 2))],\n remainder='passthrough')\n ct.fit(X)\n assert ct.get_feature_names() == ['x1', 'x0']\n\n\ndef test_column_transformer_get_feature_names_dataframe():\n # passthough transformer with a dataframe\n pd = pytest.importorskip('pandas')\n X = np.array([[{'a': 1, 'b': 2}, {'a': 3, 'b': 4}],\n [{'c': 5}, {'c': 6}]], dtype=object).T\n X_df = pd.DataFrame(X, columns=['col0', 'col1'])\n\n ct = ColumnTransformer([('trans', 'passthrough', ['col0', 'col1'])])\n ct.fit(X_df)\n assert ct.get_feature_names() == ['col0', 'col1']\n\n ct = ColumnTransformer([('trans', 'passthrough', [0, 1])])\n ct.fit(X_df)\n assert ct.get_feature_names() == ['col0', 'col1']\n\n ct = ColumnTransformer([('col0', DictVectorizer(), 0)],\n remainder='passthrough')\n ct.fit(X_df)\n assert ct.get_feature_names() == ['col0__a', 'col0__b', 'col1']\n\n ct = ColumnTransformer([('trans', 'passthrough', ['col1'])],\n remainder='passthrough')\n ct.fit(X_df)\n assert ct.get_feature_names() == ['col1', 'col0']\n\n ct = ColumnTransformer([('trans', 'passthrough',\n lambda x: x[['col1']].columns)],\n remainder='passthrough')\n ct.fit(X_df)\n assert ct.get_feature_names() == ['col1', 'col0']\n\n ct = ColumnTransformer([('trans', 'passthrough', np.array([False, True]))],\n remainder='passthrough')\n ct.fit(X_df)\n assert ct.get_feature_names() == ['col1', 'col0']\n\n ct = ColumnTransformer([('trans', 'passthrough', slice(1, 2))],\n remainder='passthrough')\n ct.fit(X_df)\n assert ct.get_feature_names() == ['col1', 'col0']\n\n ct = ColumnTransformer([('trans', 'passthrough', [1])],\n remainder='passthrough')\n ct.fit(X_df)\n assert 
ct.get_feature_names() == ['col1', 'col0']\n\n\ndef test_column_transformer_special_strings():\n\n # one 'drop' -> ignore\n X_array = np.array([[0., 1., 2.], [2., 4., 6.]]).T\n ct = ColumnTransformer(\n [('trans1', Trans(), [0]), ('trans2', 'drop', [1])])\n exp = np.array([[0.], [1.], [2.]])\n assert_array_equal(ct.fit_transform(X_array), exp)\n assert_array_equal(ct.fit(X_array).transform(X_array), exp)\n assert len(ct.transformers_) == 2\n assert ct.transformers_[-1][0] != 'remainder'\n\n # all 'drop' -> return shape 0 array\n ct = ColumnTransformer(\n [('trans1', 'drop', [0]), ('trans2', 'drop', [1])])\n assert_array_equal(ct.fit(X_array).transform(X_array).shape, (3, 0))\n assert_array_equal(ct.fit_transform(X_array).shape, (3, 0))\n assert len(ct.transformers_) == 2\n assert ct.transformers_[-1][0] != 'remainder'\n\n # 'passthrough'\n X_array = np.array([[0., 1., 2.], [2., 4., 6.]]).T\n ct = ColumnTransformer(\n [('trans1', Trans(), [0]), ('trans2', 'passthrough', [1])])\n exp = X_array\n assert_array_equal(ct.fit_transform(X_array), exp)\n assert_array_equal(ct.fit(X_array).transform(X_array), exp)\n assert len(ct.transformers_) == 2\n assert ct.transformers_[-1][0] != 'remainder'\n\n # None itself / other string is not valid\n for val in [None, 'other']:\n ct = ColumnTransformer(\n [('trans1', Trans(), [0]), ('trans2', None, [1])])\n assert_raise_message(TypeError, \"All estimators should implement\",\n ct.fit_transform, X_array)\n assert_raise_message(TypeError, \"All estimators should implement\",\n ct.fit, X_array)\n\n\ndef test_column_transformer_remainder():\n X_array = np.array([[0, 1, 2], [2, 4, 6]]).T\n\n X_res_first = np.array([0, 1, 2]).reshape(-1, 1)\n X_res_second = np.array([2, 4, 6]).reshape(-1, 1)\n X_res_both = X_array\n\n # default drop\n ct = ColumnTransformer([('trans1', Trans(), [0])])\n assert_array_equal(ct.fit_transform(X_array), X_res_first)\n assert_array_equal(ct.fit(X_array).transform(X_array), X_res_first)\n assert len(ct.transformers_) == 2\n assert ct.transformers_[-1][0] == 'remainder'\n assert ct.transformers_[-1][1] == 'drop'\n assert_array_equal(ct.transformers_[-1][2], [1])\n\n # specify passthrough\n ct = ColumnTransformer([('trans', Trans(), [0])], remainder='passthrough')\n assert_array_equal(ct.fit_transform(X_array), X_res_both)\n assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both)\n assert len(ct.transformers_) == 2\n assert ct.transformers_[-1][0] == 'remainder'\n assert ct.transformers_[-1][1] == 'passthrough'\n assert_array_equal(ct.transformers_[-1][2], [1])\n\n # column order is not preserved (passed through added to end)\n ct = ColumnTransformer([('trans1', Trans(), [1])],\n remainder='passthrough')\n assert_array_equal(ct.fit_transform(X_array), X_res_both[:, ::-1])\n assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both[:, ::-1])\n assert len(ct.transformers_) == 2\n assert ct.transformers_[-1][0] == 'remainder'\n assert ct.transformers_[-1][1] == 'passthrough'\n assert_array_equal(ct.transformers_[-1][2], [0])\n\n # passthrough when all actual transformers are skipped\n ct = ColumnTransformer([('trans1', 'drop', [0])],\n remainder='passthrough')\n assert_array_equal(ct.fit_transform(X_array), X_res_second)\n assert_array_equal(ct.fit(X_array).transform(X_array), X_res_second)\n assert len(ct.transformers_) == 2\n assert ct.transformers_[-1][0] == 'remainder'\n assert ct.transformers_[-1][1] == 'passthrough'\n assert_array_equal(ct.transformers_[-1][2], [1])\n\n # error on invalid arg\n ct = 
ColumnTransformer([('trans1', Trans(), [0])], remainder=1)\n assert_raise_message(\n ValueError,\n \"remainder keyword needs to be one of \\'drop\\', \\'passthrough\\', \"\n \"or estimator.\", ct.fit, X_array)\n assert_raise_message(\n ValueError,\n \"remainder keyword needs to be one of \\'drop\\', \\'passthrough\\', \"\n \"or estimator.\", ct.fit_transform, X_array)\n\n # check default for make_column_transformer\n ct = make_column_transformer((Trans(), [0]))\n assert ct.remainder == 'drop'\n\n\[email protected](\"key\", [[0], np.array([0]), slice(0, 1),\n np.array([True, False])])\ndef test_column_transformer_remainder_numpy(key):\n # test different ways that columns are specified with passthrough\n X_array = np.array([[0, 1, 2], [2, 4, 6]]).T\n X_res_both = X_array\n\n ct = ColumnTransformer([('trans1', Trans(), key)],\n remainder='passthrough')\n assert_array_equal(ct.fit_transform(X_array), X_res_both)\n assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both)\n assert len(ct.transformers_) == 2\n assert ct.transformers_[-1][0] == 'remainder'\n assert ct.transformers_[-1][1] == 'passthrough'\n assert_array_equal(ct.transformers_[-1][2], [1])\n\n\[email protected](\n \"key\", [[0], slice(0, 1), np.array([True, False]), ['first'], 'pd-index',\n np.array(['first']), np.array(['first'], dtype=object),\n slice(None, 'first'), slice('first', 'first')])\ndef test_column_transformer_remainder_pandas(key):\n # test different ways that columns are specified with passthrough\n pd = pytest.importorskip('pandas')\n if isinstance(key, str) and key == 'pd-index':\n key = pd.Index(['first'])\n\n X_array = np.array([[0, 1, 2], [2, 4, 6]]).T\n X_df = pd.DataFrame(X_array, columns=['first', 'second'])\n X_res_both = X_array\n\n ct = ColumnTransformer([('trans1', Trans(), key)],\n remainder='passthrough')\n assert_array_equal(ct.fit_transform(X_df), X_res_both)\n assert_array_equal(ct.fit(X_df).transform(X_df), X_res_both)\n assert len(ct.transformers_) == 2\n assert ct.transformers_[-1][0] == 'remainder'\n assert ct.transformers_[-1][1] == 'passthrough'\n assert_array_equal(ct.transformers_[-1][2], [1])\n\n\[email protected](\"key\", [[0], np.array([0]), slice(0, 1),\n np.array([True, False, False])])\ndef test_column_transformer_remainder_transformer(key):\n X_array = np.array([[0, 1, 2],\n [2, 4, 6],\n [8, 6, 4]]).T\n X_res_both = X_array.copy()\n\n # second and third columns are doubled when remainder = DoubleTrans\n X_res_both[:, 1:3] *= 2\n\n ct = ColumnTransformer([('trans1', Trans(), key)],\n remainder=DoubleTrans())\n\n assert_array_equal(ct.fit_transform(X_array), X_res_both)\n assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both)\n assert len(ct.transformers_) == 2\n assert ct.transformers_[-1][0] == 'remainder'\n assert isinstance(ct.transformers_[-1][1], DoubleTrans)\n assert_array_equal(ct.transformers_[-1][2], [1, 2])\n\n\ndef test_column_transformer_no_remaining_remainder_transformer():\n X_array = np.array([[0, 1, 2],\n [2, 4, 6],\n [8, 6, 4]]).T\n\n ct = ColumnTransformer([('trans1', Trans(), [0, 1, 2])],\n remainder=DoubleTrans())\n\n assert_array_equal(ct.fit_transform(X_array), X_array)\n assert_array_equal(ct.fit(X_array).transform(X_array), X_array)\n assert len(ct.transformers_) == 1\n assert ct.transformers_[-1][0] != 'remainder'\n\n\ndef test_column_transformer_drops_all_remainder_transformer():\n X_array = np.array([[0, 1, 2],\n [2, 4, 6],\n [8, 6, 4]]).T\n\n # columns are doubled when remainder = DoubleTrans\n X_res_both = 2 * X_array.copy()[:, 1:3]\n\n 
ct = ColumnTransformer([('trans1', 'drop', [0])],\n remainder=DoubleTrans())\n\n assert_array_equal(ct.fit_transform(X_array), X_res_both)\n assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both)\n assert len(ct.transformers_) == 2\n assert ct.transformers_[-1][0] == 'remainder'\n assert isinstance(ct.transformers_[-1][1], DoubleTrans)\n assert_array_equal(ct.transformers_[-1][2], [1, 2])\n\n\ndef test_column_transformer_sparse_remainder_transformer():\n X_array = np.array([[0, 1, 2],\n [2, 4, 6],\n [8, 6, 4]]).T\n\n ct = ColumnTransformer([('trans1', Trans(), [0])],\n remainder=SparseMatrixTrans(),\n sparse_threshold=0.8)\n\n X_trans = ct.fit_transform(X_array)\n assert sparse.issparse(X_trans)\n # SparseMatrixTrans creates 3 features for each column. There is\n # one column in ``transformers``, thus:\n assert X_trans.shape == (3, 3 + 1)\n\n exp_array = np.hstack(\n (X_array[:, 0].reshape(-1, 1), np.eye(3)))\n assert_array_equal(X_trans.toarray(), exp_array)\n assert len(ct.transformers_) == 2\n assert ct.transformers_[-1][0] == 'remainder'\n assert isinstance(ct.transformers_[-1][1], SparseMatrixTrans)\n assert_array_equal(ct.transformers_[-1][2], [1, 2])\n\n\ndef test_column_transformer_drop_all_sparse_remainder_transformer():\n X_array = np.array([[0, 1, 2],\n [2, 4, 6],\n [8, 6, 4]]).T\n ct = ColumnTransformer([('trans1', 'drop', [0])],\n remainder=SparseMatrixTrans(),\n sparse_threshold=0.8)\n\n X_trans = ct.fit_transform(X_array)\n assert sparse.issparse(X_trans)\n\n # SparseMatrixTrans creates 3 features for each column, thus:\n assert X_trans.shape == (3, 3)\n assert_array_equal(X_trans.toarray(), np.eye(3))\n assert len(ct.transformers_) == 2\n assert ct.transformers_[-1][0] == 'remainder'\n assert isinstance(ct.transformers_[-1][1], SparseMatrixTrans)\n assert_array_equal(ct.transformers_[-1][2], [1, 2])\n\n\ndef test_column_transformer_get_set_params_with_remainder():\n ct = ColumnTransformer([('trans1', StandardScaler(), [0])],\n remainder=StandardScaler())\n\n exp = {'n_jobs': None,\n 'remainder': ct.remainder,\n 'remainder__copy': True,\n 'remainder__with_mean': True,\n 'remainder__with_std': True,\n 'sparse_threshold': 0.3,\n 'trans1': ct.transformers[0][1],\n 'trans1__copy': True,\n 'trans1__with_mean': True,\n 'trans1__with_std': True,\n 'transformers': ct.transformers,\n 'transformer_weights': None,\n 'verbose': False}\n\n assert ct.get_params() == exp\n\n ct.set_params(remainder__with_std=False)\n assert not ct.get_params()['remainder__with_std']\n\n ct.set_params(trans1='passthrough')\n exp = {'n_jobs': None,\n 'remainder': ct.remainder,\n 'remainder__copy': True,\n 'remainder__with_mean': True,\n 'remainder__with_std': False,\n 'sparse_threshold': 0.3,\n 'trans1': 'passthrough',\n 'transformers': ct.transformers,\n 'transformer_weights': None,\n 'verbose': False}\n\n assert ct.get_params() == exp\n\n\ndef test_column_transformer_no_estimators():\n X_array = np.array([[0, 1, 2],\n [2, 4, 6],\n [8, 6, 4]]).astype('float').T\n ct = ColumnTransformer([], remainder=StandardScaler())\n\n params = ct.get_params()\n assert params['remainder__with_mean']\n\n X_trans = ct.fit_transform(X_array)\n assert X_trans.shape == X_array.shape\n assert len(ct.transformers_) == 1\n assert ct.transformers_[-1][0] == 'remainder'\n assert ct.transformers_[-1][2] == [0, 1, 2]\n\n\[email protected](\n ['est', 'pattern'],\n [(ColumnTransformer([('trans1', Trans(), [0]), ('trans2', Trans(), [1])],\n remainder=DoubleTrans()),\n (r'\\[ColumnTransformer\\].*\\(1 of 3\\) Processing 
trans1.* total=.*\\n'\n r'\\[ColumnTransformer\\].*\\(2 of 3\\) Processing trans2.* total=.*\\n'\n r'\\[ColumnTransformer\\].*\\(3 of 3\\) Processing remainder.* total=.*\\n$'\n )),\n (ColumnTransformer([('trans1', Trans(), [0]), ('trans2', Trans(), [1])],\n remainder='passthrough'),\n (r'\\[ColumnTransformer\\].*\\(1 of 3\\) Processing trans1.* total=.*\\n'\n r'\\[ColumnTransformer\\].*\\(2 of 3\\) Processing trans2.* total=.*\\n'\n r'\\[ColumnTransformer\\].*\\(3 of 3\\) Processing remainder.* total=.*\\n$'\n )),\n (ColumnTransformer([('trans1', Trans(), [0]), ('trans2', 'drop', [1])],\n remainder='passthrough'),\n (r'\\[ColumnTransformer\\].*\\(1 of 2\\) Processing trans1.* total=.*\\n'\n r'\\[ColumnTransformer\\].*\\(2 of 2\\) Processing remainder.* total=.*\\n$'\n )),\n (ColumnTransformer([('trans1', Trans(), [0]),\n ('trans2', 'passthrough', [1])],\n remainder='passthrough'),\n (r'\\[ColumnTransformer\\].*\\(1 of 3\\) Processing trans1.* total=.*\\n'\n r'\\[ColumnTransformer\\].*\\(2 of 3\\) Processing trans2.* total=.*\\n'\n r'\\[ColumnTransformer\\].*\\(3 of 3\\) Processing remainder.* total=.*\\n$'\n )),\n (ColumnTransformer([('trans1', Trans(), [0])], remainder='passthrough'),\n (r'\\[ColumnTransformer\\].*\\(1 of 2\\) Processing trans1.* total=.*\\n'\n r'\\[ColumnTransformer\\].*\\(2 of 2\\) Processing remainder.* total=.*\\n$'\n )),\n (ColumnTransformer([('trans1', Trans(), [0]), ('trans2', Trans(), [1])],\n remainder='drop'),\n (r'\\[ColumnTransformer\\].*\\(1 of 2\\) Processing trans1.* total=.*\\n'\n r'\\[ColumnTransformer\\].*\\(2 of 2\\) Processing trans2.* total=.*\\n$')),\n (ColumnTransformer([('trans1', Trans(), [0])], remainder='drop'),\n (r'\\[ColumnTransformer\\].*\\(1 of 1\\) Processing trans1.* total=.*\\n$'))])\[email protected]('method', ['fit', 'fit_transform'])\ndef test_column_transformer_verbose(est, pattern, method, capsys):\n X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T\n\n func = getattr(est, method)\n est.set_params(verbose=False)\n func(X_array)\n assert not capsys.readouterr().out, 'Got output for verbose=False'\n\n est.set_params(verbose=True)\n func(X_array)\n assert re.match(pattern, capsys.readouterr()[0])\n\n\ndef test_column_transformer_no_estimators_set_params():\n ct = ColumnTransformer([]).set_params(n_jobs=2)\n assert ct.n_jobs == 2\n\n\ndef test_column_transformer_callable_specifier():\n # assert that function gets the full array\n X_array = np.array([[0, 1, 2], [2, 4, 6]]).T\n X_res_first = np.array([[0, 1, 2]]).T\n\n def func(X):\n assert_array_equal(X, X_array)\n return [0]\n\n ct = ColumnTransformer([('trans', Trans(), func)],\n remainder='drop')\n assert_array_equal(ct.fit_transform(X_array), X_res_first)\n assert_array_equal(ct.fit(X_array).transform(X_array), X_res_first)\n assert callable(ct.transformers[0][2])\n assert ct.transformers_[0][2] == [0]\n\n\ndef test_column_transformer_callable_specifier_dataframe():\n # assert that function gets the full dataframe\n pd = pytest.importorskip('pandas')\n X_array = np.array([[0, 1, 2], [2, 4, 6]]).T\n X_res_first = np.array([[0, 1, 2]]).T\n\n X_df = pd.DataFrame(X_array, columns=['first', 'second'])\n\n def func(X):\n assert_array_equal(X.columns, X_df.columns)\n assert_array_equal(X.values, X_df.values)\n return ['first']\n\n ct = ColumnTransformer([('trans', Trans(), func)],\n remainder='drop')\n assert_array_equal(ct.fit_transform(X_df), X_res_first)\n assert_array_equal(ct.fit(X_df).transform(X_df), X_res_first)\n assert callable(ct.transformers[0][2])\n assert 
ct.transformers_[0][2] == ['first']\n\n\ndef test_column_transformer_negative_column_indexes():\n X = np.random.randn(2, 2)\n X_categories = np.array([[1], [2]])\n X = np.concatenate([X, X_categories], axis=1)\n\n ohe = OneHotEncoder()\n\n tf_1 = ColumnTransformer([('ohe', ohe, [-1])], remainder='passthrough')\n tf_2 = ColumnTransformer([('ohe', ohe, [2])], remainder='passthrough')\n assert_array_equal(tf_1.fit_transform(X), tf_2.fit_transform(X))\n\n\[email protected](\"explicit_colname\", ['first', 'second'])\ndef test_column_transformer_reordered_column_names_remainder(explicit_colname):\n \"\"\"Regression test for issue #14223: 'Named col indexing fails with\n ColumnTransformer remainder on changing DataFrame column ordering'\n\n Should raise error on changed order combined with remainder.\n Should allow for added columns in `transform` input DataFrame\n as long as all preceding columns match.\n \"\"\"\n pd = pytest.importorskip('pandas')\n\n X_fit_array = np.array([[0, 1, 2], [2, 4, 6]]).T\n X_fit_df = pd.DataFrame(X_fit_array, columns=['first', 'second'])\n\n X_trans_array = np.array([[2, 4, 6], [0, 1, 2]]).T\n X_trans_df = pd.DataFrame(X_trans_array, columns=['second', 'first'])\n\n tf = ColumnTransformer([('bycol', Trans(), explicit_colname)],\n remainder=Trans())\n\n tf.fit(X_fit_df)\n err_msg = (\"Given feature/column names do not match the ones for the \"\n \"data given during fit.\")\n with pytest.raises(RuntimeError, match=err_msg):\n tf.transform(X_trans_df)\n\n # ValueError for added columns\n X_extended_df = X_fit_df.copy()\n X_extended_df['third'] = [3, 6, 9]\n err_msg = (\"X has 3 features, but ColumnTransformer is expecting 2 \"\n \"features as input.\")\n with pytest.raises(ValueError, match=err_msg):\n tf.transform(X_extended_df)\n\n # No 'columns' AttributeError when transform input is a numpy array\n X_array = X_fit_array.copy()\n err_msg = 'Specifying the columns'\n with pytest.raises(ValueError, match=err_msg):\n tf.transform(X_array)\n\n\ndef test_feature_name_validation():\n \"\"\"Tests if the proper warning/error is raised if the columns do not match\n during fit and transform.\"\"\"\n pd = pytest.importorskip(\"pandas\")\n\n X = np.ones(shape=(3, 2))\n X_extra = np.ones(shape=(3, 3))\n df = pd.DataFrame(X, columns=['a', 'b'])\n df_extra = pd.DataFrame(X_extra, columns=['a', 'b', 'c'])\n\n tf = ColumnTransformer([('bycol', Trans(), ['a', 'b'])])\n tf.fit(df)\n\n msg = (\"X has 3 features, but ColumnTransformer is expecting 2 features \"\n \"as input.\")\n with pytest.raises(ValueError, match=msg):\n tf.transform(df_extra)\n\n tf = ColumnTransformer([('bycol', Trans(), [0])])\n tf.fit(df)\n\n with pytest.raises(ValueError, match=msg):\n tf.transform(X_extra)\n\n with warnings.catch_warnings(record=True) as warns:\n tf.transform(X)\n assert not warns\n\n tf = ColumnTransformer([('bycol', Trans(), ['a'])],\n remainder=Trans())\n tf.fit(df)\n with pytest.raises(ValueError, match=msg):\n tf.transform(df_extra)\n\n\[email protected](\"array_type\", [np.asarray, sparse.csr_matrix])\ndef test_column_transformer_mask_indexing(array_type):\n # Regression test for #14510\n # Boolean array-like does not behave as boolean array with NumPy < 1.12\n # and sparse matrices as well\n X = np.transpose([[1, 2, 3], [4, 5, 6], [5, 6, 7], [8, 9, 10]])\n X = array_type(X)\n column_transformer = ColumnTransformer(\n [('identity', FunctionTransformer(), [False, True, False, True])]\n )\n X_trans = column_transformer.fit_transform(X)\n assert X_trans.shape == (3, 2)\n\n\ndef 
test_n_features_in():\n # make sure n_features_in is what is passed as input to the column\n # transformer.\n\n X = [[1, 2], [3, 4], [5, 6]]\n ct = ColumnTransformer([('a', DoubleTrans(), [0]),\n ('b', DoubleTrans(), [1])])\n assert not hasattr(ct, 'n_features_in_')\n ct.fit(X)\n assert ct.n_features_in_ == 2\n\n\[email protected]('cols, pattern, include, exclude', [\n (['col_int', 'col_float'], None, np.number, None),\n (['col_int', 'col_float'], None, None, object),\n (['col_int', 'col_float'], None, [int, float], None),\n (['col_str'], None, [object], None),\n (['col_str'], None, object, None),\n (['col_float'], None, float, None),\n (['col_float'], 'at$', [np.number], None),\n (['col_int'], None, [int], None),\n (['col_int'], '^col_int', [np.number], None),\n (['col_float', 'col_str'], 'float|str', None, None),\n (['col_str'], '^col_s', None, [int]),\n ([], 'str$', float, None),\n (['col_int', 'col_float', 'col_str'], None, [np.number, object], None),\n])\ndef test_make_column_selector_with_select_dtypes(cols, pattern, include,\n exclude):\n pd = pytest.importorskip('pandas')\n\n X_df = pd.DataFrame({\n 'col_int': np.array([0, 1, 2], dtype=int),\n 'col_float': np.array([0.0, 1.0, 2.0], dtype=float),\n 'col_str': [\"one\", \"two\", \"three\"],\n }, columns=['col_int', 'col_float', 'col_str'])\n\n selector = make_column_selector(\n dtype_include=include, dtype_exclude=exclude, pattern=pattern)\n\n assert_array_equal(selector(X_df), cols)\n\n\ndef test_column_transformer_with_make_column_selector():\n # Functional test for column transformer + column selector\n pd = pytest.importorskip('pandas')\n X_df = pd.DataFrame({\n 'col_int': np.array([0, 1, 2], dtype=int),\n 'col_float': np.array([0.0, 1.0, 2.0], dtype=float),\n 'col_cat': [\"one\", \"two\", \"one\"],\n 'col_str': [\"low\", \"middle\", \"high\"]\n }, columns=['col_int', 'col_float', 'col_cat', 'col_str'])\n X_df['col_str'] = X_df['col_str'].astype('category')\n\n cat_selector = make_column_selector(dtype_include=['category', object])\n num_selector = make_column_selector(dtype_include=np.number)\n\n ohe = OneHotEncoder()\n scaler = StandardScaler()\n\n ct_selector = make_column_transformer((ohe, cat_selector),\n (scaler, num_selector))\n ct_direct = make_column_transformer((ohe, ['col_cat', 'col_str']),\n (scaler, ['col_float', 'col_int']))\n\n X_selector = ct_selector.fit_transform(X_df)\n X_direct = ct_direct.fit_transform(X_df)\n\n assert_allclose(X_selector, X_direct)\n\n\ndef test_make_column_selector_error():\n selector = make_column_selector(dtype_include=np.number)\n X = np.array([[0.1, 0.2]])\n msg = (\"make_column_selector can only be applied to pandas dataframes\")\n with pytest.raises(ValueError, match=msg):\n selector(X)\n\n\ndef test_make_column_selector_pickle():\n pd = pytest.importorskip('pandas')\n\n X_df = pd.DataFrame({\n 'col_int': np.array([0, 1, 2], dtype=int),\n 'col_float': np.array([0.0, 1.0, 2.0], dtype=float),\n 'col_str': [\"one\", \"two\", \"three\"],\n }, columns=['col_int', 'col_float', 'col_str'])\n\n selector = make_column_selector(dtype_include=[object])\n selector_picked = pickle.loads(pickle.dumps(selector))\n\n assert_array_equal(selector(X_df), selector_picked(X_df))\n\n\[email protected](\n 'empty_col', [[], np.array([], dtype=int), lambda x: []],\n ids=['list', 'array', 'callable']\n)\ndef test_feature_names_empty_columns(empty_col):\n pd = pytest.importorskip('pandas')\n\n df = pd.DataFrame({\"col1\": [\"a\", \"a\", \"b\"], \"col2\": [\"z\", \"z\", \"z\"]})\n\n ct = ColumnTransformer(\n 
transformers=[\n (\"ohe\", OneHotEncoder(), [\"col1\", \"col2\"]),\n (\"empty_features\", OneHotEncoder(), empty_col),\n ],\n )\n\n ct.fit(df)\n assert ct.get_feature_names() == ['ohe__x0_a', 'ohe__x0_b', 'ohe__x1_z']\n\n\[email protected]('remainder', [\"passthrough\", StandardScaler()])\ndef test_sk_visual_block_remainder(remainder):\n # remainder='passthrough' or an estimator will be shown in repr_html\n ohe = OneHotEncoder()\n ct = ColumnTransformer(transformers=[('ohe', ohe, [\"col1\", \"col2\"])],\n remainder=remainder)\n visual_block = ct._sk_visual_block_()\n assert visual_block.names == ('ohe', 'remainder')\n assert visual_block.name_details == (['col1', 'col2'], '')\n assert visual_block.estimators == (ohe, remainder)\n\n\ndef test_sk_visual_block_remainder_drop():\n # remainder='drop' is not shown in repr_html\n ohe = OneHotEncoder()\n ct = ColumnTransformer(transformers=[('ohe', ohe, [\"col1\", \"col2\"])])\n visual_block = ct._sk_visual_block_()\n assert visual_block.names == ('ohe',)\n assert visual_block.name_details == (['col1', 'col2'],)\n assert visual_block.estimators == (ohe,)\n\n\[email protected]('remainder', [\"passthrough\", StandardScaler()])\ndef test_sk_visual_block_remainder_fitted_pandas(remainder):\n # Remainder shows the columns after fitting\n pd = pytest.importorskip('pandas')\n ohe = OneHotEncoder()\n ct = ColumnTransformer(transformers=[('ohe', ohe, [\"col1\", \"col2\"])],\n remainder=remainder)\n df = pd.DataFrame({\"col1\": [\"a\", \"b\", \"c\"], \"col2\": [\"z\", \"z\", \"z\"],\n \"col3\": [1, 2, 3], \"col4\": [3, 4, 5]})\n ct.fit(df)\n visual_block = ct._sk_visual_block_()\n assert visual_block.names == ('ohe', 'remainder')\n assert visual_block.name_details == (['col1', 'col2'], ['col3', 'col4'])\n assert visual_block.estimators == (ohe, remainder)\n\n\[email protected]('remainder', [\"passthrough\", StandardScaler()])\ndef test_sk_visual_block_remainder_fitted_numpy(remainder):\n # Remainder shows the indices after fitting\n X = np.array([[1, 2, 3], [4, 5, 6]], dtype=float)\n scaler = StandardScaler()\n ct = ColumnTransformer(transformers=[('scale', scaler, [0, 2])],\n remainder=remainder)\n ct.fit(X)\n visual_block = ct._sk_visual_block_()\n assert visual_block.names == ('scale', 'remainder')\n assert visual_block.name_details == ([0, 2], [1])\n assert visual_block.estimators == (scaler, remainder)\n\n\[email protected](\"selector\", [[], [False, False]])\ndef test_get_feature_names_empty_selection(selector):\n \"\"\"Test that get_feature_names is only called for transformers that\n were selected. Non-regression test for #19550.\n \"\"\"\n ct = ColumnTransformer([('ohe', OneHotEncoder(drop='first'), selector)])\n ct.fit([[1, 2], [3, 4]])\n assert ct.get_feature_names() == []\n"
] |
[
[
"numpy.concatenate",
"sklearn.compose.make_column_transformer",
"numpy.random.randn",
"sklearn.compose.ColumnTransformer",
"scipy.sparse.issparse",
"numpy.arange",
"numpy.eye",
"sklearn.compose.make_column_selector",
"sklearn.utils._testing.assert_array_equal",
"sklearn.utils._testing.assert_raise_message",
"numpy.atleast_2d",
"numpy.transpose",
"numpy.testing.assert_allclose",
"sklearn.feature_extraction.DictVectorizer",
"numpy.array",
"sklearn.preprocessing.FunctionTransformer",
"scipy.sparse.eye",
"sklearn.preprocessing.OneHotEncoder",
"numpy.ones",
"sklearn.preprocessing.Normalizer",
"sklearn.preprocessing.StandardScaler",
"numpy.vstack"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
josephjcontreras/vectormath
|
[
"a2259fb82cf5a665170f50d216b11a738400d878"
] |
[
"vectormath/vector.py"
] |
[
"\"\"\"vector.py contains definitions for Vector and VectorArray classes\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np\n\n\nclass BaseVector(np.ndarray):\n \"\"\"Class to contain basic operations used by all Vector classes\"\"\"\n\n def __new__(cls, *args, **kwargs):\n \"\"\"BaseVector cannot be created\"\"\"\n raise NotImplementedError('Please specify Vector2 or Vector3')\n\n @property\n def x(self):\n \"\"\"x-component of vector\"\"\"\n return self[0]\n\n @x.setter\n def x(self, value):\n self[0] = value\n\n @property\n def y(self):\n \"\"\"y-component of vector\"\"\"\n return self[1]\n\n @y.setter\n def y(self, value):\n self[1] = value\n\n @property\n def length(self):\n \"\"\"Length of vector\"\"\"\n return float(np.sqrt(np.sum(self**2)))\n\n @length.setter\n def length(self, value):\n if not np.isscalar(value):\n raise ValueError('Length must be a scalar')\n value = float(value)\n if self.length != 0:\n new_length = value/self.length\n self *= new_length\n return\n if value != 0:\n raise ZeroDivisionError('Cannot resize vector of length 0 to '\n 'nonzero length')\n\n @property\n def rho(self):\n \"\"\"Radial coordinate of this vector (equal to the length of the vector)\"\"\"\n return self.length\n\n @rho.setter\n def rho(self, value):\n self.length = value\n\n @property\n def theta(self):\n \"\"\"Angular coordinate / azimuthal angle of this vector in radians\n\n Based on polar coordinate space (or sperical coordinate space for `Vector3`)\n returns angle between this vector and the positive x-axis\n range: (-pi <= theta <= pi)\n \"\"\"\n return float(np.arctan2(self.y, self.x))\n\n # pylint: disable=fixme\n # TODO: Add `theta` and `theta_deg` setters\n # @theta.setter\n # def theta(self, value):\n # ...\n\n @property\n def theta_deg(self):\n \"\"\"Angular coordinate / azimuthal angle of this vector in degrees\n\n Based on polar coordinate space (or sperical coordinate space for `Vector3`)\n returns angle between this vector and the positive x-axis\n range: (-180 <= theta_deg <= 180)\n \"\"\"\n return self.theta * 180 / np.pi\n\n def as_length(self, value):\n \"\"\"Return a new vector scaled to given length\"\"\"\n new_vec = self.copy()\n new_vec.length = value\n return new_vec\n\n def as_percent(self, value):\n \"\"\"Return a new vector scaled by given decimal percent\"\"\"\n new_vec = self.copy()\n new_vec.length = value * self.length\n return new_vec\n\n def as_unit(self):\n \"\"\"Return a new vector scaled to length 1\"\"\"\n new_vec = self.copy()\n new_vec.normalize()\n return new_vec\n\n def normalize(self):\n \"\"\"Scale the length of a vector to 1 in place\"\"\"\n self.length = 1\n return self\n\n def dot(self, vec):\n \"\"\"Dot product with another vector\"\"\"\n if not isinstance(vec, self.__class__):\n raise TypeError('Dot product operand must be a vector')\n return np.dot(self, vec)\n\n def cross(self, vec):\n \"\"\"Cross product with another vector\"\"\"\n if not isinstance(vec, self.__class__):\n raise TypeError('Cross product operand must be a vector')\n return self.__class__(np.cross(self, vec))\n\n def angle(self, vec, unit='rad'):\n \"\"\"Calculate the angle between two Vectors\n\n unit: unit for returned angle, either 'rad' or 'deg'. 
Defaults to 'rad'\n \"\"\"\n if not isinstance(vec, self.__class__):\n raise TypeError('Angle operand must be of class {}'\n .format(self.__class__.__name__))\n if unit not in ['deg', 'rad']:\n raise ValueError('Only units of rad or deg are supported')\n\n denom = self.length * vec.length\n if denom == 0:\n raise ZeroDivisionError('Cannot calculate angle between '\n 'zero-length vector(s)')\n\n ang = np.arccos(self.dot(vec) / denom)\n if unit == 'deg':\n ang = ang * 180 / np.pi\n return ang\n\n def __mul__(self, multiplier):\n return self.__class__(self.view(np.ndarray) * multiplier)\n\n\nclass Vector3(BaseVector):\n \"\"\"Primitive 3D vector defined from the origin\n\n New Vector3 can be created with:\n - another Vector3\n - length-3 array\n - x, y, and y values\n - no input (returns [0., 0., 0.])\n \"\"\"\n\n # pylint: disable=fixme\n # TODO: add support for instantiating Vector3 with `polar`=True\n\n def __new__(cls, x=None, y=None, z=None): #pylint: disable=arguments-differ\n\n def read_array(X, Y, Z):\n \"\"\"Build Vector3 from another Vector3, [x, y, z], or x/y/z\"\"\"\n if isinstance(X, cls) and Y is None and Z is None:\n return cls(X.x, X.y, X.z)\n if (isinstance(X, (list, tuple, np.ndarray)) and len(X) == 3 and\n Y is None and Z is None):\n return cls(X[0], X[1], X[2])\n if X is None and Y is None and Z is None:\n return cls(0, 0, 0)\n if np.isscalar(X) and np.isscalar(Y) and np.isscalar(Z):\n xyz = np.r_[X, Y, Z]\n xyz = xyz.astype(float)\n return xyz.view(cls)\n raise ValueError('Invalid input for Vector3 - must be an instance '\n 'of a Vector3, a length-3 array, 3 scalars, or '\n 'nothing for [0., 0., 0.]')\n\n return read_array(x, y, z)\n\n def __array_wrap__(self, out_arr, context=None): #pylint: disable=no-self-use, unused-argument\n \"\"\"This is called at the end of ufuncs\n\n If the output is the wrong shape, return the ndarray view\n instead of vector view\n \"\"\"\n if out_arr.shape != (3,):\n out_arr = out_arr.view(np.ndarray)\n return out_arr\n\n def __array_finalize__(self, obj):\n \"\"\"This is called when initializing the vector\n\n If the constructor is used, obj is None. If slicing is\n used, obj has the same class as self. In both these cases,\n we let things pass.\n\n If we are viewing another array class as a vector, then obj has\n a different class than self. 
In this case, if the array has\n an invalid shape a ValueError is raised\n \"\"\"\n if obj is None or obj.__class__ is Vector3:\n return\n if self.shape != (3,):\n raise ValueError(\n 'Invalid array to view as Vector3 - must be length-3 array.'\n )\n\n @property\n def z(self):\n \"\"\"z-component of vector\"\"\"\n return self[2]\n\n @z.setter\n def z(self, value):\n self[2] = value\n\n @property\n def phi(self):\n \"\"\"Polar angle / inclination of this vector in radians\n\n Based on sperical coordinate space\n returns angle between this vector and the positive z-azis\n range: (0 <= phi <= pi)\n \"\"\"\n return np.arctan2(np.sqrt(self.x**2 + self.y**2), self.z)\n\n # pylint: disable=fixme\n # TODO: Add `phi` and `phi_deg` setters\n # @phi.setter\n # def phi(self, value):\n # ...\n\n @property\n def phi_deg(self):\n \"\"\"Polar angle / inclination of this vector in degrees\n\n Based on sperical coordinate space\n returns angle between this vector and the positive z-azis\n range: (0 <= phi <= pi)\n \"\"\"\n return self.phi * 180 / np.pi\n\n\nclass Vector2(BaseVector):\n \"\"\"Primitive 2D vector defined from the origin\n\n New Vector2 can be created with:\n - another Vector2\n - length-2 array\n - x and y values\n - rho and theta, if polar=True; specify unit as 'rad' (default) or 'deg'\n - no input (returns [0., 0.])\n \"\"\"\n\n def __new__(cls, x=None, y=None, polar=False, unit='rad'): #pylint: disable=arguments-differ\n\n def read_array(X, Y):\n \"\"\"Build Vector2 from another Vector2, [x, y], or x/y\"\"\"\n if isinstance(X, cls) and Y is None:\n if polar:\n raise ValueError(\n 'When copying a Vector2, polar=True is not supported'\n )\n return cls(X.x, X.y)\n if (isinstance(X, (list, tuple, np.ndarray)) and len(X) == 2 and\n Y is None):\n return cls(X[0], X[1], polar, unit)\n if X is None and Y is None:\n return cls(0, 0, polar, unit)\n if np.isscalar(X) and np.isscalar(Y):\n if polar:\n if unit not in ['deg', 'rad']:\n raise ValueError(\n 'Only units of rad or deg are supported'\n )\n if unit == 'deg':\n Y = Y / 180 * np.pi\n X, Y = X * np.cos(Y), X * np.sin(Y)\n xyz = np.r_[X, Y]\n xyz = xyz.astype(float)\n return xyz.view(cls)\n raise ValueError('Invalid input for Vector2 - must be an instance '\n 'of a Vector2, a length-2 array, 2 scalars, or '\n 'nothing for [0., 0.]')\n\n return read_array(x, y)\n\n def __array_wrap__(self, out_arr, context=None): #pylint: disable=no-self-use, unused-argument\n if out_arr.shape != (2,):\n out_arr = out_arr.view(np.ndarray)\n return out_arr\n\n def __array_finalize__(self, obj):\n if obj is None or obj.__class__ is Vector2:\n return\n if self.shape != (2,):\n raise ValueError(\n 'Invalid array to view as Vector2 - must be length-2 array.'\n )\n\n def cross(self, vec):\n \"\"\"Cross product with another vector\"\"\"\n if not isinstance(vec, self.__class__):\n raise TypeError('Cross product operand must be a vector')\n return Vector3(0, 0, np.asscalar(np.cross(self, vec)))\n\n\nclass BaseVectorArray(BaseVector):\n \"\"\"Class to contain basic operations used by all VectorArray classes\"\"\"\n\n @property\n def x(self):\n \"\"\"Array of x-component of vectors\"\"\"\n return self[:, 0]\n\n @x.setter\n def x(self, value):\n self[:, 0] = value\n\n @property\n def y(self):\n \"\"\"Array of y-component of vectors\"\"\"\n return self[:, 1]\n\n @y.setter\n def y(self, value):\n self[:, 1] = value\n\n @property\n def nV(self):\n \"\"\"Number of vectors\"\"\"\n return self.shape[0]\n\n def normalize(self):\n \"\"\"Scale the length of all vectors to 1 in 
place\"\"\"\n self.length = np.ones(self.nV)\n return self\n\n @property\n def dims(self):\n \"\"\"Tuple of different dimension names for Vector type\"\"\"\n raise NotImplementedError('Please use Vector2Array or Vector3Array')\n\n @property\n def length(self):\n \"\"\"Array of vector lengths\"\"\"\n return np.sqrt(np.sum(self**2, axis=1)).view(np.ndarray)\n\n @length.setter\n def length(self, l):\n l = np.array(l)\n if self.nV != l.size:\n raise ValueError('Length vector must be the same number of '\n 'elements as vector.')\n # This case resizes all vectors with nonzero length\n if np.all(self.length != 0):\n new_length = l/self.length\n for dim in self.dims:\n setattr(self, dim, new_length*getattr(self, dim))\n return\n # This case only applies to single vectors\n if self.nV == 1 and l == 0:\n assert self.length == 0, \\\n 'Nonzero length should be resized in the first case'\n for dim in self.dims:\n setattr(self, dim, 0.)\n return\n # This case only applies if vectors with length == 0\n # in an array are getting resized to 0\n if self.nV > 1 and np.array_equal(self.length.nonzero(), l.nonzero()): #pylint: disable=no-member\n new_length = l/[x if x != 0 else 1 for x in self.length]\n for dim in self.dims:\n setattr(self, dim, new_length*getattr(self, dim))\n return\n # Error if length zero array is resized to nonzero value\n raise ZeroDivisionError('Cannot resize vector of length 0 to '\n 'nonzero length')\n\n def dot(self, vec):\n \"\"\"Dot product with another vector\"\"\"\n if not isinstance(vec, self.__class__):\n raise TypeError('Dot product operand must be a VectorArray')\n if self.nV != 1 and vec.nV != 1 and self.nV != vec.nV:\n raise ValueError('Dot product operands must have the same '\n 'number of elements.')\n return np.sum((getattr(self, d)*getattr(vec, d) for d in self.dims), 1)\n\n def angle(self, vec, unit='rad'):\n \"\"\"Angle method is only for Vectors, not VectorArrays\"\"\"\n raise NotImplementedError('angle not implemented for VectorArrays')\n\n\nclass Vector3Array(BaseVectorArray):\n \"\"\"List of Vector3\n\n A new Vector3Array can be created with:\n - another Vector3Array\n - x/y/z lists of equal length\n - n x 3 array\n - nothing (returns [[0., 0., 0.]])\n \"\"\"\n\n def __new__(cls, x=None, y=None, z=None): #pylint: disable=arguments-differ\n\n def read_array(X, Y, Z):\n \"\"\"Build Vector3Array from various inputs\"\"\"\n if isinstance(X, cls) and Y is None and Z is None:\n X = np.atleast_2d(X)\n return cls(X.x.copy(), X.y.copy(), X.z.copy())\n if isinstance(X, (list, tuple)):\n X = np.array(X)\n if isinstance(Y, (list, tuple)):\n Y = np.array(Y)\n if isinstance(Z, (list, tuple)):\n Z = np.array(Z)\n if isinstance(X, np.ndarray) and Y is None and Z is None:\n X = np.squeeze(X)\n if X.size == 3:\n X = X.flatten()\n return cls(X[0], X[1], X[2])\n if len(X.shape) == 2 and X.shape[1] == 3:\n return cls(\n X[:, 0].copy(), X[:, 1].copy(), X[:, 2].copy()\n )\n raise ValueError(\n 'Unexpected shape for vector init: {shp}'.format(\n shp=X.shape\n )\n )\n if np.isscalar(X) and np.isscalar(Y) and np.isscalar(Z):\n X, Y, Z = float(X), float(Y), float(Z)\n elif not (isinstance(X, type(Y)) and isinstance(X, type(Z))):\n raise TypeError('Must be the same types for x, y, and '\n 'z for vector init')\n if isinstance(X, np.ndarray):\n if not (X.shape == Y.shape and X.shape == Z.shape):\n raise ValueError('Must be the same shapes for x, y, '\n 'and z in vector init')\n vec_ndarray = np.c_[X, Y, Z]\n vec_ndarray = vec_ndarray.astype(float)\n return vec_ndarray.view(cls)\n if X is 
None:\n X, Y, Z = 0.0, 0.0, 0.0\n vec_ndarray = np.r_[X, Y, Z].reshape((1, 3))\n return np.asarray(vec_ndarray).view(cls)\n\n return read_array(x, y, z)\n\n def __array_wrap__(self, out_arr, context=None): #pylint: disable=no-self-use, unused-argument\n if len(out_arr.shape) != 2 or out_arr.shape[1] != 3:\n out_arr = out_arr.view(np.ndarray)\n return out_arr\n\n def __array_finalize__(self, obj):\n if obj is None or obj.__class__ is Vector3Array:\n return\n if len(self.shape) != 2 or self.shape[1] != 3: #pylint: disable=unsubscriptable-object\n raise ValueError(\n 'Invalid array to view as Vector3Array - must be '\n 'array of shape (*, 3).'\n )\n\n def __getitem__(self, i):\n \"\"\"Overriding _getitem__ allows coersion to Vector3 or ndarray\"\"\"\n item_out = super(Vector3Array, self).__getitem__(i)\n if np.isscalar(i):\n return item_out.view(Vector3)\n if isinstance(i, slice):\n return item_out\n return item_out.view(np.ndarray)\n\n @property\n def z(self):\n \"\"\"Array of z-component of vectors\"\"\"\n return self[:, 2]\n\n @z.setter\n def z(self, value):\n self[:, 2] = value\n\n @property\n def dims(self):\n return ('x', 'y', 'z')\n\n def cross(self, vec):\n \"\"\"Cross product with another Vector3Array\"\"\"\n if not isinstance(vec, Vector3Array):\n raise TypeError('Cross product operand must be a Vector3Array')\n if self.nV != 1 and vec.nV != 1 and self.nV != vec.nV:\n raise ValueError('Cross product operands must have the same '\n 'number of elements.')\n return Vector3Array(np.cross(self, vec))\n\n\nclass Vector2Array(BaseVectorArray):\n \"\"\"List of Vector2\n\n A new Vector2Array can be created with:\n - another Vector2Array\n - x/y lists of equal length\n - n x 2 array\n - nothing (returns [[0., 0.]])\n \"\"\"\n\n def __new__(cls, x=None, y=None): #pylint: disable=arguments-differ\n\n def read_array(X, Y):\n \"\"\"Build Vector2Array from various inputs\"\"\"\n if isinstance(X, cls) and Y is None:\n X = np.atleast_2d(X)\n return cls(X.x.copy(), X.y.copy())\n if isinstance(X, (list, tuple)):\n X = np.array(X)\n if isinstance(Y, (list, tuple)):\n Y = np.array(Y)\n if isinstance(X, np.ndarray) and Y is None:\n X = np.squeeze(X)\n if X.size == 2:\n X = X.flatten()\n return cls(X[0], X[1])\n if len(X.shape) == 2 and X.shape[1] == 2:\n return cls(\n X[:, 0].copy(), X[:, 1].copy()\n )\n raise ValueError(\n 'Unexpected shape for vector init: {shp}'.format(\n shp=X.shape\n )\n )\n if np.isscalar(X) and np.isscalar(Y):\n X, Y = float(X), float(Y)\n elif not isinstance(X, type(Y)):\n raise TypeError('Must be the same types for x and y '\n 'for vector init')\n if isinstance(X, np.ndarray):\n if X.shape != Y.shape:\n raise ValueError('Must be the same shapes for x and y '\n 'in vector init')\n vec_ndarray = np.c_[X, Y]\n vec_ndarray = vec_ndarray.astype(float)\n return vec_ndarray.view(cls)\n if X is None:\n X, Y = 0.0, 0.0\n vec_ndarray = np.r_[X, Y].reshape((1, 2))\n return np.asarray(vec_ndarray).view(cls)\n\n return read_array(x, y)\n\n def __array_wrap__(self, out_arr, context=None): #pylint: disable=no-self-use, unused-argument\n if len(out_arr.shape) != 2 or out_arr.shape[1] != 2:\n out_arr = out_arr.view(np.ndarray)\n return out_arr\n\n def __array_finalize__(self, obj):\n if obj is None or obj.__class__ is Vector2Array:\n return\n if len(self.shape) != 2 or self.shape[1] != 2: #pylint: disable=unsubscriptable-object\n raise ValueError(\n 'Invalid array to view as Vector2Array - must be '\n 'array of shape (*, 2).'\n )\n\n def __getitem__(self, i):\n \"\"\"Overriding _getitem__ allows 
coercion to Vector2 or ndarray\"\"\"\n item_out = super(Vector2Array, self).__getitem__(i)\n if np.isscalar(i):\n return item_out.view(Vector2)\n if isinstance(i, slice):\n return item_out\n return item_out.view(np.ndarray)\n\n @property\n def dims(self):\n return ('x', 'y')\n"
] |
[
[
"numpy.dot",
"numpy.sqrt",
"numpy.asarray",
"numpy.squeeze",
"numpy.cos",
"numpy.ones",
"numpy.all",
"numpy.arctan2",
"numpy.atleast_2d",
"numpy.sin",
"numpy.isscalar",
"numpy.cross",
"numpy.array",
"numpy.sum"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
psi1104/pifuhd
|
[
"32be6642d8ee198f6186ec7ab82f329d95a9f275"
] |
[
"lightweight_human_pose_estimation_pytorch/get_pose.py"
] |
[
"import torch\nimport cv2\nimport numpy as np\nfrom .models.with_mobilenet import PoseEstimationWithMobileNet\nfrom .modules.keypoints import extract_keypoints, group_keypoints\nfrom .modules.load_state import load_state\nfrom .modules.pose import Pose\nfrom . import demo\n\ndef get_rect(net, images, height_size=512):\n\n stride = 8\n upsample_ratio = 4\n num_keypoints = Pose.num_kpts\n previous_poses = []\n delay = 33\n for image in images:\n rect_path = image.replace('.%s' % (image.split('.')[-1]), '_rect.txt')\n img = cv2.imread(image, cv2.IMREAD_COLOR)\n orig_img = img.copy()\n heatmaps, pafs, scale, pad = demo.infer_fast(net, img, height_size, stride, upsample_ratio, cpu=False)\n\n total_keypoints_num = 0\n all_keypoints_by_type = []\n for kpt_idx in range(num_keypoints): # 19th for bg\n total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx], all_keypoints_by_type,\n total_keypoints_num)\n\n pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type, pafs, demo=True)\n for kpt_id in range(all_keypoints.shape[0]):\n all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride / upsample_ratio - pad[1]) / scale\n all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride / upsample_ratio - pad[0]) / scale\n current_poses = []\n\n rects = []\n for n in range(len(pose_entries)):\n if len(pose_entries[n]) == 0:\n continue\n pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1\n valid_keypoints = []\n for kpt_id in range(num_keypoints):\n if pose_entries[n][kpt_id] != -1.0: # keypoint was found\n pose_keypoints[kpt_id, 0] = int(all_keypoints[int(pose_entries[n][kpt_id]), 0])\n pose_keypoints[kpt_id, 1] = int(all_keypoints[int(pose_entries[n][kpt_id]), 1])\n valid_keypoints.append([pose_keypoints[kpt_id, 0], pose_keypoints[kpt_id, 1]])\n valid_keypoints = np.array(valid_keypoints)\n\n if pose_entries[n][10] != -1.0 or pose_entries[n][13] != -1.0:\n pmin = valid_keypoints.min(0)\n pmax = valid_keypoints.max(0)\n\n center = (0.5 * (pmax[:2] + pmin[:2])).astype(np.int)\n radius = int(0.65 * max(pmax[0] - pmin[0], pmax[1] - pmin[1]))\n elif pose_entries[n][10] == -1.0 and pose_entries[n][13] == -1.0 and pose_entries[n][8] != -1.0 and \\\n pose_entries[n][11] != -1.0:\n # if leg is missing, use pelvis to get cropping\n center = (0.5 * (pose_keypoints[8] + pose_keypoints[11])).astype(np.int)\n radius = int(1.45 * np.sqrt(((center[None, :] - valid_keypoints) ** 2).sum(1)).max(0))\n center[1] += int(0.05 * radius)\n else:\n center = np.array([img.shape[1] // 2, img.shape[0] // 2])\n radius = max(img.shape[1] // 2, img.shape[0] // 2)\n\n x1 = center[0] - radius\n y1 = center[1] - radius\n\n rects.append([x1, y1, 2 * radius, 2 * radius])\n\n np.savetxt(rect_path, np.array(rects), fmt='%d')\n\ndef get_pose_model():\n net = PoseEstimationWithMobileNet()\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n checkpoint = torch.load('lightweight_human_pose_estimation_pytorch/checkpoint_iter_370000.pth', map_location=device)\n load_state(net, checkpoint)\n\n return net\n\ndef get_pose(net, image_path):\n\n get_rect(net.cuda(), [image_path], 512)\n"
] |
[
[
"numpy.ones",
"numpy.array",
"torch.cuda.is_available",
"torch.load"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gjhartwell/cth-python
|
[
"558148a5755fd0bd3b12e1380d365f8bf51efa19",
"558148a5755fd0bd3b12e1380d365f8bf51efa19",
"558148a5755fd0bd3b12e1380d365f8bf51efa19"
] |
[
"Neural_Network/doregression.py",
"gregsprograms/test_plot_rotatonal_transform.py",
"diagnostics/thomson/Raman/raman_theory.py"
] |
[
"# --------------------------------------\n# doregression.py\n# \n# sample regression problem\n# Parameters:\n#\t\n# \n# Returns:\n#\n# Example:\n# \n#\n# Also defines:\n#\t\n# Greg Hartwell\n# 2017-11-1\n#----------------------------------------------------------------------------\n\n\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn import datasets, linear_model\nfrom sklearn.metrics import mean_squared_error, r2_score\n\n\n# Load the diabetes dataset\ndiabetes = datasets.load_diabetes()\n\n\n\n# Use only one feature\ndiabetes_X = diabetes.data[:, np.newaxis, 2]\n\n# Split the data into training/testing sets\ndiabetes_X_train = diabetes_X[:-20]\ndiabetes_X_test = diabetes_X[-20:]\n\n# Split the targets into training/testing sets\ndiabetes_y_train = diabetes.target[:-20]\ndiabetes_y_test = diabetes.target[-20:]\n\n# Create linear regression object\nregr = linear_model.LinearRegression()\n\n# Train the model using the training sets\nregr.fit(diabetes_X_train, diabetes_y_train)\n\n# Make predictions using the testing set\ndiabetes_y_pred = regr.predict(diabetes_X_test)\n\n# The coefficients\nprint('Coefficients: \\n', regr.coef_)\n# The mean squared error\nprint(\"Mean squared error: %.2f\"\n % mean_squared_error(diabetes_y_test, diabetes_y_pred))\n# Explained variance score: 1 is perfect prediction\nprint('Variance score: %.2f' % r2_score(diabetes_y_test, diabetes_y_pred))\n\n# Plot outputs\nplt.scatter(diabetes_X_test, diabetes_y_test, color='black')\nplt.plot(diabetes_X_test, diabetes_y_pred, color='blue', linewidth=3)\n\nplt.xticks(())\nplt.yticks(())\n\nplt.show()\n\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 17 10:59:37 2020\n\n@author: hartwgj\n\"\"\"\n\n\nimport matplotlib.pyplot as plt\nfrom vmec import wout_file\nfrom vmec import get_iotabar\n\n\n#file='C:\\\\Users\\\\hartwgj\\\\Documents\\\\Reconstructions\\\\shots_200327\\\\shot_20032705\\\\20032705ls\\\\wout_20032705_1.64_2.nc'#\n#file=\"C:\\\\Users\\\\hartwgj\\\\Documents\\\\Reconstructions\\\\Steve_Recon_Pack_2016\\\\18102253\\\\wout_18102253_1.61_3.nc\"\nfile=r'C:\\Users\\hartwgj\\Desktop\\TestReconFiles\\20032705'\ntest=wout_file(file)\n\ns,iotabar=get_iotabar(test)\n\nplt.plot(s,iotabar,'ro')\nplt.title('rotational transform plot')\nplt.ylabel(r'iota bar - rotational transform')\nplt.xlabel('s - normalized toroidal flux')\nplt.show()\n\n\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 12 17:45:49 2018\n\n@author: James\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n\nexp_val = {} # dictionary of experimental values\n\nexp_val['anisotropy'] = (0.395 *10**-82) / (8.8541878176*10**-12)**2\n# the molecular - polarizability anisotropy for N2 in cm^6. \n# Value taken from M. J. van de Sande \"Laser scattering on low \n# temperature plasmas: high resolution and stray light rejection\" 2002\nexp_val['N2_rot_constant'] = 199.887\n# rotational constant for N2 molecule \n# This value was taken from C.M. Penney \n# \"Absolute rotational Raman corss sections for N2, O2, and CO2\" 1974\nexp_val['h'] = 6.62607004*10**-34 # Planck_constant\nexp_val['e'] = 1.60217662*10**-19\nexp_val['me']= 9.10938356*10**-31 # mass of electron in kg\nexp_val['epsilon'] = 8.8541878176*(10**-12) \nexp_val['c'] = 299792458 # speed of light\nexp_val['kB'] = 1.38064852*10**-23 # Boltzmann constant m^2 kg s^-2 K^-1\nexp_val['laser_wavelength'] = 532.0 *10**-9\nexp_val['electron_radius**2'] = (((exp_val['e'])**2)/(4*np.pi*exp_val['epsilon'] *\n exp_val['me']*exp_val['c']**2))**2 * 10**4\nexp_val['theta1'] = 86.371 * (np.pi/180)\nexp_val['theta2'] = 90.000 * (np.pi/180)\nexp_val['length_to_lens'] = 71.8\nexp_val['radius_of_lens'] = 7.5\nexp_val['L'] = 1.42 # Laser beam length (cm) imaged onton fiber\nexp_val['gjeven'] = 6\nexp_val['gjodd'] = 3\n\n# Transmissions\nexp_val['Twin1'] = 0.9\nexp_val['Tlens1'] = 0.995\nexp_val['Tmirrors1']= 0.997\nexp_val['Tfiber'] = 0.587\nexp_val['Tspect1'] = 0.72\nexp_val['Tcolllens']= 0.849\nexp_val['Tfiberimage1'] = 64/75\nexp_val['Tpmtlens1'] = 0.849\n\ndef collection_optics_solid_angle(length_to_lens, radius_of_lens):\n # The solid angle that is collected by the collection \n # optics. length_to_lens is the distance from the scattering volume to the\n # location of the collection lens. radius_of_lens is the radius of the \n # collection lens\n value = 2*np.pi*(1-np.cos(np.arctan(radius_of_lens/length_to_lens)))\n \n return value\n\n\ndef lambda_raman_stokes(l, j):\n B = exp_val['N2_rot_constant']\n value = l + l**2 * (B)*(4*j+6)\n return value\n\n\ndef lambda_thermal(Te):\n l = exp_val['laser_wavelength']\n alpha = exp_val['theta2']\n c1 = 2*l*np.sin(alpha/2)\n c2 = np.sqrt((2*Te)/(511706.544))\n value = c1 * c2\n return value\n\n\ndef laser_photons(E_pulse):\n value = E_pulse * (exp_val['laser_wavelength']/(exp_val['h'] * exp_val['c'])) * 10**-9\n return value\n\n\ndef QEPMTH742240plusH1170640(l):\n # PMT quantum efficiency\n value = -1* (3.0453*10**-4)*l + 0.565053\n return 1\n\n\ndef optical_efficiency(*args):\n value = 1\n for arg in args:\n value = value * arg\n \n return value\n \n\ndef coef(E_pulse, n, L, length_to_lens, radius_of_lens, theta):\n # LaserPhotons is the number of photons in a given laser pulse. \n # (7.9188*10^-26) is the electron radius squared in cm^2. L is the length \n # of the scattering volume along the laser beam that is being imaged. \n # ne is the electron density in the scattering volume. \n # Finally \\[Theta] is the angle between the laser polarization and the \n # collection optics (the Sin (\\[Theta])^2 term is the dipole \\\n # scattering pattern). 
\n c1 = laser_photons(E_pulse)\n c2 = exp_val['electron_radius**2']\n c3 = collection_optics_solid_angle(length_to_lens, radius_of_lens)\n c4 = n / np.sqrt(np.pi) * np.sin(theta)**2\n \n value = c1 *c2 * L * c3 * c4\n \n return value\n \n \ndef thomson_scattered_photons(E_pulse, n, Te, wavelength):\n c1 = coef(E_pulse, n, exp_val['L'], exp_val['length_to_lens'], \n exp_val['radius_of_lens'], exp_val['theta1'])\n\n c2 = lambda_thermal(Te)\n c3 = np.exp(-1*((wavelength - exp_val['laser_wavelength'])**2/(c2**2)))\n \n value = (c1 / c2) * c3\n\n return value\n\n\ndef thomson_channel_photons(E_pulse, n, Te, min_wavelength, max_wavelength):\n n_steps = 100\n step = (max_wavelength - min_wavelength)/n_steps\n x = np.linspace(min_wavelength, max_wavelength, n_steps)\n y = thomson_scattered_photons(E_pulse, n, Te, x)\n total_int = sum(y) * step\n total = total_int/(max_wavelength - min_wavelength)\n \n return total\n\n\ndef thomson_channel_volts(E_pulse, n, Te, min_wavelength, max_wavelength):\n n_steps = 100\n step = (max_wavelength - min_wavelength)/n_steps\n x = np.linspace(min_wavelength, max_wavelength, n_steps)\n y = thomson_scattered_photons(E_pulse, n, Te, x)\n resistance = 25\n gain = 2 * 10**5\n tau = 20 * 10**-9\n e = 1.60217662 * 10 **-19\n \n \n y = (gain * y * resistance * e)/tau\n \n total_int = sum(y) * step \n total = total_int/(max_wavelength - min_wavelength)\n \n return total\n\n\ndef raman_coef(E_pulse, n):\n # taking out the TS values for raman scattering coeff and then adding \n # in the raman specific values. \n # Note that the density is being converted into cm^-3 from m^-3. Also \n # the depolarization ratio is included as 3/4 for linear molecules \n # (all assuming perpendicular scattering geometry)\n c1 = coef(E_pulse, n, exp_val['L'], exp_val['length_to_lens'], \n exp_val['radius_of_lens'], exp_val['theta1'])\n c2 = np.sqrt(np.pi)/exp_val['electron_radius**2']\n c3 = (64 * np.pi**4)/45\n c4 = .75\n c5 = exp_val['anisotropy']\n \n value = c1 * c2 * c3 * c4 * c5 \n \n return value\n\ndef raman_crosssection(l, j):\n c1 = (64 * np.pi**4)/45\n c2 = .75\n c3 = exp_val['anisotropy']\n c4 = (3 * (j + 1) * (j + 2))/(2 * (2 * j + 1)*(2*j+3))\n c5 = (1 / (lambda_raman_stokes(l, j)))**4\n \n value = c1 * c2 * c3 * c4 * c5\n \n return value\n\n\n\ndef raman_distribution(j, T, gj):\n # j distribution of raman values with T temperature of N2 gas in K and \n # finally gj is degeneracy for whether j is even or odd\n c1 = gj * ((2 * j) + 1)\n c2 = (2 * exp_val['h'] * exp_val['c'] * exp_val['N2_rot_constant'] * 10**2) / (9 * exp_val['kB'] * T)\n c3 = -(exp_val['h'] * exp_val['c'] * exp_val['N2_rot_constant'] * 10**2 * j * (j+1))/(exp_val['kB']*T)\n c4 = (3 * (j + 1) * (j + 2))/(2 * ((2 * j) + 1)*((2*j)+3))\n \n value = c1 * c2 * np.exp(c3) * c4\n\n return value\n\n\ndef raman_scattered_photons(E_pulse, n, j, T, gj):\n # rotational stokes raman scattered photonss per unit wavelength\n c1 = raman_coef(E_pulse, n)\n c2 = raman_distribution(j, T, gj)\n c3 = (1/(lambda_raman_stokes(exp_val['laser_wavelength'], j)*10**-7))**4\n \n value = c1 * c2 * c3\n \n return value\n\n\n\ndef total_photoelectrons_raman_center_function(E_pulse, p, T, \n wavelength_min, wavelength_max):\n c1 = optical_efficiency(exp_val['Twin1'], exp_val['Tlens1'], \n exp_val['Tmirrors1'], exp_val['Tfiber'],\n exp_val['Tspect1'], exp_val['Tcolllens'],\n exp_val['Tfiberimage1'], exp_val['Tpmtlens1'])\n\n c1 = 1\n \n n = ((p/(7.5006*10**-3))/(exp_val['kB'] * T))*10**-6\n c2 = 0\n\n for j in range(0, 60):\n wavelength = 
lambda_raman_stokes(exp_val['laser_wavelength'], 2 * j)\n if wavelength <= wavelength_max and wavelength >= wavelength_min:\n c2 = c2 + (raman_scattered_photons(E_pulse, n, 2 * j, T, \n exp_val['gjeven']) * \n QEPMTH742240plusH1170640(lambda_raman_stokes(exp_val['laser_wavelength'], 2 * j)))\n\n \n c3 = 0\n for j in range(0, 60):\n wavelength = lambda_raman_stokes(exp_val['laser_wavelength'], 2 * j + 1)\n if wavelength <= wavelength_max and wavelength >= wavelength_min:\n c3 = c3 + (raman_scattered_photons(E_pulse, n, 2 * j + 1, T, \n exp_val['gjodd']) * \n QEPMTH742240plusH1170640(lambda_raman_stokes(exp_val['laser_wavelength'], 2 * j + 1)))\n\n \n value = c1 * (c2 + c3)\n \n return value\n \n\ndef step_function_array(start, stop, height):\n frac = (stop - start)/20\n x = np.linspace(start - frac, stop + frac, 110)\n y = np.array([0] * 110)\n y[5:104] = height\n return x, y\n \n\ndef plot_thomson_channels(height):\n x1, y1 = step_function_array(533.5, 539, height)\n plt.plot(x1, y1, 'k')\n x1, y1 = step_function_array(539.5, 545, height)\n plt.plot(x1, y1, 'k') \n x1, y1 = step_function_array(545.5, 551, height)\n plt.plot(x1, y1, 'k') \n x1, y1 = step_function_array(551.5, 557, height)\n plt.plot(x1, y1, 'k') \n x1, y1 = step_function_array(557.5, 563, height)\n plt.plot(x1, y1, 'k')\n return\n \n#x1 = TotalPhotoelectronsRamanCenterFunction(.8, 50, 295, 545, 551)\nx2 = raman_scattered_photons(.8, ((50/(7.5006*10**-3))/(exp_val['kB'] * 295))*10**-6, 35,\n 295, exp_val['gjeven'])\n\n\n\n\n\"\"\"\nPressure = np.linspace(0, 50, 100)\nPhotons = total_photoelectrons_raman_center_function(1.8, Pressure, 295, 536, 561)\n\na = np.loadtxt('170929_photon_counts_combined_a.txt')\nb = np.loadtxt('170929_photon_counts_combined_b_edit.txt')\n\n\npressure_a = np.loadtxt('170929_pressure_torr_a.txt')\npressure_b = np.loadtxt('170929_pressure_torr_b_edit.txt')\n\nweights_b = np.loadtxt('weights_b.txt')\nweights_a = weights_b[1:len(a)]\n\nplt.figure()\n\n\n\n\n\nplt.plot(Pressure, Photons, c='k')\nplt.scatter(pressure_a, a, c = 'r')\nfit1 = np.polyfit(Pressure, Photons, 1)\nlabel1 = str('Theory: y = ' + str(np.round(fit1[0],2)) + 'x')\n\nplt.plot(np.unique(Pressure), np.poly1d(np.polyfit(Pressure, Photons, 1))(np.unique(Pressure)),\n color='k', label=label1)\n\nfit2 = np.polyfit(pressure_a, a, 1)\nlabel2 = str('Data: y = ' + str(np.round(fit2[0],2)) + 'x' )\n\nplt.plot(np.unique(pressure_a), np.poly1d(np.polyfit(pressure_a, a, 1))(np.unique(pressure_a)),\n color='r', label = label2)\n\nplt.xlabel('Pressure (Torr)', fontsize = 15, weight ='bold')\nplt.ylabel('Photons', fontsize = 15, weight ='bold')\ntitle = str(\"Predicted Raman Scattering \\n (536 nm - 561 nm)\")\nplt.title(title, fontsize = 15, weight ='bold')\nplt.xticks(fontsize = 13, weight = 'bold')\nplt.yticks(fontsize = 13, weight = 'bold')\nplt.legend()\nplt.savefig('test_1_theory_vs_data.png', format='png', dpi = 1000)\nplt.show()\n\n\"\"\"\n\"\"\"\n\nPressure = np.linspace(0, 50, 100)\nPhotons = total_photoelectrons_raman_center_function(1.8, Pressure, 295, 543, 565)\n\nb = np.loadtxt('170929_photon_counts_combined_b_edit.txt')\n\npressure_b = np.loadtxt('170929_pressure_torr_b_edit.txt')\n\nweights_b = np.loadtxt('weights_b.txt')\nweights_b = weights_b[1:len(b)]\n\nplt.figure()\n\n\n\nplt.plot(Pressure, Photons, c='k')\nplt.scatter(pressure_b, b, c = 'b')\nfit1 = np.polyfit(Pressure, Photons, 1)\nlabel1 = str('Theory: y = ' + str(np.round(fit1[0],2)) + 'x')\n\nplt.plot(np.unique(Pressure), np.poly1d(np.polyfit(Pressure, Photons, 
1))(np.unique(Pressure)),\n color='k', label=label1)\n\nfit2 = np.polyfit(pressure_b, b, 1)\nlabel2 = str('Data: y = ' + str(np.round(fit2[0],2)) + 'x' )\n\nplt.plot(np.unique(pressure_a), np.poly1d(np.polyfit(pressure_a, a, 1))(np.unique(pressure_a)),\n color='b', label = label2)\n\nplt.xlabel('Pressure (Torr)', fontsize = 15, weight ='bold')\nplt.ylabel('Photons', fontsize = 15, weight ='bold')\ntitle = str(\"Raman Scattering \\n(543 nm - 565 nm)\")\nplt.title(title, fontsize = 15, weight ='bold')\nplt.xticks(fontsize = 13, weight = 'bold')\nplt.yticks(fontsize = 13, weight = 'bold')\nplt.legend()\n#plt.savefig('test_2_theory_vs_data.png', format='png', dpi = 1000)\nplt.show()\n\"\"\"\n\"\"\"\nplt.figure()\nx = np.linspace(532, 563, 200)\ny = thomson_scattered_photons(1.69, 1*10**13, 100, x)\nplt.plot(x, y,'r', label = 'Te: 100 eV')\ny = thomson_scattered_photons(1.69, 1*10**13, 150, x)\nplt.plot(x, y, 'b', label = 'Te: 150 eV')\ny = thomson_scattered_photons(1.69, 1*10**13, 200, x)\nplt.plot(x, y, 'k', label = 'Te: 200 eV')\nprint(max(y)/10)\nplot_thomson_channels(max(y)/5)\n\nplt.xlabel('Wavelength (nm)', fontsize = 15, weight ='bold')\nplt.ylabel('Photons', fontsize = 15, weight ='bold')\nplt.title('Estimated Thomson Scattered Photons', fontsize = 15, weight ='bold')\nplt.legend(fontsize = 12,loc='upper right')\nplt.show()\n\nplt.figure()\nx = np.linspace(532, 563, 200)\ny = thomson_scattered_photons(1.69, 1*10**13, 100, x)\nplt.plot(x, y,'r', label = 'Total Scattered')\n\nc1 = optical_efficiency(exp_val['Twin1'], exp_val['Tlens1'], \n exp_val['Tmirrors1'], exp_val['Tfiber'],\n exp_val['Tspect1'], exp_val['Tcolllens'],\n exp_val['Tfiberimage1'], exp_val['Tpmtlens1'])\ny = y * c1 \nplt.plot(x, y, 'b', label = 'Collected by PMT')\n\nplt.xlabel('Wavelength (nm)', fontsize = 15, weight ='bold')\nplt.ylabel('Photons', fontsize = 15, weight ='bold')\nplt.title('Estimated Thomson Scattered Photons \\n 100 eV Plasma', fontsize = 15, weight ='bold')\nplt.legend(fontsize = 12,loc='upper right')\nplt.show()\n\n\n\n\"\"\"\n\"\"\"\nx = np.linspace(1, 300, 100)\n\ny1 = thomson_channel_photons(1.69, 1*10**13, x, 533.5, 539 )[1]\ny2 = thomson_channel_photons(1.69, 1*10**13, x, 539.5, 545 )[1]\ny3 = thomson_channel_photons(1.69, 1*10**13, x, 545.5, 551 )[1]\ny4 = thomson_channel_photons(1.69, 1*10**13, x, 551.5, 557 )[1]\ny5 = thomson_channel_photons(1.69, 1*10**13, x, 557.5, 563 )[1]\n\nplt.plot(x, y1)\nplt.plot(x, y2)\nplt.plot(x, y3)\nplt.plot(x, y4)\nplt.plot(x, y5)\n\nprint(thomson_channel_photons(1.69, 1*10**13, 100, 532, 532.5))\nprint(thomson_channel_volts(1.69, 1*10**13, 100, 532, 532.5))\n\"\"\"\n\n\n"
] |
[
[
"matplotlib.pyplot.yticks",
"sklearn.metrics.r2_score",
"matplotlib.pyplot.scatter",
"sklearn.datasets.load_diabetes",
"sklearn.metrics.mean_squared_error",
"matplotlib.pyplot.plot",
"sklearn.linear_model.LinearRegression",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show"
],
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
],
[
"numpy.sqrt",
"numpy.linspace",
"numpy.arctan",
"numpy.sin",
"matplotlib.pyplot.plot",
"numpy.exp",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Koncopd/anndata
|
[
"c33bffbfb4a10f4bdeb26220579a2d33f4cd9b4d",
"c33bffbfb4a10f4bdeb26220579a2d33f4cd9b4d"
] |
[
"anndata/_core/aligned_mapping.py",
"anndata/_io/read.py"
] |
[
"from abc import ABC, abstractmethod\nfrom collections import abc as cabc\nfrom typing import Union, Optional, Type, ClassVar, TypeVar # Special types\nfrom typing import Iterator, Mapping, Sequence # ABCs\nfrom typing import Tuple, List, Dict # Generic base types\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.sparse import spmatrix\n\nfrom ..utils import deprecated, ensure_df_homogeneous\nfrom . import raw, anndata\nfrom .views import as_view, ViewArgs\nfrom .index import _subset\n\n\nOneDIdx = Union[Sequence[int], Sequence[bool], slice]\nTwoDIdx = Tuple[OneDIdx, OneDIdx]\n\nI = TypeVar(\"I\", OneDIdx, TwoDIdx, covariant=True)\n# TODO: pd.DataFrame only allowed in AxisArrays?\nV = Union[pd.DataFrame, spmatrix, np.ndarray]\n\n\nclass AlignedMapping(cabc.MutableMapping, ABC):\n \"\"\"\\\n An abstract base class for Mappings containing array-like values aligned\n to either one or both AnnData axes.\n \"\"\"\n\n _allow_df: ClassVar[bool]\n \"\"\"If this mapping supports heterogeneous DataFrames\"\"\"\n\n _view_class: ClassVar[Type[\"AlignedViewMixin\"]]\n \"\"\"The view class for this aligned mapping.\"\"\"\n\n _actual_class: ClassVar[Type[\"AlignedActualMixin\"]]\n \"\"\"The actual class (which has it’s own data) for this aligned mapping.\"\"\"\n\n def __repr__(self):\n return f\"{type(self).__name__} with keys: {', '.join(self.keys())}\"\n\n def _ipython_key_completions_(self) -> List[str]:\n return list(self.keys())\n\n def _validate_value(self, val: V, key: str) -> V:\n \"\"\"Raises an error if value is invalid\"\"\"\n for i, axis in enumerate(self.axes):\n if self.parent.shape[axis] != val.shape[i]:\n right_shape = tuple(self.parent.shape[a] for a in self.axes)\n raise ValueError(\n f\"Value passed for key {key!r} is of incorrect shape. \"\n f\"Values of {self.attrname} must match dimensions \"\n f\"{self.axes} of parent. 
Value had shape {val.shape} while \"\n f\"it should have had {right_shape}.\"\n )\n if not self._allow_df and isinstance(val, pd.DataFrame):\n name = self.attrname.title().rstrip(\"s\")\n val = ensure_df_homogeneous(val, f\"{name} {key!r}\")\n return val\n\n @property\n @abstractmethod\n def attrname(self) -> str:\n \"\"\"What attr for the AnnData is this?\"\"\"\n pass\n\n @property\n @abstractmethod\n def axes(self) -> Tuple[int, ...]:\n \"\"\"Which axes of the parent is this aligned to?\"\"\"\n pass\n\n @property\n @abstractmethod\n def is_view(self) -> bool:\n pass\n\n @property\n def parent(self) -> Union[\"anndata.AnnData\", \"raw.Raw\"]:\n return self._parent\n\n def copy(self):\n d = self._actual_class(self.parent, self._axis)\n for k, v in self.items():\n d[k] = v.copy()\n return d\n\n def _view(self, parent: \"anndata.AnnData\", subset_idx: I):\n \"\"\"Returns a subset copy-on-write view of the object.\"\"\"\n return self._view_class(self, parent, subset_idx)\n\n @deprecated(\"dict(obj)\")\n def as_dict(self) -> dict:\n return dict(self)\n\n\nclass AlignedViewMixin:\n parent: \"anndata.AnnData\"\n \"\"\"Reference to parent AnnData view\"\"\"\n\n attrname: str\n \"\"\"What attribute in the parent is this?\"\"\"\n\n parent_mapping: Mapping[str, V]\n \"\"\"The object this is a view of.\"\"\"\n\n is_view = True\n\n def __getitem__(self, key: str) -> V:\n return as_view(\n _subset(self.parent_mapping[key], self.subset_idx),\n ViewArgs(self.parent, self.attrname, (key,)),\n )\n\n def __setitem__(self, key: str, value: V):\n value = self._validate_value(value, key) # Validate before mutating\n adata = self.parent.copy()\n new_mapping = getattr(adata, self.attrname)\n new_mapping[key] = value\n self.parent._init_as_actual(adata)\n\n def __delitem__(self, key: str):\n self[key] # Make sure it exists before bothering with a copy\n adata = self.parent.copy()\n new_mapping = getattr(adata, self.attrname)\n del new_mapping[key]\n self.parent._init_as_actual(adata)\n\n def __contains__(self, key: str) -> bool:\n return key in self.parent_mapping\n\n def __iter__(self) -> Iterator[str]:\n return iter(self.parent_mapping)\n\n def __len__(self) -> int:\n return len(self.parent_mapping)\n\n\nclass AlignedActualMixin:\n _data: Dict[str, V]\n \"\"\"Underlying mapping to the data\"\"\"\n\n is_view = False\n\n def __getitem__(self, key: str) -> V:\n return self._data[key]\n\n def __setitem__(self, key: str, value: V):\n value = self._validate_value(value, key)\n self._data[key] = value\n\n def __contains__(self, key: str) -> bool:\n return key in self._data\n\n def __delitem__(self, key: str):\n del self._data[key]\n\n def __iter__(self) -> Iterator[str]:\n return iter(self._data)\n\n def __len__(self) -> int:\n return len(self._data)\n\n\nclass AxisArraysBase(AlignedMapping):\n \"\"\"\\\n Mapping of key→array-like,\n where array-like is aligned to an axis of parent AnnData.\n \"\"\"\n\n _allow_df = True\n _dimnames = (\"obs\", \"var\")\n\n @property\n def attrname(self) -> str:\n return f\"{self.dim}m\"\n\n @property\n def axes(self) -> Tuple[int]:\n \"\"\"Axes of the parent this is aligned to\"\"\"\n return (self._axis,)\n\n @property\n def dim(self) -> str:\n \"\"\"Name of the dimension this aligned to.\"\"\"\n return self._dimnames[self._axis]\n\n def flipped(self) -> \"AxisArraysBase\":\n \"\"\"Transpose.\"\"\"\n new = self.copy()\n new.dimension = abs(self._axis - 1)\n return new\n\n def to_df(self) -> pd.DataFrame:\n \"\"\"Convert to pandas dataframe.\"\"\"\n df = 
pd.DataFrame(index=self.dim_names)\n for key in self.keys():\n value = self[key]\n for icolumn, column in enumerate(value.T):\n df[f\"{key}{icolumn + 1}\"] = column\n return df\n\n def _validate_value(self, val: V, key: str) -> V:\n if (\n hasattr(val, \"index\")\n and isinstance(val.index, cabc.Collection)\n and not (val.index == self.dim_names).all()\n ):\n # Could probably also re-order index if it’s contained\n raise ValueError(\n f\"value.index does not match parent’s axis {self.axes[0]} names\"\n )\n return super()._validate_value(val, key)\n\n\nclass AxisArrays(AlignedActualMixin, AxisArraysBase):\n def __init__(\n self,\n parent: Union[\"anndata.AnnData\", \"raw.Raw\"],\n axis: int,\n vals: Union[Mapping, AxisArraysBase, None] = None,\n ):\n self._parent = parent\n if axis not in (0, 1):\n raise ValueError()\n self._axis = axis\n self.dim_names = (parent.obs_names, parent.var_names)[self._axis]\n self._data = dict()\n if vals is not None:\n self.update(vals)\n\n\nclass AxisArraysView(AlignedViewMixin, AxisArraysBase):\n def __init__(\n self,\n parent_mapping: AxisArraysBase,\n parent_view: \"anndata.AnnData\",\n subset_idx: OneDIdx,\n ):\n self.parent_mapping = parent_mapping\n self._parent = parent_view\n self.subset_idx = subset_idx\n self._axis = parent_mapping._axis\n self.dim_names = parent_mapping.dim_names[subset_idx]\n\n\nAxisArraysBase._view_class = AxisArraysView\nAxisArraysBase._actual_class = AxisArrays\n\n\nclass LayersBase(AlignedMapping):\n \"\"\"\\\n Mapping of key: array-like, where array-like is aligned to both axes of the\n parent anndata.\n \"\"\"\n\n _allow_df = False\n attrname = \"layers\"\n axes = (0, 1)\n\n # TODO: I thought I had a more elegant solution to overiding this...\n def copy(self) -> \"Layers\":\n d = self._actual_class(self.parent)\n for k, v in self.items():\n d[k] = v.copy()\n return d\n\n\nclass Layers(AlignedActualMixin, LayersBase):\n def __init__(self, parent: \"anndata.AnnData\", vals: Optional[Mapping] = None):\n self._parent = parent\n self._data = dict()\n if vals is not None:\n self.update(vals)\n\n\nclass LayersView(AlignedViewMixin, LayersBase):\n def __init__(\n self,\n parent_mapping: LayersBase,\n parent_view: \"anndata.AnnData\",\n subset_idx: TwoDIdx,\n ):\n self.parent_mapping = parent_mapping\n self._parent = parent_view\n self.subset_idx = subset_idx\n\n\nLayersBase._view_class = LayersView\nLayersBase._actual_class = Layers\n\n\nclass PairwiseArraysBase(AlignedMapping):\n \"\"\"\\\n Mapping of key: array-like, where both axes of array-like are aligned to\n one axis of the parent anndata.\n \"\"\"\n\n _allow_df = False\n _dimnames = (\"obs\", \"var\")\n\n @property\n def attrname(self) -> str:\n return f\"{self.dim}p\"\n\n @property\n def axes(self) -> Tuple[int, int]:\n \"\"\"Axes of the parent this is aligned to\"\"\"\n return self._axis, self._axis\n\n @property\n def dim(self) -> str:\n \"\"\"Name of the dimension this aligned to.\"\"\"\n return self._dimnames[self._axis]\n\n\nclass PairwiseArrays(AlignedActualMixin, PairwiseArraysBase):\n def __init__(\n self, parent: \"anndata.AnnData\", axis: int, vals: Optional[Mapping] = None,\n ):\n self._parent = parent\n if axis not in (0, 1):\n raise ValueError()\n self._axis = axis\n self._data = dict()\n if vals is not None:\n self.update(vals)\n\n\nclass PairwiseArraysView(AlignedViewMixin, PairwiseArraysBase):\n def __init__(\n self,\n parent_mapping: PairwiseArraysBase,\n parent_view: \"anndata.AnnData\",\n subset_idx: OneDIdx,\n ):\n self.parent_mapping = parent_mapping\n 
self._parent = parent_view\n self.subset_idx = (subset_idx, subset_idx)\n self._axis = parent_mapping._axis\n\n\nPairwiseArraysBase._view_class = PairwiseArraysView\nPairwiseArraysBase._actual_class = PairwiseArrays\n",
"from pathlib import Path\nfrom os import PathLike, fspath\nfrom typing import Union, Optional, Mapping\nfrom typing import Iterable, Iterator, Generator\nfrom collections import OrderedDict\nimport gzip\nimport bz2\n\nimport h5py\nimport numpy as np\n\nfrom .. import AnnData\nfrom .utils import is_float\nfrom .h5ad import read_h5ad\n\ntry:\n from .zarr import read_zarr\nexcept ImportError as e:\n\n def read_zarr(*_, **__):\n raise e\n\n\ndef read_csv(\n filename: Union[PathLike, Iterator[str]],\n delimiter: Optional[str] = \",\",\n first_column_names: Optional[bool] = None,\n dtype: str = \"float32\",\n) -> AnnData:\n \"\"\"\\\n Read `.csv` file.\n\n Same as :func:`~anndata.read_text` but with default delimiter `','`.\n\n Parameters\n ----------\n filename\n Data file.\n delimiter\n Delimiter that separates data within text file.\n If `None`, will split at arbitrary number of white spaces,\n which is different from enforcing splitting at single white space `' '`.\n first_column_names\n Assume the first column stores row names.\n dtype\n Numpy data type.\n \"\"\"\n return read_text(filename, delimiter, first_column_names, dtype)\n\n\ndef read_excel(\n filename: PathLike, sheet: Union[str, int], dtype: str = \"float32\"\n) -> AnnData:\n \"\"\"\\\n Read `.xlsx` (Excel) file.\n\n Assumes that the first columns stores the row names and the first row the\n column names.\n\n Parameters\n ----------\n filename\n File name to read from.\n sheet\n Name of sheet in Excel file.\n \"\"\"\n # rely on pandas for reading an excel file\n from pandas import read_excel\n\n df = read_excel(fspath(filename), sheet)\n X = df.values[:, 1:]\n row = dict(row_names=df.iloc[:, 0].values.astype(str))\n col = dict(col_names=np.array(df.columns[1:], dtype=str))\n return AnnData(X, row, col, dtype=dtype)\n\n\ndef read_umi_tools(filename: PathLike, dtype: str = \"float32\") -> AnnData:\n \"\"\"\\\n Read a gzipped condensed count matrix from umi_tools.\n\n Parameters\n ----------\n filename\n File name to read from.\n \"\"\"\n # import pandas for conversion of a dict of dicts into a matrix\n # import gzip to read a gzipped file :-)\n import gzip\n from pandas import DataFrame\n\n dod = {} # this will contain basically everything\n fh = gzip.open(fspath(filename))\n header = fh.readline() # read the first line\n\n for line in fh:\n # gzip read bytes, hence the decoding\n t = line.decode(\"ascii\").split(\"\\t\")\n try:\n dod[t[1]].update({t[0]: int(t[2])})\n except KeyError:\n dod[t[1]] = {t[0]: int(t[2])}\n\n df = DataFrame.from_dict(dod, orient=\"index\") # build the matrix\n df.fillna(value=0.0, inplace=True) # many NaN, replace with zeros\n return AnnData(\n np.array(df), dict(obs_names=df.index), dict(var_names=df.columns), dtype=dtype,\n )\n\n\ndef read_hdf(filename: PathLike, key: str) -> AnnData:\n \"\"\"\\\n Read `.h5` (hdf5) file.\n\n Note: Also looks for fields `row_names` and `col_names`.\n\n Parameters\n ----------\n filename\n Filename of data file.\n key\n Name of dataset in the file.\n \"\"\"\n with h5py.File(filename, \"r\") as f:\n # the following is necessary in Python 3, because only\n # a view and not a list is returned\n keys = [k for k in f.keys()]\n if key == \"\":\n raise ValueError(\n f\"The file {filename} stores the following sheets:\\n{keys}\\n\"\n f\"Call read/read_hdf5 with one of them.\"\n )\n # read array\n X = f[key][()]\n # try to find row and column names\n rows_cols = [{}, {}]\n for iname, name in enumerate([\"row_names\", \"col_names\"]):\n if name in keys:\n rows_cols[iname][name] = 
f[name][()]\n adata = AnnData(X, rows_cols[0], rows_cols[1], dtype=X.dtype.name)\n return adata\n\n\ndef read_loom(\n filename: PathLike,\n sparse: bool = True,\n cleanup: bool = False,\n X_name: str = \"spliced\",\n obs_names: str = \"CellID\",\n obsm_names: Optional[Mapping[str, Iterable[str]]] = None,\n var_names: str = \"Gene\",\n varm_names: Optional[Mapping[str, Iterable[str]]] = None,\n dtype: str = \"float32\",\n **kwargs,\n) -> AnnData:\n \"\"\"\\\n Read `.loom`-formatted hdf5 file.\n\n This reads the whole file into memory.\n\n Beware that you have to explicitly state when you want to read the file as\n sparse data.\n\n Parameters\n ----------\n filename\n The filename.\n sparse\n Whether to read the data matrix as sparse.\n cleanup\n Whether to collapse all obs/var fields that only store\n one unique value into `.uns['loom-.']`.\n X_name\n Loompy key with which the data matrix :attr:`~anndata.AnnData.X` is initialized.\n obs_names\n Loompy key where the observation/cell names are stored.\n obsm_names\n Loompy keys which will be constructed into observation matrices\n var_names\n Loompy key where the variable/gene names are stored.\n obsm_names\n Loompy keys which will be constructed into variable matrices\n **kwargs:\n Arguments to loompy.connect\n \"\"\"\n obsm_names = obsm_names or {}\n varm_names = varm_names or {}\n\n filename = fspath(filename) # allow passing pathlib.Path objects\n from loompy import connect\n\n with connect(filename, \"r\", **kwargs) as lc:\n if X_name not in lc.layers.keys():\n X_name = \"\"\n X = lc.layers[X_name].sparse().T.tocsr() if sparse else lc.layers[X_name][()].T\n\n layers = OrderedDict()\n if X_name != \"\":\n layers[\"matrix\"] = (\n lc.layers[\"\"].sparse().T.tocsr() if sparse else lc.layers[\"\"][()].T\n )\n for key in lc.layers.keys():\n if key != \"\":\n layers[key] = (\n lc.layers[key].sparse().T.tocsr()\n if sparse\n else lc.layers[key][()].T\n )\n\n obs = dict(lc.col_attrs)\n\n obsm = {}\n for key, names in obsm_names.items():\n obsm[key] = np.array([obs.pop(name) for name in names]).T\n\n if obs_names in obs.keys():\n obs[\"obs_names\"] = obs.pop(obs_names)\n obsm_attrs = [k for k, v in obs.items() if v.ndim > 1 and v.shape[1] > 1]\n\n for key in obsm_attrs:\n obsm[key] = obs.pop(key)\n\n var = dict(lc.row_attrs)\n\n varm = {}\n for key, names in varm_names.items():\n varm[key] = np.array([var.pop(name) for name in names]).T\n\n if var_names in var.keys():\n var[\"var_names\"] = var.pop(var_names)\n varm_attrs = [k for k, v in var.items() if v.ndim > 1 and v.shape[1] > 1]\n\n for key in varm_attrs:\n varm[key] = var.pop(key)\n\n uns = {}\n if cleanup:\n uns_obs = {}\n for key in list(obs.keys()):\n if len(set(obs[key])) == 1:\n uns_obs[f\"{key}\"] = obs[key][0]\n del obs[key]\n if uns_obs:\n uns[\"loom-obs\"] = uns_obs\n uns_var = {}\n for key in list(var.keys()):\n if len(set(var[key])) == 1:\n uns_var[f\"{key}\"] = var[key][0]\n del var[key]\n if uns_var:\n uns[\"loom-var\"] = uns_var\n\n adata = AnnData(\n X,\n obs=obs,\n var=var,\n layers=layers,\n obsm=obsm if obsm else None,\n varm=varm if varm else None,\n uns=uns,\n dtype=dtype,\n )\n return adata\n\n\ndef read_mtx(filename: PathLike, dtype: str = \"float32\") -> AnnData:\n \"\"\"\\\n Read `.mtx` file.\n\n Parameters\n ----------\n filename\n The filename.\n dtype\n Numpy data type.\n \"\"\"\n from scipy.io import mmread\n\n # could be rewritten accounting for dtype to be more performant\n X = mmread(fspath(filename)).astype(dtype)\n from scipy.sparse import csr_matrix\n\n X = 
csr_matrix(X)\n return AnnData(X, dtype=dtype)\n\n\ndef read_text(\n filename: Union[PathLike, Iterator[str]],\n delimiter: Optional[str] = None,\n first_column_names: Optional[bool] = None,\n dtype: str = \"float32\",\n) -> AnnData:\n \"\"\"\\\n Read `.txt`, `.tab`, `.data` (text) file.\n\n Same as :func:`~anndata.read_csv` but with default delimiter `None`.\n\n Parameters\n ----------\n filename\n Data file, filename or stream.\n delimiter\n Delimiter that separates data within text file. If `None`, will split at\n arbitrary number of white spaces, which is different from enforcing\n splitting at single white space `' '`.\n first_column_names\n Assume the first column stores row names.\n dtype\n Numpy data type.\n \"\"\"\n if not isinstance(filename, (PathLike, str, bytes)):\n return _read_text(filename, delimiter, first_column_names, dtype)\n\n filename = Path(filename)\n if filename.suffix == \".gz\":\n with gzip.open(str(filename), mode=\"rt\") as f:\n return _read_text(f, delimiter, first_column_names, dtype)\n elif filename.suffix == \".bz2\":\n with bz2.open(str(filename), mode=\"rt\") as f:\n return _read_text(f, delimiter, first_column_names, dtype)\n else:\n with filename.open() as f:\n return _read_text(f, delimiter, first_column_names, dtype)\n\n\ndef iter_lines(file_like: Iterable[str]) -> Generator[str, None, None]:\n \"\"\" Helper for iterating only nonempty lines without line breaks\"\"\"\n for line in file_like:\n line = line.rstrip(\"\\r\\n\")\n if line:\n yield line\n\n\ndef _read_text(\n f: Iterator[str],\n delimiter: Optional[str],\n first_column_names: Optional[bool],\n dtype: str,\n) -> AnnData:\n comments = []\n data = []\n lines = iter_lines(f)\n col_names = []\n row_names = []\n # read header and column names\n for line in lines:\n if line.startswith(\"#\"):\n comment = line.lstrip(\"# \")\n if comment:\n comments.append(comment)\n else:\n if delimiter is not None and delimiter not in line:\n raise ValueError(f\"Did not find delimiter {delimiter!r} in first line.\")\n line_list = line.split(delimiter)\n # the first column might be row names, so check the last\n if not is_float(line_list[-1]):\n col_names = line_list\n # logg.msg(\" assuming first line in file stores column names\", v=4)\n else:\n if not is_float(line_list[0]) or first_column_names:\n first_column_names = True\n row_names.append(line_list[0])\n data.append(np.array(line_list[1:], dtype=dtype))\n else:\n data.append(np.array(line_list, dtype=dtype))\n break\n if not col_names:\n # try reading col_names from the last comment line\n if len(comments) > 0:\n # logg.msg(\" assuming last comment line stores variable names\", v=4)\n col_names = np.array(comments[-1].split())\n # just numbers as col_names\n else:\n # logg.msg(\" did not find column names in file\", v=4)\n col_names = np.arange(len(data[0])).astype(str)\n col_names = np.array(col_names, dtype=str)\n # read another line to check if first column contains row names or not\n if first_column_names is None:\n first_column_names = False\n for line in lines:\n line_list = line.split(delimiter)\n if first_column_names or not is_float(line_list[0]):\n # logg.msg(\" assuming first column in file stores row names\", v=4)\n first_column_names = True\n row_names.append(line_list[0])\n data.append(np.array(line_list[1:], dtype=dtype))\n else:\n data.append(np.array(line_list, dtype=dtype))\n break\n # if row names are just integers\n if len(data) > 1 and data[0].size != data[1].size:\n # logg.msg(\n # \" assuming first row stores column names and first 
column row names\",\n # v=4,\n # )\n first_column_names = True\n col_names = np.array(data[0]).astype(int).astype(str)\n row_names.append(data[1][0].astype(int).astype(str))\n data = [data[1][1:]]\n # parse the file\n for line in lines:\n line_list = line.split(delimiter)\n if first_column_names:\n row_names.append(line_list[0])\n data.append(np.array(line_list[1:], dtype=dtype))\n else:\n data.append(np.array(line_list, dtype=dtype))\n # logg.msg(\" read data into list of lists\", t=True, v=4)\n # transfrom to array, this takes a long time and a lot of memory\n # but it’s actually the same thing as np.genfromtxt does\n # - we don’t use the latter as it would involve another slicing step\n # in the end, to separate row_names from float data, slicing takes\n # a lot of memory and CPU time\n if data[0].size != data[-1].size:\n raise ValueError(\n f\"Length of first line ({data[0].size}) is different \"\n f\"from length of last line ({data[-1].size}).\"\n )\n data = np.array(data, dtype=dtype)\n # logg.msg(\" constructed array from list of list\", t=True, v=4)\n # transform row_names\n if not row_names:\n row_names = np.arange(len(data)).astype(str)\n # logg.msg(\" did not find row names in file\", v=4)\n else:\n row_names = np.array(row_names)\n for iname, name in enumerate(row_names):\n row_names[iname] = name.strip('\"')\n # adapt col_names if necessary\n if col_names.size > data.shape[1]:\n col_names = col_names[1:]\n for iname, name in enumerate(col_names):\n col_names[iname] = name.strip('\"')\n return AnnData(\n data, obs=dict(obs_names=row_names), var=dict(var_names=col_names), dtype=dtype,\n )\n\n\ndef load_sparse_csr(d, key=\"X\"):\n from scipy.sparse.csr import csr_matrix\n\n key_csr = f\"{key}_csr\"\n d[key] = csr_matrix(\n (d[f\"{key_csr}_data\"], d[f\"{key_csr}_indices\"], d[f\"{key_csr}_indptr\"]),\n shape=d[f\"{key_csr}_shape\"],\n )\n del_sparse_matrix_keys(d, key_csr)\n return d\n\n\ndef del_sparse_matrix_keys(mapping, key_csr):\n del mapping[f\"{key_csr}_data\"]\n del mapping[f\"{key_csr}_indices\"]\n del mapping[f\"{key_csr}_indptr\"]\n del mapping[f\"{key_csr}_shape\"]\n"
] |
[
[
"pandas.DataFrame"
],
[
"numpy.array",
"scipy.sparse.csr.csr_matrix",
"pandas.DataFrame.from_dict"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"0.15",
"1.4",
"0.16",
"1.0",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"0.10",
"0.17",
"1.3"
],
"tensorflow": []
}
] |
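The load_sparse_csr helper in the code cell above reconstructs a CSR matrix from `*_csr_data` / `*_csr_indices` / `*_csr_indptr` / `*_csr_shape` entries of a mapping. Below is a minimal, self-contained sketch of that round trip; the save_sparse_csr counterpart and the 3x3 example matrix are made up for illustration, and only scipy.sparse.csr_matrix from this row's API list is assumed.

import numpy as np
from scipy.sparse import csr_matrix

def save_sparse_csr(d, X, key="X"):
    # Hypothetical inverse of load_sparse_csr: stash the CSR components
    # of X under "<key>_csr_*" entries of a plain dict.
    key_csr = f"{key}_csr"
    d[f"{key_csr}_data"] = X.data
    d[f"{key_csr}_indices"] = X.indices
    d[f"{key_csr}_indptr"] = X.indptr
    d[f"{key_csr}_shape"] = X.shape
    return d

def load_sparse_csr(d, key="X"):
    # Same reconstruction as in the row above, without the key cleanup.
    key_csr = f"{key}_csr"
    return csr_matrix(
        (d[f"{key_csr}_data"], d[f"{key_csr}_indices"], d[f"{key_csr}_indptr"]),
        shape=d[f"{key_csr}_shape"],
    )

X = csr_matrix(np.array([[0, 1, 0], [2, 0, 0], [0, 0, 3]], dtype=np.float32))
d = save_sparse_csr({}, X)
assert (load_sparse_csr(d) != X).nnz == 0  # the round trip preserves the matrix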
robotics-upo/og-sgg
|
[
"106c56919428ce927a1cae494932c00a5f58c37d",
"106c56919428ce927a1cae494932c00a5f58c37d"
] |
[
"train_telenet.py",
"convert_vg_images.py"
] |
[
"import io\nimport zipfile\nimport os\nimport random\n\nfrom telenet.config import get as tn_config\n\nRND_SEED = tn_config('train.random_seed')\n\nos.environ['PYTHONHASHSEED'] = str(RND_SEED)\nrandom.seed(RND_SEED)\n\nimport numpy as np\nimport pandas as pd\n\nnp.random.seed(RND_SEED)\n\nimport tensorflow as tf\nimport tensorflow_addons as tfa\nimport tensorflow_docs as tfdocs\nimport tensorflow_docs.plots # Do not remove this import\n#import tensorflow.keras.mixed_precision as mp\n\nfrom tensorflow.python.training.tracking.data_structures import NoDependency\n\ntf.random.set_seed(RND_SEED)\n\nfrom matplotlib import pyplot as plt\n\nimport telenet.model as tn_model\nimport telenet.dataset_data as tn_data\n\nfrom tqdm import tqdm\n\n#mp.set_global_policy(mp.Policy('mixed_float16'))\n\nDATASET_NAME = tn_config('train.dataset')\nMODEL_VARIANT = tn_config('model.variant')\n\nif 'teresa' in DATASET_NAME:\n\ttn_data.load_names(f'teresa-names.json')\nelse:\n\ttn_data.load_names(f'{DATASET_NAME}-names.json')\n\nTRAIN_DATA = tn_data.load_json_xz(f'{DATASET_NAME}-train-without-val')\nVAL_DATA = tn_data.load_json_xz(f'{DATASET_NAME}-val')\n\nfor known_dataset in ['vrd', 'vg', DATASET_NAME]:\n\tif DATASET_NAME.startswith(known_dataset):\n\t\tSEM_VECTORS = tf.convert_to_tensor(np.load(tn_data.path(f'{known_dataset}-semvecs.npy')))\n\t\tzf_pi = zipfile.ZipFile(tn_data.path(f'{known_dataset}-yolo-train.zip'), 'r')\n\t\tzf_om = zipfile.ZipFile(tn_data.path(f'{known_dataset}-mask-train.zip'), 'r')\n\ndef get_priors(src,dst):\n\treturn np.zeros((tn_data.NUM_RELS,), np.float32)\n\ndef preprocess_gt_nongt(img):\n\timg['gt'] = gt = {}\n\tfor rel in img['rels']:\n\t\tsrc_id = rel['si']\n\t\tdst_id = rel['di']\n\t\ty_real = np.zeros((tn_data.NUM_RELS,), np.float32)\n\t\tfor i in rel['v']:\n\t\t\ty_real[i] = 1.\n\t\tgt[(src_id,dst_id)] = {\n\t\t\t'sv': rel['sv'],\n\t\t\t'dv': rel['dv'],\n\t\t\t'a': SEM_VECTORS[rel['sv']],\n\t\t\t'b': SEM_VECTORS[rel['dv']],\n\t\t\t'p': get_priors(rel['sv'],rel['dv']),\n\t\t\t'y': y_real\n\t\t}\n\timg['nongt'] = nongt = set()\n\tfor i in range(len(img['objs'])):\n\t\tfor j in range(len(img['objs'])):\n\t\t\tif i != j and (i,j) not in gt:\n\t\t\t\tnongt.add((i,j))\n\n# Preprocess training/validation data\nfor img in TRAIN_DATA:\n\tpreprocess_gt_nongt(img)\nfor img in VAL_DATA:\n\tpreprocess_gt_nongt(img)\n\ndef stupid_adapter(f):\n\treturn io.BytesIO(f.read())\n\nclass TelenetTrainer(tn_model.CombinedRelationshipDetector):\n\tdef __init__(self, **kwargs):\n\t\tsuper().__init__(**kwargs)\n\t\tself.trn_batch_size = NoDependency(32)\n\t\tself.trn_batch_gt_size = NoDependency(self.trn_batch_size - int(.5 + .5 * self.trn_batch_size))\n\t\tself.trn_loss_tracker = NoDependency(tf.keras.metrics.Mean(name=\"loss\"))\n\t\tself.val_loss_tracker = NoDependency(tf.keras.metrics.Mean(name=\"loss\"))\n\n\tdef prepare_minibatch(self, img):\n\t\timg_name = img['id']\n\t\twith stupid_adapter(zf_pi.open(f'{img_name}.npy','r')) as f:\n\t\t\timg_features = tf.expand_dims(tf.convert_to_tensor(np.load(f), tf.float32), axis=0)\n\t\twith stupid_adapter(zf_om.open(f'{img_name}.npy','r')) as f:\n\t\t\tobj_masks = tf.convert_to_tensor(np.load(f)[0,:,:,:], tf.float32)\n\n\t\tnum_objs = len(img['objs'])\n\t\tnum_pairs = num_objs * (num_objs - 1)\n\t\tif num_pairs == 0:\n\t\t\treturn (None, None, None)\n\n\t\tground_truth = img['gt']\n\t\tnon_ground_truth = img['nongt']\n\t\tnum_gt_pairs = len(ground_truth)\n\t\tnum_non_gt_pairs = len(non_ground_truth)\n\t\tbatch_mask = []\n\t\tbatch_srcsem = 
[]\n\t\tbatch_dstsem = []\n\t\tbatch_priors = []\n\t\tbatch_y_real = []\n\n\t\tdef sample_gt_pair(pair, pairdata):\n\t\t\tsrc_id,dst_id = pair\n\t\t\tbatch_mask.append(tf.stack([obj_masks[:,:,src_id], obj_masks[:,:,dst_id]], axis=-1))\n\t\t\tbatch_srcsem.append(pairdata['a'])\n\t\t\tbatch_dstsem.append(pairdata['b'])\n\t\t\tbatch_priors.append(pairdata['p'])\n\t\t\tbatch_y_real.append(pairdata['y'])\n\n\t\tdef sample_non_gt_pair(pair):\n\t\t\tsrc_id,dst_id = pair\n\t\t\tsrc_objid = img['objs'][src_id]['v']\n\t\t\tdst_objid = img['objs'][dst_id]['v']\n\t\t\tbatch_mask.append(tf.stack([obj_masks[:,:,src_id], obj_masks[:,:,dst_id]], axis=-1))\n\t\t\tbatch_srcsem.append(SEM_VECTORS[src_objid])\n\t\t\tbatch_dstsem.append(SEM_VECTORS[dst_objid])\n\t\t\tbatch_priors.append(get_priors(src_objid, dst_objid))\n\t\t\tbatch_y_real.append(np.zeros((tn_data.NUM_RELS,), np.float32))\n\n\t\tnum_sampled_gt_pairs = np.minimum(self.trn_batch_gt_size, num_gt_pairs)\n\t\tnum_sampled_non_gt_pairs = np.minimum(self.trn_batch_size - num_sampled_gt_pairs, num_non_gt_pairs)\n\t\tnum_dupes = self.trn_batch_size - num_sampled_gt_pairs - num_sampled_non_gt_pairs\n\n\t\tfor pair,pairdata in random.sample(list(ground_truth.items()), k=num_sampled_gt_pairs):\n\t\t\tsample_gt_pair(pair, pairdata)\n\t\tfor pair in random.sample(list(non_ground_truth), k=num_sampled_non_gt_pairs):\n\t\t\tsample_non_gt_pair(pair)\n\n\t\t# Fill batch with dupes\n\t\tif num_dupes > 0:\n\t\t\tfor i in random.choices(list(range(len(batch_mask))), k=num_dupes):\n\t\t\t\tbatch_mask.append(batch_mask[i])\n\t\t\t\tbatch_srcsem.append(batch_srcsem[i])\n\t\t\t\tbatch_dstsem.append(batch_dstsem[i])\n\t\t\t\tbatch_priors.append(batch_priors[i])\n\t\t\t\tbatch_y_real.append(batch_y_real[i])\n\n\t\tbatch_mask = tf.stack(batch_mask, axis=0)\n\t\tbatch_srcsem = tf.stack(batch_srcsem, axis=0)\n\t\tbatch_dstsem = tf.stack(batch_dstsem, axis=0)\n\t\tbatch_priors = tf.stack(batch_priors, axis=0)\n\t\tbatch_y_real = tf.stack(batch_y_real, axis=0)\n\t\tbatch_x = (img_features, batch_mask, batch_srcsem, batch_dstsem, batch_priors)\n\t\treturn (batch_x, batch_y_real)\n\n\t@property\n\tdef metrics(self):\n\t\treturn [self.trn_loss_tracker, self.val_loss_tracker]\n\n\tdef ranking_loss(self, y_real, y_pred, margin=1.):\n\t\tscores_0, scores_1 = tf.dynamic_partition(y_pred, tf.cast(y_real, tf.int32), 2)\n\t\tscale = tf.size(y_real, out_type=tf.float32)\n\t\treturn tf.reduce_sum(tf.vectorized_map(lambda val: tf.reduce_sum(tf.nn.relu(margin - (scores_1 - val))), elems=scores_0)) / scale\n\n\[email protected]\n\tdef train_kernel(self, x, y_real):\n\t\twith tf.GradientTape() as tape:\n\t\t\ty_pred = self(x, training=True)\n\t\t\tloss = self.ranking_loss(y_real, y_pred)\n\n\t\tself.optimizer.minimize(loss, self.trainable_variables, tape=tape)\n\t\treturn loss\n\n\tdef train_step(self, data):\n\t\tbatch_x, batch_y_real = self.prepare_minibatch(TRAIN_DATA[int(data)])\n\t\tif batch_x is not None:\n\t\t\tloss = self.train_kernel(batch_x, batch_y_real)\n\t\t\tself.trn_loss_tracker.update_state(loss)\n\t\treturn { 'loss': self.trn_loss_tracker.result() }\n\n\[email protected]\n\tdef test_kernel(self, x, y_real):\n\t\ty_pred = self(x, training=False)\n\t\treturn self.ranking_loss(y_real, y_pred)\n\n\tdef test_step(self, data):\n\t\tbatch_x, batch_y_real = self.prepare_minibatch(VAL_DATA[int(data)])\n\t\tif batch_x is not None:\n\t\t\tloss = self.test_kernel(batch_x, batch_y_real)\n\t\t\tself.val_loss_tracker.update_state(loss)\n\t\treturn { 'loss': self.val_loss_tracker.result() 
}\n\nmdl = TelenetTrainer(N=tn_data.NUM_RELS)\nmdl.compile(\n\toptimizer=tfa.optimizers.AdamW(learning_rate=tn_config('train.lr'), weight_decay=tn_config('train.wd')),\n\trun_eagerly=True\n)\n\nearly_stopping = tf.keras.callbacks.EarlyStopping(\n\tmonitor='val_loss',\n\tpatience=tn_config('train.early_stopping'),\n\tmode='min',\n\trestore_best_weights=True\n)\n\ntensorboard = tf.keras.callbacks.TensorBoard(\n\tlog_dir=f'tensorboard/{MODEL_VARIANT}',\n\thistogram_freq=1\n)\n\nhistory = mdl.fit(\n\tx = tf.data.Dataset.range(len(TRAIN_DATA)).shuffle(256, seed=RND_SEED, reshuffle_each_iteration=True),\n\tvalidation_data = tf.data.Dataset.range(len(VAL_DATA)),\n\tcallbacks = [ early_stopping, tensorboard ],\n\tepochs = tn_config('train.epochs')\n)\n\nmdl.save_weights(f'weights/telenet+{MODEL_VARIANT}')\n\nplt.figure()\nplotter = tfdocs.plots.HistoryPlotter(metric = 'loss')\nplotter.plot({ 'Model': history })\nplt.savefig(f\"train-results/{MODEL_VARIANT}.png\")\n\nwith open(f'train-results/{MODEL_VARIANT}.csv', mode='wt', encoding='utf-8') as f:\n\tpd.DataFrame(history.history).to_csv(f)\n",
"import tensorflow as tf\n#import tensorflow_hub as hub\nimport numpy as np\n#import cv2\nimport zipfile\nimport json\nimport lzma\nimport os\nimport telenet.dataset_data as tn_data\nfrom telenet.utils import load_image_for_vrd_yolo, mdl_yolo, parse_yolo_results\nfrom telenet.config import get as tn_config\nfrom tqdm import tqdm\n\nVG_PATH = tn_config('paths.vg')\n\nimgcnvdata = tn_data.load_json_xz('vg-imgcnvdata')\nzf1 = zipfile.ZipFile(os.path.join(VG_PATH, 'images.zip'), 'r')\nzf2 = zipfile.ZipFile(os.path.join(VG_PATH, 'images2.zip'), 'r')\n\ntrain_imgs = []\ntest_imgs = []\n\nfor obj in imgcnvdata:\n\t(train_imgs,test_imgs)[obj['split']].append(obj)\n\ndef load_image(db, index):\n\tobj = db[index]\n\tif obj['dir'] == 1:\n\t\timgdata = zf1.read(f\"VG_100K/{obj['file']}\")\n\telif obj['dir'] == 2:\n\t\timgdata = zf2.read(f\"VG_100K_2/{obj['file']}\")\n\telse:\n\t\traise \"Bad dir\"\n\timg, w, h = load_image_for_vrd_yolo(imgdata)\n\treturn obj['id'], img, w, h\n\ndef load_train_image(index):\n\treturn load_image(train_imgs, index)\n\ndef load_test_image(index):\n\treturn load_image(test_imgs, index)\n\ntrain_dataset = tf.data.Dataset.from_tensor_slices(list(range(len(train_imgs)))).map(\n\tlambda x: tf.py_function(func=load_train_image, inp=[x], Tout=[tf.string, tf.float32, tf.float32, tf.float32]),\n\tnum_parallel_calls=tf.data.AUTOTUNE).batch(1)\n\ntest_dataset = tf.data.Dataset.from_tensor_slices(list(range(len(test_imgs)))).map(\n\tlambda x: tf.py_function(func=load_test_image, inp=[x], Tout=[tf.string, tf.float32, tf.float32, tf.float32]),\n\tnum_parallel_calls=tf.data.AUTOTUNE).batch(1)\n\ndef convert_dataset(dataset, outfile, outfile2):\n\tres = {}\n\twith zipfile.ZipFile(tn_data.path(outfile), 'w') as zfo:\n\t\tfor names,img,widths,heights in tqdm(dataset):\n\t\t\tnames = names.numpy()\n\t\t\tfeatures,yolodata = mdl_yolo(img)\n\t\t\tfor imnm,imft,imyl,imw,imh in zip(names,features,yolodata,widths,heights):\n\t\t\t\timnm = imnm.decode('utf-8')\n\t\t\t\tres[imnm] = parse_yolo_results(np.expand_dims(imyl, axis=0), imw, imh)\n\t\t\t\twith zfo.open(f'{imnm}.npy','w') as f:\n\t\t\t\t\tnp.save(f, imft)\n\twith lzma.open(tn_data.path(outfile2), 'wt', encoding='utf-8') as f:\n\t\tjson.dump(res, f)\n\nconvert_dataset(train_dataset, 'vg-yolo-train.zip', 'vg-yolo-train-objs.json.xz')\nconvert_dataset(test_dataset, 'vg-yolo-test.zip', 'vg-yolo-test-objs.json.xz')\n"
] |
[
[
"tensorflow.nn.relu",
"numpy.minimum",
"numpy.random.seed",
"tensorflow.stack",
"tensorflow.python.training.tracking.data_structures.NoDependency",
"tensorflow.cast",
"tensorflow.keras.metrics.Mean",
"matplotlib.pyplot.savefig",
"pandas.DataFrame",
"tensorflow.GradientTape",
"tensorflow.keras.callbacks.TensorBoard",
"numpy.load",
"numpy.zeros",
"tensorflow.size",
"tensorflow.random.set_seed",
"matplotlib.pyplot.figure"
],
[
"tensorflow.py_function",
"numpy.expand_dims",
"numpy.save"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
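train_telenet.py in the code cell above scores every object pair against all relation labels and trains with a margin ranking loss: predicted scores are partitioned by their binary labels (tf.dynamic_partition), and every positive score is hinged against every negative one (tf.nn.relu). A standalone restatement of that loss with made-up 1-D example tensors, assuming only TensorFlow 2.x eager execution:

import tensorflow as tf

def ranking_loss(y_real, y_pred, margin=1.0):
    # Split predicted scores into negatives (label 0) and positives (label 1).
    neg, pos = tf.dynamic_partition(y_pred, tf.cast(y_real, tf.int32), 2)
    scale = tf.size(y_real, out_type=tf.float32)
    # For each negative score, penalise positives that fail to beat it by `margin`.
    per_neg = tf.vectorized_map(
        lambda v: tf.reduce_sum(tf.nn.relu(margin - (pos - v))), elems=neg)
    return tf.reduce_sum(per_neg) / scale

y_real = tf.constant([0., 1., 0., 1.])
y_pred = tf.constant([0.2, 0.9, 0.4, 0.7])
print(float(ranking_loss(y_real, y_pred)))  # 0.5 for these scores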
Jstark27249/RISC-V-Vector-Hardware
|
[
"d6f75227a09dd4c4719e1ec5547ea6447997e4fe"
] |
[
"TestFiles/FloatMulTest.py"
] |
[
"# Program to show various ways to read and\n# write data in a file.\nimport math\n\nimport numpy as np\nimport struct\n\nimport random\n\n\n# opcodes\n# Bit shift values must go to B Input on the execute\n# 000001 or = shift left\n# 000100 = shift right\n# 001000 = equivalence check\n# 000000 = branching ie < or >\n# 000111 or 000101 = subtraction\n# 000011 or 000010 = addition\n# 000110 = multiplication\n# 110000 or 010001 = float multiply\n# 110001 = float subtract\n# 110000 = float addition\n\n\n\nfile1 = open(\"FloatMulTest.txt\", \"w\")\n#names must match logism pins (put FloatSubMode[1] where FloatSubMode[0]\nfloatTestHeader = [\"Ain[32] \", \"Bin[32] \", \"FPUMulResult[32] \\n\"]\nfile1.writelines(floatTestHeader)\na = 0.0\nb = 0.0\na = np.finfo(np.float32(a)).max\nb = np.finfo(np.float32(a)).min\nFLOAT_MAX = 3.402823466e+38\nFLOAT_MIN = 1.175494351e-38\n\n\nprint(math.sqrt(FLOAT_MAX))\nprint(math.sqrt(FLOAT_MIN))\nrandomList = []\n\n\nprint(np.multiply(math.sqrt(FLOAT_MAX), math.sqrt(FLOAT_MIN)))\npositive_infinity = float('inf')\nnegative_infinity = float('-inf')\n#convert a floating point number to binary\ndef binary(num):\n return ''.join('{:0>8b}'.format(c) for c in struct.pack('!f', np.float32(num)))\n\n\ndef float_to_hex(f):\n return hex(struct.unpack('<I', struct.pack('<f', f))[0])\n\n\n\n# makes a binary float32 addition problem + its solution\n\n\n\ndef generateBinaryListAddition(list):\n tempList = []\n overflow = np.float32(1)\n\n #values constrained to the sqrt of the float min and max for sake of testing, this should reduce the risk of overflows during testing\n a = np.random.uniform(1.0842021725674597e-19, 1.8446743522909403e+19)\n np.float32(a)\n\n #overflowing produces garbage when multiplying so the results will always be wrong\n b = np.random.uniform(1.0842021725674597e-19, 1.8446743522909403e+19)\n np.float32(b)\n\n c = np.multiply(np.float32(a), np.float32(b))\n np.float32(c)\n #print(c)\n a = float_to_hex(a)\n b = float_to_hex(b)\n\n\n #sets one as the output if the multiplier overflows\n if c == (positive_infinity or negative_infinity):\n c = overflow\n np.float32(c)\n c = float_to_hex(c)\n else:\n c = float_to_hex(c)\n\n\n\n\n\n tempList.append(a)\n tempList.append(' ')\n tempList.append(b)\n tempList.append(' ')\n tempList.append(c)\n tempList.append(' ')\n tempList.append('\\n')\n tempList = (map(str, tempList))\n return tempList\n\n\n\n#write to the file 32 times with the float addition function\nfor i in range(0, 32):\n randomList = generateBinaryListAddition(randomList)\n file1.writelines(randomList)\n\n\n\n\n# \\n is placed to indicate EOL (End of Line)\n\n\n\nfile1.close() # to change file access modes\nfile1 = open(\"FloatMulTest.txt\", \"r+\")\n"
] |
[
[
"numpy.random.uniform",
"numpy.float32"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
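FloatMulTest.py above writes 32 operand/product triples as IEEE-754 hex words for a float-multiplier testbench, drawing operands between roughly sqrt(FLOAT_MIN) and sqrt(FLOAT_MAX) so products stay representable. A trimmed sketch of producing one such triple, using the same struct-based bit reinterpretation as the row's float_to_hex helper:

import struct
import numpy as np

def float_to_hex(f):
    # Reinterpret a float32's bit pattern as an unsigned 32-bit integer, as in the row.
    return hex(struct.unpack('<I', struct.pack('<f', f))[0])

# Bounds are roughly sqrt(FLOAT_MIN) and sqrt(FLOAT_MAX), so overflow of the
# float32 product is very unlikely (the same constraint the row's generator uses).
lo, hi = 1.0842021725674597e-19, 1.8446743522909403e+19
a = np.float32(np.random.uniform(lo, hi))
b = np.float32(np.random.uniform(lo, hi))
c = np.multiply(a, b)
print(float_to_hex(a), float_to_hex(b), float_to_hex(c))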
KayThangan/LiftSimulation
|
[
"507b8cfaba854bec0427351dbf0eb245fc54a39c"
] |
[
"AdvanceCase/src/GraphPanel.py"
] |
[
"import matplotlib.pyplot as plt\nfrom com.simulation.lift.api.CostDict import cost_dict\n\nfig = plt.figure()\nfig.canvas.set_window_title('Advance Case Graph')\nfig.suptitle(\"\"\"Cost\nin function of \nLift Capacity, Floor Number and Passenger Number\"\"\")\n\nax = fig.add_subplot(111, projection='3d')\n\nx = []\ny = []\nz = []\nc = []\n\nfor lift_capacity in cost_dict:\n for floor_number in cost_dict[lift_capacity]:\n for passenger_number in cost_dict[lift_capacity][floor_number]:\n x.append(lift_capacity)\n y.append(floor_number)\n z.append(passenger_number)\n c.append(cost_dict[lift_capacity][floor_number][passenger_number][\"cost\"])\n\nimg = ax.scatter(x, y, z, c=c, cmap=plt.hot())\ncbar = fig.colorbar(img)\ncbar.ax.get_yaxis().labelpad = 15; cbar.ax.set_ylabel(\"Cost\", rotation = 270);\nax.set_xlabel('Lift Capacity')\nax.set_ylabel('Floor Number')\nax.set_zlabel('Passenger Number')\n\nplt.show()\n"
] |
[
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.hot",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
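GraphPanel.py above plots simulation cost against lift capacity, floor number and passenger number as a colour-mapped 3-D scatter. The same plotting pattern with synthetic stand-in data (the random x/y/z/c arrays below replace cost_dict, which is not part of this row):

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # registers the '3d' projection on older matplotlib

rng = np.random.default_rng(0)
x = rng.integers(4, 13, 100)    # lift capacity (synthetic)
y = rng.integers(5, 21, 100)    # floor number (synthetic)
z = rng.integers(10, 200, 100)  # passenger number (synthetic)
c = 0.5 * x + 1.5 * y + 0.1 * z + rng.normal(0, 2, 100)  # stand-in cost values

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
img = ax.scatter(x, y, z, c=c, cmap='hot')
cbar = fig.colorbar(img)
cbar.ax.set_ylabel("Cost", rotation=270, labelpad=15)
ax.set_xlabel('Lift Capacity')
ax.set_ylabel('Floor Number')
ax.set_zlabel('Passenger Number')
plt.show()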
vlomonaco/DeepLearningImplementations
|
[
"2aa73198b6293fadbd393e2d71e5d33a2307a709"
] |
[
"pix2pix/src/utils/data_utils.py"
] |
[
"from keras.datasets import mnist\nfrom keras.utils import np_utils\nimport numpy as np\nimport h5py\n\nimport matplotlib.pylab as plt\n\n\ndef normalization(X):\n\n return X / 127.5 - 1\n\n\ndef inverse_normalization(X):\n\n return (X + 1.) / 2.\n\n\ndef get_nb_patch(img_dim, patch_size, image_dim_ordering):\n\n assert image_dim_ordering in [\"th\", \"tf\"], \"Bad image_dim_ordering\"\n\n if image_dim_ordering == \"th\":\n assert img_dim[1] % patch_size[0] == 0, \"patch_size does not divide height\"\n assert img_dim[2] % patch_size[1] == 0, \"patch_size does not divide width\"\n nb_patch = (img_dim[1] / patch_size[0]) * (img_dim[2] / patch_size[1])\n img_dim_disc = (img_dim[0], patch_size[0], patch_size[1])\n\n elif image_dim_ordering == \"tf\":\n assert img_dim[0] % patch_size[0] == 0, \"patch_size does not divide height\"\n assert img_dim[1] % patch_size[1] == 0, \"patch_size does not divide width\"\n nb_patch = (img_dim[0] / patch_size[0]) * (img_dim[1] / patch_size[1])\n img_dim_disc = (patch_size[0], patch_size[1], img_dim[-1])\n\n return nb_patch, img_dim_disc\n\n\ndef extract_patches(X, image_dim_ordering, patch_size):\n\n # Now extract patches form X_disc\n if image_dim_ordering == \"th\":\n X = X.transpose(0,2,3,1)\n\n list_X = []\n list_row_idx = [(i * patch_size[0], (i + 1) * patch_size[0]) for i in range(X.shape[1] / patch_size[0])]\n list_col_idx = [(i * patch_size[1], (i + 1) * patch_size[1]) for i in range(X.shape[2] / patch_size[1])]\n\n for row_idx in list_row_idx:\n for col_idx in list_col_idx:\n list_X.append(X[:, row_idx[0]:row_idx[1], col_idx[0]:col_idx[1], :])\n\n if image_dim_ordering == \"th\":\n for i in range(len(list_X)):\n list_X[i] = list_X[i].transpose(0,3,1,2)\n\n return list_X\n\n\ndef load_facade(image_dim_ordering):\n\n with h5py.File(\"../../data/processed/facade_data.h5\", \"r\") as hf:\n\n X_full_train = hf[\"train_data_full\"][:].astype(np.float32)\n X_full_train = normalization(X_full_train)\n\n X_sketch_train = hf[\"train_data_sketch\"][:].astype(np.float32)\n X_sketch_train = normalization(X_sketch_train)\n\n if image_dim_ordering == \"tf\":\n X_full_train = X_full_train.transpose(0, 2, 3, 1)\n X_sketch_train = X_sketch_train.transpose(0, 2, 3, 1)\n\n X_full_val = hf[\"val_data_full\"][:].astype(np.float32)\n X_full_val = normalization(X_full_val)\n\n X_sketch_val = hf[\"val_data_sketch\"][:].astype(np.float32)\n X_sketch_val = normalization(X_sketch_val)\n\n if image_dim_ordering == \"tf\":\n X_full_val = X_full_val.transpose(0, 2, 3, 1)\n X_sketch_val = X_sketch_val.transpose(0, 2, 3, 1)\n\n return X_full_train, X_sketch_train, X_full_val, X_sketch_val\n\n\ndef gen_batch(X1, X2, batch_size):\n\n while True:\n idx = np.random.choice(X1.shape[0], batch_size, replace=False)\n yield X1[idx], X2[idx]\n\n\ndef get_disc_batch(X_full_batch, X_sketch_batch, generator_model, batch_counter, patch_size,\n image_dim_ordering, label_smoothing=False, label_flipping=0):\n\n # Create X_disc: alternatively only generated or real images\n if batch_counter % 2 == 0:\n # Produce an output\n X_disc = generator_model.predict(X_sketch_batch)\n y_disc = np.zeros((X_disc.shape[0], 2), dtype=np.uint8)\n y_disc[:, 0] = 1\n\n if label_flipping > 0:\n p = np.random.binomial(1, label_flipping)\n if p > 0:\n y_disc[:, [0, 1]] = y_disc[:, [1, 0]]\n\n else:\n X_disc = X_full_batch\n y_disc = np.zeros((X_disc.shape[0], 2), dtype=np.uint8)\n if label_smoothing:\n y_disc[:, 1] = np.random.uniform(low=0.9, high=1, size=y_disc.shape[0])\n else:\n y_disc[:, 1] = 1\n\n if label_flipping 
> 0:\n p = np.random.binomial(1, label_flipping)\n if p > 0:\n y_disc[:, [0, 1]] = y_disc[:, [1, 0]]\n\n # Now extract patches form X_disc\n X_disc = extract_patches(X_disc, image_dim_ordering, patch_size)\n\n return X_disc, y_disc\n\n\ndef plot_generated_batch(X_full, X_sketch, generator_model, batch_size, image_dim_ordering):\n\n # Generate images\n X_gen = generator_model.predict(X_sketch)\n\n X_full = inverse_normalization(X_full)\n X_gen = inverse_normalization(X_gen)\n\n Xg = X_gen[:8]\n Xr = X_full[:8]\n\n if image_dim_ordering == \"tf\":\n X = np.concatenate((Xg, Xr), axis=0)\n list_rows = []\n for i in range(int(X.shape[0] / 4)):\n Xr = np.concatenate([X[k] for k in range(4 * i, 4 * (i + 1))], axis=1)\n list_rows.append(Xr)\n\n Xr = np.concatenate(list_rows, axis=0)\n\n if image_dim_ordering == \"th\":\n X = np.concatenate((Xg, Xr), axis=0)\n list_rows = []\n for i in range(int(X.shape[0] / 4)):\n Xr = np.concatenate([X[k] for k in range(4 * i, 4 * (i + 1))], axis=2)\n list_rows.append(Xr)\n\n Xr = np.concatenate(list_rows, axis=1)\n Xr = Xr.transpose(1,2,0)\n\n if Xr.shape[-1] == 1:\n plt.imshow(Xr[:, :, 0], cmap=\"gray\")\n else:\n plt.imshow(Xr)\n plt.savefig(\"../../figures/current_batch.png\")\n plt.clf()\n plt.close()\n"
] |
[
[
"numpy.random.choice",
"numpy.concatenate",
"matplotlib.pylab.imshow",
"matplotlib.pylab.clf",
"numpy.random.binomial",
"numpy.random.uniform",
"matplotlib.pylab.savefig",
"numpy.zeros",
"matplotlib.pylab.close"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
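extract_patches in the code cell above slices channels-last image batches into non-overlapping patches for the PatchGAN discriminator; note it relies on Python-2 integer division inside range(). A Python-3-safe restatement of the same slicing with a made-up 64x64 batch and a shape check:

import numpy as np

def extract_patches_tf(X, patch_size):
    # Split a channels-last batch (B, H, W, C) into non-overlapping patches,
    # mirroring extract_patches above but with // so it also runs under Python 3.
    ph, pw = patch_size
    rows = [(i * ph, (i + 1) * ph) for i in range(X.shape[1] // ph)]
    cols = [(j * pw, (j + 1) * pw) for j in range(X.shape[2] // pw)]
    return [X[:, r0:r1, c0:c1, :] for r0, r1 in rows for c0, c1 in cols]

X = np.zeros((2, 64, 64, 3), dtype=np.float32)
patches = extract_patches_tf(X, (32, 32))
print(len(patches), patches[0].shape)  # 4 patches of shape (2, 32, 32, 3)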
jeffkinnison/shadho
|
[
"5da10d6e9916af224acfaa5150a0c151d4af42f9"
] |
[
"shadho/shadho.py"
] |
[
"\"\"\"Main driver for the SHADHO framework.\n\nClasses\n-------\nShadho\n Driver class for local and distributed hyperparameter optimization.\n\"\"\"\nfrom shadho.configuration import ShadhoConfig\nfrom shadho.hardware import ComputeClass\nfrom shadho.managers import create_manager\n\nfrom collections import OrderedDict\nimport json\nimport itertools\nimport os\nimport tarfile\nimport tempfile\nimport time\n\nimport numpy as np\nimport pyrameter\nimport scipy.stats\n\nfrom pyrameter.optimizer import FMin\nfrom pyrameter.trial import Trial\n\n\ndef shadho():\n pass\n\n\nclass Shadho(pyrameter.FMin):\n \"\"\"Optimize hyperparameters using specified hardware.\n\n Parameters\n ----------\n exp_key : str\n Unique ID for this experiment, used when referencing existing results.\n cmd : str or function\n The command to run on remote workers or function to run locally.\n spec : dict\n The specification defining search spaces.\n method : {'random','bayes','tpe','smac','pso'} or callable\n The optimization method to use.\n backend : str\n Filepath (JSON) or URL (MongoDB, SQLite) to the backend storage to\n record search spaces, domain state, trials, etc.\n files : list of str or WQFile\n The files to send to remote workers for task execution.\n use_complexity : bool, optional\n If True, use the complexity heuristic to adjust search proportions.\n use_priority : bool, optional\n If True, use the priority heuristic to adjust search proportions.\n timeout : int, optional\n Number of seconds to search for. If None or a negative number is\n passed, search with no timeout until ``max_tasks`` is reached.\n max_tasks : int, optional\n The maximum number of trials to run. If not passed, tasks will\n continue processing until ``timeout`` or indefinitely.\n max_queued_tasks : int, optional\n Number of tasks to queue at a time.\n await_pending : bool, optional\n If True, wait for all running tasks to complete after `timeout`.\n Default: False\n max_evals : int, optional\n The number of times to evaluate a set of generated hyperparameters.\n max_resubmissions: int, optional\n Maximum number of times to resubmit a particular parameterization for\n processing if task failure occurs. 
Default is not to resubmit.\n\n Notes\n -----\n To enable configuration after intialization, ``backend`` and ``manager``\n are created when `Shadho.run` is called.\n\n \"\"\"\n\n def __init__(self, exp_key, cmd, spec, method='random', backend='results.json',\n files=None, use_complexity=True, use_uncertainty=True,\n timeout=600, max_tasks=None, max_queued_tasks=100, await_pending=False,\n max_evals=1, max_resubmissions=0, save_frequency=10,\n hyperparameters_per_task=1):\n super().__init__(exp_key, spec, method, backend, max_evals=max_evals)\n self.config = ShadhoConfig()\n self.cmd = cmd\n if not isinstance(cmd, str):\n self.config.manager = 'local'\n else:\n self.config.workqueue.name = exp_key\n self.use_complexity = use_complexity\n self.use_uncertainty = use_uncertainty\n self.timeout = timeout if timeout is not None and timeout >= 0 \\\n else float('inf')\n self.max_tasks = max_tasks if max_tasks is not None and max_tasks >= 0 \\\n else float('inf')\n self.max_queued_tasks = max_queued_tasks\n self.max_resubmissions = max_resubmissions\n self.await_pending = await_pending\n self.save_frequency = save_frequency\n self.hyperparameters_per_task = hyperparameters_per_task \\\n if isinstance(hyperparameters_per_task, int) \\\n and hyperparameters_per_task > 0 \\\n else 1\n\n self.ccs = OrderedDict()\n\n self.files = []\n if files is not None:\n for f in files:\n self.files.append(f)\n self.assignments = {}\n\n self.__tmpdir = tempfile.mkdtemp(prefix='shadho_', suffix='_output')\n\n self.add_input_file(\n os.path.join(os.path.dirname(__file__), 'worker.py'),\n remotepath=self.config.wrapper)\n\n self.add_input_file(\n os.path.join(os.path.dirname(__file__), 'utils.py'),\n remotepath=self.config.utils)\n\n self.config.save_config(self.__tmpdir)\n self.add_input_file(os.path.join(self.__tmpdir, '.shadhorc'))\n # self.backend = backend\n\n def __del__(self):\n if hasattr(self, '__tmpdir') and self.__tmpdir is not None:\n os.rmdir(self.__tmpdir)\n\n def add_input_file(self, localpath, remotepath=None, cache=True):\n \"\"\"Add an input file to the global file list.\n\n Parameters\n ----------\n localpath : str\n Path to the file on the local filesystem.\n remotepath : str, optional\n Path to write the file to on the remote worker. If omitted, the\n basename of ``localpath`` (e.g. \"foo/bar.baz\" => \"bar.baz\").\n cache : bool, optional\n Whether to cache the file on the remote worker. If True (default),\n will be cached on the worker between tasks, reducing network\n transfer overhead. If False, will be re-transferred to the worker\n on each task.\n \"\"\"\n self.files.append((localpath, remotepath, 'input', cache))\n\n def add_output_file(self, localpath, remotepath=None, cache=False):\n \"\"\"Add an input file to the global file list.\n\n Output files are expected to be discovered on the remote worker after a\n task has completed. They are returned to the `shadho.Shadho` instance\n and will be stored for further review without additional processing.\n\n Parameters\n ----------\n localpath : str\n Path to the file on the local filesystem.\n remotepath : str, optional\n Path to write the file to on the remote worker. If omitted, the\n basename of ``localpath`` (e.g. \"foo/bar.baz\" => \"bar.baz\").\n cache : bool, optional\n Whether to cache the file on the remote worker. 
It is recommended\n that this be set to False for output files.\n\n Notes\n -----\n `shadho.Shadho` automatically parses the output file specified in\n ``.shadhorc``, so and output file added through this method will not be\n processed, but rather stored for later review.\n \"\"\"\n self.files.append((localpath, remotepath, 'output', cache))\n\n def add_compute_class(self, name, resource, value, max_queued_tasks=100):\n \"\"\"Add a compute class representing a set of consistent recources.\n\n Parameters\n ----------\n name : str\n The name of this set of compute resources.\n resource : str\n The resource to match, e.g. gpu_name, cores, etc.\n value\n The value of the resource that should be matched, e.g. \"TITAN X\n (Pascal)\", 8, etc.\n max_queued_tasks : int, optional\n The maximum number of tasks to queue for this compute class,\n default 100.\n \"\"\"\n cc = ComputeClass(name, resource, value, min(self.max_tasks, max_queued_tasks))\n self.ccs[cc.id] = cc\n\n def load(self):\n max_tasks = self.max_tasks\n super().load()\n self.max_tasks = max_tasks + len(self.trials)\n\n def run(self):\n \"\"\"Search hyperparameter values on remote workers.\n\n Generate and evaluate hyperparameters using the selected task manager\n and search strategy. Hyperparameters will be evaluated until timeout,\n and the optimal set will be printed to screen.\n\n Notes\n -----\n If `self.await_pending` is True, Shadho will continue to evaluate\n hyperparameters in the queue without generating new hyperparameter\n values. This will continue until the queue is empty and all tasks have\n returned.\n \"\"\"\n # Set up the task manager as defined in `shadho.managers`\n if not hasattr(self, 'manager'):\n self.manager = create_manager(\n manager_type=self.config.manager,\n config=self.config,\n tmpdir=self.__tmpdir)\n\n # If no ComputeClass was created, create a dummy class.\n if len(self.ccs) == 0:\n cc = ComputeClass('all', None, None, min(self.max_tasks, self.max_queued_tasks))\n self.ccs[cc.id] = cc\n else:\n for cc in self.ccs.values():\n cc.optimizer = self.copy()\n cc.max_queued_tasks = max(cc.max_queued_tasks / len(self.ccs), 1)\n\n # Set up intial model/compute class assignments.\n self.assign_to_ccs()\n\n self.start = time.time()\n completed_tasks = 0\n try:\n # Run the search until timeout or until all tasks complete\n # while elapsed < self.timeout and completed_tasks < self.max_tasks and not exhausted and (elapsed == 0 or not self.manager.empty()):\n while not self.done():\n # Generate hyperparameters and a flag to continue or stop\n stop = self.generate()\n if not stop:\n # Run another task and await results\n result = self.manager.run_task()\n if result is not None:\n # If a task returned post-process as a success or fail\n if len(result) == 3:\n self.success(*result) # Store and move on\n completed_tasks += 1\n else:\n self.failure(*result) # Resubmit if asked\n # Checkpoint the results to file or DB at some frequency\n if self.trial_count % self.save_frequency == 0:\n self.save()\n else:\n break\n\n self.save()\n\n # If requested, continue the loop until all tasks return\n if self.await_pending:\n while not self.manager.empty():\n result = self.manager.run_task()\n if result is not None:\n if len(result) == 3:\n self.success(*result)\n else:\n self.failure(*result)\n self.save()\n\n # On keyboard interrupt, save any results and clean up\n except KeyboardInterrupt:\n if hasattr(self, '__tmpdir') and self.__tmpdir is not None:\n os.rmdir(self.__tmpdir)\n\n self.end = time.time()\n\n # Save the results and 
print the optimal set of parameters to screen\n self.save()\n self.summary()\n return self.to_dataframes()\n\n def done(self):\n elapsed = time.time() - self.start\n exhausted = all([space.done(self.max_tasks) for space in self.searchspaces])\n return elapsed >= self.timeout or exhausted\n\n def generate(self):\n \"\"\"Generate hyperparameter values to test.\n\n Hyperparameter values are generated from the search space specification\n supplied at instantiation using the requested generation method (i.e.,\n random search, TPE, Gaussian process Bayesian optimization, etc.).\n\n Returns\n -------\n stop : bool\n If True, no values were generated and the search should stop. This\n facilitates grid-search-like behavior, for example stopping on\n completion of an exhaustive search.\n\n Notes\n -----\n This method will automatically add a new task to the queue after\n generating hyperparameter values.\n \"\"\"\n stop = True\n\n # Generate hyperparameters for every compute class with space in queue\n for cc_id in self.ccs:\n cc = self.ccs[cc_id]\n n = cc.max_queued_tasks - cc.current_tasks\n print(cc.max_queued_tasks, cc.current_tasks, n)\n\n # Generate enough hyperparameters to fill the queue\n for i in range(n):\n # Get bookkeeping ids and hyperparameter values\n if self.hyperparameters_per_task == 1:\n trial = super().generate(searchspaces=cc.searchspaces)\n\n if isinstance(trial, Trial):\n self.trials[trial.id] = trial\n # Encode info to map to db in the task tag\n tag = '.'.join([str(trial.id),\n str(trial.searchspace.id),\n cc_id])\n self.manager.add_task(\n self.cmd,\n tag,\n trial.parameter_dict,\n files=self.files,\n resource=cc.resource,\n value=cc.value)\n elif isinstance(trial, list) and len(trial) > 0:\n for t in trial:\n self.trials[t.id] = t\n tag = '.'.join([str(t.id),\n str(t.searchspace.id),\n cc_id])\n self.manager.add_task(\n self.cmd,\n tag,\n t.parameter_dict,\n files=self.files,\n resource=cc.resource,\n value=cc.value)\n\n # elif self.hyperparameters_per_task > 1:\n # trial = list(itertools.chain.from_iterable([cc.generate() for _ in range(self.hyperparameters_per_task)]))\n\n # if not any([t is None for t in trial]) or len(trial) > 0:\n # self.trials.update({t.id: t for t in trial})\n # # Encode info to map to db in the task tag\n # tag = '.'.join(['@'.join([str(t.id) for t in trial]),\n # str(trial[0].searchspace().id),\n # cc_id])\n # parameters = [t.parameter_dict for t in trial]\n # self.manager.add_task(\n # self.cmd,\n # tag,\n # parameters,\n # files=self.files,\n # resource=cc.resource,\n # value=cc.value)\n\n # Create a new distributed task if values were generated\n stop = False # Ensure that the search continues\n cc.current_tasks = cc.max_queued_tasks # Update to show full queue\n return stop\n\n def assign_to_ccs(self):\n \"\"\"Assign trees to compute classes.\n\n Each independent model in the search (model being one of a disjoint set\n of search domains) is assigned to at least two compute classes based on\n its rank relative to other models. In this way, only a subset of models\n are evaluated on each set of hardware.\n\n Notes\n -----\n This method accounts for differing counts of models and compute\n classes, adjusting for a greater number of models, a greater number of\n compute classes, or equal counts of models and compute classes.\n\n See Also\n --------\n `shadho.ComputeClass`\n `pyrameter.ModelGroup`\n \"\"\"\n # If only one compute class exists, do nothing. If multiple compute\n # classes exist, heuristically assign search trees to CCs. 
If no\n # compute classes exist, create a dummy to wrap the search.\n if len(self.ccs) > 1:\n # Sort models in the search by complexity, priority, or both and\n # get the updated order.\n # self.backend.sort_spaces(use_complexity=self.use_complexity,\n self.sort_spaces(use_complexity=self.use_complexity,\n use_uncertainty=self.use_uncertainty)\n\n # Clear the current assignments\n for cc in self.ccs:\n cc.clear()\n\n # Determine if the number of compute classes or the number of\n # model ids is larger\n ccids = list(self.ccs.keys())\n larger = self.searchspaces \\\n if len(self.searchspaces) >= len(ccids) \\\n else ccids\n smaller = ccids if larger == len(self.searchspaces) \\\n else self.searchspaces\n\n # Assign models to CCs such that each model is assigned to at\n # least two CCs.\n\n # Steps between `smaller` index increment\n x = float(len(larger)) / float(len(smaller))\n y = x - 1 # Current step index (offset by 1 for 0-indexing)\n j = 0 # Current index of `smaller`\n m = len(smaller) / 2 # Halfway point for second assignment\n n = len(larger) / 2 # Halfway point for second assignment\n\n for i in range(len(larger)):\n # If at a step point for `smaller` increment the index\n if i > np.ceil(y):\n j += 1\n y += x\n\n # Add the model to the current CC. If i <= n, add the model to\n # the next CC as well; if i > n, add to the previous CC.\n if smaller[j] in self.ccs:\n self.ccs[smaller[j]].add_searchspace(larger[i])\n if j < m:\n self.ccs[smaller[j + 1]].add_searchspace(\n self.searchspaces[larger[i]])\n else:\n self.ccs[smaller[j - 1]].add_searchspace(\n self.searchspaces[larger[i]])\n else:\n self.ccs[larger[i]].add_searchspace(smaller[j])\n if i < n:\n self.ccs[larger[i + 1]].add_searchspace(\n self.searchspaces[smaller[j]])\n else:\n self.ccs[larger[i - 1]].add_searchspace(\n self.searchspaces[smaller[j]])\n elif len(self.ccs) == 0:\n cc = ComputeClass('all', None, None, min(self.max_tasks, self.max_queued_tasks))\n self.ccs[cc.id] = cc\n cc.add_searchspace(self.searchspaces)\n else:\n cc = list(self.ccs.values())[0]\n cc.clear()\n cc.add_searchspace(self.searchspaces)\n\n def success(self, tag, loss, results):\n \"\"\"Handle successful task completion.\n\n Parameters\n ----------\n tag : str\n The task tag, encoding the result id, model id, and compute class\n id as ``<result_id>.<model_id>.<cc_id>``.\n loss : float\n The loss value associated with this result.\n results : dict\n Additional metrics to be included with this result.\n\n Notes\n -----\n This method will trigger a model/compute class reassignment in the\n event that storing the result caused the model's priority to be\n updated.\n \"\"\"\n # Get bookkeeping information from the task tag\n trial_id, ss_id, ccid = tag.split('.')\n\n if not isinstance(results, list):\n results['compute_class'] = {\n 'id': ccid,\n 'name': self.ccs[ccid].name,\n 'value': self.ccs[ccid].value\n }\n else:\n trial_id = trial_id.split('@')\n ccdata = {\n 'id': ccid,\n 'name': self.ccs[ccid].name,\n 'value': self.ccs[ccid].value\n }\n for r in results:\n r['compute_class'] = ccdata\n\n\n # Update the DB with the result\n # self.backend.register_result(ss_id, trial_id, loss, results)\n self.register_result(ss_id, trial_id, loss, results)\n\n # Reassign models to CCs at some frequency\n # n_completed = sum([1 for trial in self.backend.trials.values()\n n_completed = sum([1 for trial in self.trials.values()\n if trial.status.value == 3])\n if n_completed % 10 == 0:\n self.assign_to_ccs()\n\n # Update the number of enqueued items\n 
self.ccs[ccid].current_tasks -= 1\n\n def failure(self, tag, resub):\n \"\"\"Handle task failure.\n\n Parameters\n ----------\n task : `work_queue.Task`\n The failed task to process.\n\n Notes\n -----\n This method will resubmit failed tasks on request to account for\n potential worker dropout, etc.\n \"\"\"\n # Get bookkeeping information from the task tag\n trial_id, ss_id, ccid = tag.split('.')\n\n trials = trial_id.split('@')\n\n # Determine whether or not to resubmit\n # self.backend.register_result(ss_id, trial_id, objective=None,\n submissions, params = \\\n self.register_result(ss_id, trial_id, objective=None,\n results=None, errmsg='yes')\n\n # Resubmit the task if it should be, otherwise update the number of\n # enqueued items.\n if resub and submissions < self.max_resubmissions:\n cc = self.ccs[ccid]\n self.manager.add_task(self.cmd,\n tag,\n params,\n files=self.files,\n resource=cc.resource,\n value=cc.value)\n else:\n self.ccs[ccid].current_tasks -= 1\n"
] |
[
[
"numpy.ceil"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
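assign_to_ccs in shadho.py above interleaves search spaces and compute classes by walking the larger collection and advancing an index into the smaller one every len(larger)/len(smaller) steps, which is where this row's single numpy.ceil call comes from. The stepping arithmetic in isolation, with toy ids and only the primary assignment (the full method also assigns each space to a second, neighbouring compute class):

import numpy as np

# Toy ids standing in for search spaces (larger) and compute classes (smaller).
larger = ['space0', 'space1', 'space2', 'space3', 'space4']
smaller = ['cc0', 'cc1']

x = float(len(larger)) / float(len(smaller))  # steps between increments of j
y = x - 1                                     # current step boundary
j = 0                                         # index into the smaller collection
assignment = {}
for i in range(len(larger)):
    if i > np.ceil(y):
        j += 1
        y += x
    assignment[larger[i]] = smaller[j]
print(assignment)  # space0..space2 -> cc0, space3..space4 -> cc1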
implus/PVT
|
[
"4f70d09f2c0390a9ca2dabf271d725f2d8f75d08"
] |
[
"pvt.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom functools import partial\n\nfrom timm.models.layers import DropPath, to_2tuple, trunc_normal_\nfrom timm.models.registry import register_model\nfrom timm.models.vision_transformer import _cfg\n\n__all__ = [\n 'pvt_tiny', 'pvt_small', 'pvt_medium', 'pvt_large',\n 'pvt_small_sk2ffn'\n]\n\n\nclass SK2(nn.Module):\n\n def __init__(self, dim, reduce_ratio=4.):\n super().__init__()\n hidden_dim = max(int(dim // reduce_ratio), 32)\n print('hidden_dim = ', hidden_dim)\n self.mlp = nn.Sequential(\n nn.Linear(dim, hidden_dim),\n nn.BatchNorm1d(hidden_dim),\n nn.ReLU(),\n nn.Linear(hidden_dim, dim),\n nn.BatchNorm1d(dim),\n )\n # Mlp(in_features=dim, hidden_features=hidden_dim)\n self.alpha = nn.Parameter(torch.zeros(1, 1))\n\n def forward(self, x0, x1):\n sig = self.alpha.sigmoid()\n attn = (1.0 - sig) * x0.mean(dim=1, keepdim=False) + sig * x1.mean(dim=1, keepdim=False)\n attn = self.mlp(attn).sigmoid().unsqueeze(1)\n x = x0 + attn * (x1 - x0)\n return x\n\n\nclass Mlp(nn.Module):\n def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):\n super().__init__()\n out_features = out_features or in_features\n hidden_features = hidden_features or in_features\n self.fc1 = nn.Linear(in_features, hidden_features)\n self.act = act_layer()\n self.fc2 = nn.Linear(hidden_features, out_features)\n self.drop = nn.Dropout(drop)\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.act(x)\n x = self.drop(x)\n x = self.fc2(x)\n x = self.drop(x)\n return x\n\n\nclass SK2Mlp(nn.Module):\n def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):\n super().__init__()\n out_features = out_features or in_features\n hidden_features = hidden_features or in_features\n self.fc1 = nn.Linear(in_features, hidden_features)\n self.act = act_layer()\n self.fc2 = nn.Linear(hidden_features, out_features)\n self.drop = nn.Dropout(drop)\n self.sk2 = SK2(hidden_features)\n\n def forward(self, x):\n y = self.fc1(x)\n y = self.act(y)\n x = self.sk2(x, y)\n x = self.drop(x)\n x = self.fc2(x)\n x = self.drop(x)\n return x\n\n\nclass Block_sk2mlp(nn.Module):\n\n def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,\n drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1):\n super().__init__()\n self.norm1 = norm_layer(dim)\n self.attn = Attention(\n dim,\n num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,\n attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratio)\n # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here\n self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity()\n self.norm2 = norm_layer(dim)\n mlp_hidden_dim = int(dim * mlp_ratio)\n self.mlp = SK2Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\n\n def forward(self, x, H, W):\n x = x + self.drop_path(self.attn(self.norm1(x), H, W))\n # x = self.sk2(x, self.drop_path(self.attn(self.norm1(x), H, W)))\n # x = self.sk2(x, self.attn(self.norm1(x), H, W))\n x = x + self.drop_path(self.mlp(self.norm2(x)))\n # x = self.sk2ln(x, self.mlp(self.norm2(x)))\n\n return x\n\n\n\nclass Attention(nn.Module):\n def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., sr_ratio=1):\n super().__init__()\n assert dim % num_heads == 0, f\"dim {dim} should be divided by num_heads {num_heads}.\"\n\n self.dim = dim\n self.num_heads = num_heads\n head_dim = dim // num_heads\n self.scale = qk_scale or head_dim ** -0.5\n\n self.q = nn.Linear(dim, dim, bias=qkv_bias)\n self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias)\n self.attn_drop = nn.Dropout(attn_drop)\n self.proj = nn.Linear(dim, dim)\n self.proj_drop = nn.Dropout(proj_drop)\n\n self.sr_ratio = sr_ratio\n if sr_ratio > 1:\n self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)\n self.norm = nn.LayerNorm(dim)\n\n def forward(self, x, H, W):\n B, N, C = x.shape\n q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)\n\n if self.sr_ratio > 1:\n x_ = x.permute(0, 2, 1).reshape(B, C, H, W)\n x_ = self.sr(x_).reshape(B, C, -1).permute(0, 2, 1)\n x_ = self.norm(x_)\n kv = self.kv(x_).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n else:\n kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n k, v = kv[0], kv[1]\n\n attn = (q @ k.transpose(-2, -1)) * self.scale\n attn = attn.softmax(dim=-1)\n attn = self.attn_drop(attn)\n\n x = (attn @ v).transpose(1, 2).reshape(B, N, C)\n x = self.proj(x)\n x = self.proj_drop(x)\n\n return x\n\n\nclass SK2LN(nn.Module):\n\n def __init__(self, dim, reduce_ratio=4.):\n super().__init__()\n hidden_dim = max(int(dim // reduce_ratio), 32)\n print('hidden_dim = ', hidden_dim)\n self.mlp = nn.Sequential(\n nn.Linear(dim, hidden_dim),\n nn.LayerNorm(hidden_dim),\n nn.ReLU(),\n nn.Linear(hidden_dim, dim),\n nn.LayerNorm(dim),\n #nn.BatchNorm1d(dim),\n )\n # Mlp(in_features=dim, hidden_features=hidden_dim)\n self.alpha = nn.Parameter(torch.zeros(1, 1))\n\n def forward(self, x0, x1):\n sig = self.alpha.sigmoid()\n attn = (1.0 - sig) * x0.mean(dim=1, keepdim=False) + sig * x1.mean(dim=1, keepdim=False)\n attn = self.mlp(attn).sigmoid().unsqueeze(1)\n x = x0 + attn * (x1 - x0)\n return x\n\n\nclass Block_sk2lnffn(nn.Module):\n\n def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,\n drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1):\n super().__init__()\n self.norm1 = norm_layer(dim)\n self.attn = Attention(\n dim,\n num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,\n attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratio)\n # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here\n self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity()\n self.norm2 = norm_layer(dim)\n mlp_hidden_dim = int(dim * mlp_ratio)\n self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\n self.sk2ln = SK2LN(dim)\n print('using ffn sk2ln')\n\n def forward(self, x, H, W):\n x = x + self.drop_path(self.attn(self.norm1(x), H, W))\n # x = self.sk2(x, self.drop_path(self.attn(self.norm1(x), H, W)))\n # x = self.sk2(x, self.attn(self.norm1(x), H, W))\n # x = x + self.drop_path(self.mlp(self.norm2(x)))\n x = self.sk2ln(x, self.mlp(self.norm2(x)))\n\n return x\n\n\nclass Block(nn.Module):\n\n def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,\n drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1):\n super().__init__()\n self.norm1 = norm_layer(dim)\n self.attn = Attention(\n dim,\n num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,\n attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratio)\n # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here\n self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()\n self.norm2 = norm_layer(dim)\n mlp_hidden_dim = int(dim * mlp_ratio)\n self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\n #self.sk2 = SK2(dim)\n #print('using ffn sk2')\n\n def forward(self, x, H, W):\n x = x + self.drop_path(self.attn(self.norm1(x), H, W))\n # x = self.sk2(x, self.drop_path(self.attn(self.norm1(x), H, W)))\n # x = self.sk2(x, self.attn(self.norm1(x), H, W))\n x = x + self.drop_path(self.mlp(self.norm2(x)))\n # x = self.sk2(x, self.mlp(self.norm2(x)))\n\n return x\n\n\nclass Block_sk2ffn(nn.Module):\n\n def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,\n drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1):\n super().__init__()\n self.norm1 = norm_layer(dim)\n self.attn = Attention(\n dim,\n num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,\n attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratio)\n # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here\n self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()\n self.norm2 = norm_layer(dim)\n mlp_hidden_dim = int(dim * mlp_ratio)\n self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\n self.sk2 = SK2(dim)\n print('using ffn sk2')\n\n def forward(self, x, H, W):\n x = x + self.drop_path(self.attn(self.norm1(x), H, W))\n # x = self.sk2(x, self.drop_path(self.attn(self.norm1(x), H, W)))\n # x = self.sk2(x, self.attn(self.norm1(x), H, W))\n # x = x + self.drop_path(self.mlp(self.norm2(x)))\n x = self.sk2(x, self.mlp(self.norm2(x)))\n\n return x\n\n\nclass Block_sk2ffnx(nn.Module):\n\n def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,\n drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1):\n super().__init__()\n self.norm1 = norm_layer(dim)\n self.attn = Attention(\n dim,\n num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,\n attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratio)\n # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here\n self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity()\n self.norm2 = norm_layer(dim)\n mlp_hidden_dim = int(dim * mlp_ratio)\n self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\n self.sk2 = SK2(dim)\n print('using ffn sk2 x, x+mlp(x)')\n\n def forward(self, x, H, W):\n x = x + self.drop_path(self.attn(self.norm1(x), H, W))\n # x = self.sk2(x, self.drop_path(self.attn(self.norm1(x), H, W)))\n # x = self.sk2(x, self.attn(self.norm1(x), H, W))\n # x = x + self.drop_path(self.mlp(self.norm2(x)))\n x = self.sk2(x, x + self.mlp(self.norm2(x)))\n\n return x\n\n\nclass Block_sk2(nn.Module):\n\n def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,\n drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1):\n super().__init__()\n self.norm1 = norm_layer(dim)\n self.attn = Attention(\n dim,\n num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,\n attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratio)\n # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here\n self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()\n self.norm2 = norm_layer(dim)\n mlp_hidden_dim = int(dim * mlp_ratio)\n self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\n self.sk2 = SK2(dim)\n print('using sk2')\n\n def forward(self, x, H, W):\n # x = x + self.drop_path(self.attn(self.norm1(x), H, W))\n # x = self.sk2(x, self.drop_path(self.attn(self.norm1(x), H, W)))\n x = self.sk2(x, self.attn(self.norm1(x), H, W))\n x = x + self.drop_path(self.mlp(self.norm2(x)))\n # x = self.sk2(x, self.mlp(self.norm2(x)))\n\n return x\n\n\nclass PatchEmbed(nn.Module):\n \"\"\" Image to Patch Embedding\n \"\"\"\n\n def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):\n super().__init__()\n img_size = to_2tuple(img_size)\n patch_size = to_2tuple(patch_size)\n\n self.img_size = img_size\n self.patch_size = patch_size\n assert img_size[0] % patch_size[0] == 0 and img_size[1] % patch_size[1] == 0, \\\n f\"img_size {img_size} should be divided by patch_size {patch_size}.\"\n self.H, self.W = img_size[0] // patch_size[0], img_size[1] // patch_size[1]\n self.num_patches = self.H * self.W\n self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)\n self.norm = nn.LayerNorm(embed_dim)\n\n def forward(self, x):\n B, C, H, W = x.shape\n\n x = self.proj(x).flatten(2).transpose(1, 2)\n x = self.norm(x)\n H, W = H // self.patch_size[0], W // self.patch_size[1]\n\n return x, (H, W)\n\n\nclass PyramidVisionTransformer(nn.Module):\n def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dims=[64, 128, 256, 512],\n num_heads=[1, 2, 4, 8], mlp_ratios=[4, 4, 4, 4], qkv_bias=False, qk_scale=None, drop_rate=0.,\n attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm,\n depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1], Block=None):\n super().__init__()\n self.num_classes = num_classes\n self.depths = depths\n\n # patch_embed\n self.patch_embed1 = PatchEmbed(img_size=img_size, patch_size=patch_size, in_chans=in_chans,\n embed_dim=embed_dims[0])\n self.patch_embed2 = PatchEmbed(img_size=img_size // 4, patch_size=2, in_chans=embed_dims[0],\n embed_dim=embed_dims[1])\n self.patch_embed3 = PatchEmbed(img_size=img_size // 8, patch_size=2, in_chans=embed_dims[1],\n embed_dim=embed_dims[2])\n self.patch_embed4 = PatchEmbed(img_size=img_size // 16, patch_size=2, in_chans=embed_dims[2],\n embed_dim=embed_dims[3])\n\n # 
pos_embed\n self.pos_embed1 = nn.Parameter(torch.zeros(1, self.patch_embed1.num_patches, embed_dims[0]))\n self.pos_drop1 = nn.Dropout(p=drop_rate)\n self.pos_embed2 = nn.Parameter(torch.zeros(1, self.patch_embed2.num_patches, embed_dims[1]))\n self.pos_drop2 = nn.Dropout(p=drop_rate)\n self.pos_embed3 = nn.Parameter(torch.zeros(1, self.patch_embed3.num_patches, embed_dims[2]))\n self.pos_drop3 = nn.Dropout(p=drop_rate)\n self.pos_embed4 = nn.Parameter(torch.zeros(1, self.patch_embed4.num_patches + 1, embed_dims[3]))\n self.pos_drop4 = nn.Dropout(p=drop_rate)\n\n # transformer encoder\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule\n cur = 0\n self.block1 = nn.ModuleList([Block(\n dim=embed_dims[0], num_heads=num_heads[0], mlp_ratio=mlp_ratios[0], qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,\n sr_ratio=sr_ratios[0])\n for i in range(depths[0])])\n\n cur += depths[0]\n self.block2 = nn.ModuleList([Block(\n dim=embed_dims[1], num_heads=num_heads[1], mlp_ratio=mlp_ratios[1], qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,\n sr_ratio=sr_ratios[1])\n for i in range(depths[1])])\n\n cur += depths[1]\n self.block3 = nn.ModuleList([Block(\n dim=embed_dims[2], num_heads=num_heads[2], mlp_ratio=mlp_ratios[2], qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,\n sr_ratio=sr_ratios[2])\n for i in range(depths[2])])\n\n cur += depths[2]\n self.block4 = nn.ModuleList([Block(\n dim=embed_dims[3], num_heads=num_heads[3], mlp_ratio=mlp_ratios[3], qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,\n sr_ratio=sr_ratios[3])\n for i in range(depths[3])])\n self.norm = norm_layer(embed_dims[3])\n\n # cls_token\n self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dims[3]))\n\n # classification head\n self.head = nn.Linear(embed_dims[3], num_classes) if num_classes > 0 else nn.Identity()\n\n # init weights\n trunc_normal_(self.pos_embed1, std=.02)\n trunc_normal_(self.pos_embed2, std=.02)\n trunc_normal_(self.pos_embed3, std=.02)\n trunc_normal_(self.pos_embed4, std=.02)\n trunc_normal_(self.cls_token, std=.02)\n self.apply(self._init_weights)\n\n def reset_drop_path(self, drop_path_rate):\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(self.depths))]\n cur = 0\n for i in range(self.depths[0]):\n self.block1[i].drop_path.drop_prob = dpr[cur + i]\n\n cur += self.depths[0]\n for i in range(self.depths[1]):\n self.block2[i].drop_path.drop_prob = dpr[cur + i]\n\n cur += self.depths[1]\n for i in range(self.depths[2]):\n self.block3[i].drop_path.drop_prob = dpr[cur + i]\n\n cur += self.depths[2]\n for i in range(self.depths[3]):\n self.block4[i].drop_path.drop_prob = dpr[cur + i]\n\n def _init_weights(self, m):\n if isinstance(m, nn.Linear):\n trunc_normal_(m.weight, std=.02)\n if isinstance(m, nn.Linear) and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\n\n @torch.jit.ignore\n def no_weight_decay(self):\n # return {'pos_embed', 'cls_token'} # has pos_embed may be better\n return {'cls_token'}\n\n def get_classifier(self):\n return self.head\n\n def reset_classifier(self, num_classes, global_pool=''):\n self.num_classes = 
num_classes\n self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()\n\n # def _get_pos_embed(self, pos_embed, patch_embed, H, W):\n # if H * W == self.patch_embed1.num_patches:\n # return pos_embed\n # else:\n # return F.interpolate(\n # pos_embed.reshape(1, patch_embed.H, patch_embed.W, -1).permute(0, 3, 1, 2),\n # size=(H, W), mode=\"bilinear\").reshape(1, -1, H * W).permute(0, 2, 1)\n\n def forward_features(self, x):\n B = x.shape[0]\n\n # stage 1\n x, (H, W) = self.patch_embed1(x)\n x = x + self.pos_embed1\n x = self.pos_drop1(x)\n for blk in self.block1:\n x = blk(x, H, W)\n x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()\n\n # stage 2\n x, (H, W) = self.patch_embed2(x)\n x = x + self.pos_embed2\n x = self.pos_drop2(x)\n for blk in self.block2:\n x = blk(x, H, W)\n x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()\n\n # stage 3\n x, (H, W) = self.patch_embed3(x)\n x = x + self.pos_embed3\n x = self.pos_drop3(x)\n for blk in self.block3:\n x = blk(x, H, W)\n x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()\n\n # stage 4\n x, (H, W) = self.patch_embed4(x)\n cls_tokens = self.cls_token.expand(B, -1, -1)\n x = torch.cat((cls_tokens, x), dim=1)\n x = x + self.pos_embed4\n x = self.pos_drop4(x)\n for blk in self.block4:\n x = blk(x, H, W)\n\n x = self.norm(x)\n\n return x[:, 0]\n\n def forward(self, x):\n x = self.forward_features(x)\n x = self.head(x)\n\n return x\n\n\ndef _conv_filter(state_dict, patch_size=16):\n \"\"\" convert patch embedding weight from manual patchify + linear proj to conv\"\"\"\n out_dict = {}\n for k, v in state_dict.items():\n if 'patch_embed.proj.weight' in k:\n v = v.reshape((v.shape[0], 3, patch_size, patch_size))\n out_dict[k] = v\n\n return out_dict\n\n\n@register_model\ndef pvt_tiny(pretrained=False, **kwargs):\n model = PyramidVisionTransformer(\n patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], qkv_bias=True,\n norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1],\n **kwargs)\n model.default_cfg = _cfg()\n\n return model\n\n\n@register_model\ndef pvt_small(pretrained=False, **kwargs):\n model = PyramidVisionTransformer(\n patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], qkv_bias=True,\n norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1], Block=Block, **kwargs)\n model.default_cfg = _cfg()\n\n return model\n\n\n@register_model\ndef pvt_small_sk2ffn(pretrained=False, **kwargs):\n model = PyramidVisionTransformer(\n patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], qkv_bias=True,\n norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1],\n Block=Block_sk2ffn, **kwargs)\n model.default_cfg = _cfg()\n\n return model\n\n\n@register_model\ndef pvt_small_sk2ffnx(pretrained=False, **kwargs):\n model = PyramidVisionTransformer(\n patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], qkv_bias=True,\n norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1],\n Block=Block_sk2ffnx, **kwargs)\n model.default_cfg = _cfg()\n\n return model\n\n\n@register_model\ndef pvt_small_sk2(pretrained=False, **kwargs):\n model = PyramidVisionTransformer(\n patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], qkv_bias=True,\n norm_layer=partial(nn.LayerNorm, eps=1e-6), 
depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1],\n Block=Block_sk2, **kwargs)\n model.default_cfg = _cfg()\n\n return model\n\n\n@register_model\ndef pvt_small_sk2lnffn(pretrained=False, **kwargs):\n model = PyramidVisionTransformer(\n patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], qkv_bias=True,\n norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1],\n Block=Block_sk2lnffn, **kwargs)\n model.default_cfg = _cfg()\n\n return model\n\n\n@register_model\ndef pvt_medium(pretrained=False, **kwargs):\n model = PyramidVisionTransformer(\n patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], qkv_bias=True,\n norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 4, 18, 3], sr_ratios=[8, 4, 2, 1],\n **kwargs)\n model.default_cfg = _cfg()\n\n return model\n\n\n@register_model\ndef pvt_large(pretrained=False, **kwargs):\n model = PyramidVisionTransformer(\n patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], qkv_bias=True,\n norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 8, 27, 3], sr_ratios=[8, 4, 2, 1],\n **kwargs)\n model.default_cfg = _cfg()\n\n return model\n\n\n@register_model\ndef pvt_huge_v2(pretrained=False, **kwargs):\n model = PyramidVisionTransformer(\n patch_size=4, embed_dims=[128, 256, 512, 768], num_heads=[2, 4, 8, 12], mlp_ratios=[8, 8, 4, 4], qkv_bias=True,\n norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 10, 60, 3], sr_ratios=[8, 4, 2, 1],\n # drop_rate=0.0, drop_path_rate=0.02)\n **kwargs)\n model.default_cfg = _cfg()\n\n return model\n"
] |
[
[
"torch.nn.BatchNorm1d",
"torch.nn.Dropout",
"torch.cat",
"torch.zeros",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.LayerNorm",
"torch.nn.Linear",
"torch.nn.Identity",
"torch.nn.ReLU"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mackelab/IdentifyMechanisticModels_2020
|
[
"b93c90ec6156ae5f8afee6aaac7317373e9caf5e"
] |
[
"7_stg/model/dataset_proc/read_data.py"
] |
[
"import numpy as np\nimport experimental_data_utils as edu\nimport pyabf\n\n\nclass reader:\n def __init__(self, filedir):\n \"\"\"\n Initialize reader\n\n :param filedir: string. Relative or absolute path to directory\n \"\"\"\n self.filedir = filedir\n self.PY_spike_times = []\n self.LP_spike_times = []\n self.PD_spike_times = []\n self.PY_binary = []\n self.LP_binary = []\n self.PD_binary = []\n self.num = []\n self.t_PY = []\n self.t_LP = []\n self.t_PD = []\n self.lpn = []\n self.pdn = []\n self.lvn = []\n self.pyn = []\n self.dt = None\n self.sampling_rate = 0.0001\n self.t_max = 150.0\n\n def read_preparation(self, num, case='spike', subtract_PD=False):\n \"\"\"\n Read spike data of one of the two preparations\n\n :param num: string. Either '0000' or '0001'\n :return: reads spike times for all three neurons\n \"\"\"\n self.num = num\n if case=='spike':\n self.read_LP_spikes('828_042_2_LP_spikes_'+num+'.txt')\n self.read_PY_spikes('828_042_2_PY_spikes_'+num+'.txt')\n self.read_PD_spikes('828_042_2_PD_spikes_'+num+'.txt')\n if subtract_PD: self.subtract_PD_from_PY('828_042_2_PD_spikes_'+num+'.txt')\n elif case=='voltage':\n self.read_voltage('828_042_2_' + num + '_raw_trace.txt')\n\n def subtract_PD_from_PY(self, file):\n pass\n\n def binarize_spike_preparation(self, dt=0.001):\n \"\"\"\n Binarize one of the two preparations\n\n :param dt: double, step-size\n :return: time vectors and binarized neuron traces\n \"\"\"\n self.dt = dt\n self.t_PY, self.PY_binary = self.binarize_spike_data(self.PY_spike_times)\n self.t_LP, self.LP_binary = self.binarize_spike_data(self.LP_spike_times)\n self.t_PD, self.PD_binary = self.binarize_spike_data(self.PD_spike_times)\n\n def read_spike_data(self, file):\n \"\"\"\n Read the spike data from a file\n\n :param file: string. Filename\n :return: data: list. Contains spike times\n \"\"\"\n file_string = self.filedir + '/' + file\n\n infile = open(file_string, 'r')\n lines = infile.readlines()\n data = []\n for line in lines:\n try:\n data.append(float(line[:-2]))\n except:\n pass\n data = np.asarray(data)\n return data\n\n def read_voltage(self, file):\n \"\"\"\n Read the voltage data from a file\n\n :param file: string. Filename\n :return: voltage traces. 
See 828_042_2_NOTES.txt for meaning\n \"\"\"\n file_string = self.filedir + '/' + file\n\n infile = open(file_string, 'r')\n lines = infile.readlines()\n data = []\n last_worked = False\n for line in lines:\n try:\n data_per_neuron.append(float(line[:-2]))\n last_worked = True\n except:\n if last_worked: data.append(data_per_neuron)\n data_per_neuron = []\n last_worked = False\n pass\n self.lpn = np.asarray(data[0])\n self.pdn = np.asarray(data[1])\n self.lvn = np.asarray(data[2])\n self.pyn = np.asarray(data[3])\n\n def binarize_spike_data(self, data):\n \"\"\"\n Take the spike times and put them into a vector of zeros (no spike) and ones (spike)\n\n :param data: list, spike times\n :return: t: time vector\n :return: binarized_data: np.array of zeros (no spike) and ones (spike)\n \"\"\"\n t = np.arange(0, np.max(data), self.dt)\n inds = np.digitize(data, t)\n binarized_data = np.zeros_like(t)\n for ind in inds:\n binarized_data[ind-1] = 1.0\n return t, binarized_data\n\n def normalize_voltage_data(self):\n \"\"\"\n Normalize voltage data\n\n :return: normalized data\n \"\"\"\n self.lpn = (self.lpn - np.mean(self.lpn)) / np.std(self.lpn)\n self.pdn = (self.pdn - np.mean(self.pdn)) / np.std(self.pdn)\n self.lvn = (self.lvn - np.mean(self.lvn)) / np.std(self.lvn)\n self.pyn = (self.pyn - np.mean(self.pyn)) / np.std(self.pyn)\n\n def read_LP_spikes(self, file):\n \"\"\"\n Read data for LP neuron\n\n :param file: string. Filename of the file to be read\n :return: list. Contains LP spike times\n \"\"\"\n self.LP_spike_times = self.read_spike_data(file)\n\n def read_PY_spikes(self, file):\n \"\"\"\n Read data for PY neuron\n\n :param file: string. Filename of the file to be read\n :return: list. Contains PY spike times\n \"\"\"\n self.PY_spike_times = self.read_spike_data(file)\n\n def read_PD_spikes(self, file):\n \"\"\"\n Read data for PD neuron\n\n :param file: string. Filename of the file to be read\n :return: list. 
Contains PD spike times\n \"\"\"\n self.PD_spike_times = self.read_spike_data(file)\n\n\nclass ABF_reader(reader):\n def __init__(self, filedir):\n self.type = 'abf'\n super().__init__(filedir)\n\n abf = pyabf.ABF(filedir)\n channel_data = abf.data\n\n self.lpn = channel_data[3]\n self.pyn = channel_data[1]\n self.pdn = channel_data[2]\n self.lvn = channel_data[6]\n\n self.dt = 1 / abf.dataRate\n self.t_LP = np.arange(0, len(self.lpn)) * self.dt\n self.t_PY = np.arange(0, len(self.pyn)) * self.dt\n self.t_PD = np.arange(0, len(self.pdn)) * self.dt\n\n def read_LP_spikes(self, file):\n self.LP_binary = edu.calc_spikes(self.lpn, threshold=0.5)\n indizes = np.where(self.LP_binary == 1)[-1]\n self.LP_spike_times = indizes * self.dt\n\n def read_PY_spikes(self, file):\n self.PY_binary = edu.calc_spikes(self.pyn, threshold=10)\n indizes = np.where(self.PY_binary == 1)[-1]\n self.PY_spike_times = indizes * self.dt\n\n def read_PD_spikes(self, file):\n self.PD_binary = edu.calc_spikes(self.pdn, threshold=20)\n indizes = np.where(self.PD_binary == 1)[-1]\n self.PD_spike_times = indizes * self.dt\n\n\n\nclass ABF_reader_016(reader):\n def __init__(self, filedir):\n self.type = 'abf'\n super().__init__(filedir)\n\n abf = pyabf.ABF(filedir)\n channel_data = abf.data\n\n self.lpn = channel_data[1]\n self.pyn = channel_data[5]\n self.pdn = channel_data[4]\n self.lvn = channel_data[3]\n\n self.dt = 1 / abf.dataRate\n self.t_LP = np.arange(0, len(self.lpn)) * self.dt\n self.t_PY = np.arange(0, len(self.pyn)) * self.dt\n self.t_PD = np.arange(0, len(self.pdn)) * self.dt\n\n def read_LP_spikes(self, file):\n self.LP_binary = edu.calc_spikes(self.lpn, threshold=500)\n indizes = np.where(self.LP_binary == 1)[-1]\n self.LP_spike_times = indizes * self.dt\n\n def read_PY_spikes(self, file):\n self.PY_binary = edu.calc_spikes(self.pyn, threshold=4.1)\n indizes = np.where(self.PY_binary == 1)[-1]\n self.PY_spike_times = indizes * self.dt\n\n def read_PD_spikes(self, file):\n self.PD_binary = edu.calc_spikes(self.pdn, threshold=50)\n indizes = np.where(self.PD_binary == 1)[-1]\n self.PD_spike_times = indizes * self.dt\n\n def subtract_PD_from_PY(self, file):\n print(1/self.dt)\n self.PY_binary = edu.subtract_PDfromPY(self.PY_binary, self.PD_binary, vicinity=5, sampling_frequency=1/self.dt)\n indizes = np.where(self.PY_binary == 1)[-1]\n self.PY_spike_times = indizes * self.dt\n\n\nclass ABF_reader_078(reader):\n def __init__(self, filedir):\n self.type = 'abf'\n super().__init__(filedir)\n\n abf = pyabf.ABF(filedir)\n channel_data = abf.data\n\n self.lpn = channel_data[6]\n self.pyn = channel_data[5]\n self.pdn = channel_data[7]\n self.lvn = channel_data[4]\n\n self.dt = 1 / abf.dataRate\n self.t_LP = np.arange(0, len(self.lpn)) * self.dt\n self.t_PY = np.arange(0, len(self.pyn)) * self.dt\n self.t_PD = np.arange(0, len(self.pdn)) * self.dt\n\n def read_LP_spikes(self, file):\n self.LP_binary = edu.calc_spikes(self.lpn, threshold=50)\n indizes = np.where(self.LP_binary == 1)[-1]\n self.LP_spike_times = indizes * self.dt\n\n def read_PY_spikes(self, file):\n self.PY_binary = edu.calc_spikes(self.pyn, threshold=3)\n indizes = np.where(self.PY_binary == 1)[-1]\n self.PY_spike_times = indizes * self.dt\n\n def read_PD_spikes(self, file):\n self.PD_binary = edu.calc_spikes(self.pdn, threshold=.05)\n indizes = np.where(self.PD_binary == 1)[-1]\n self.PD_spike_times = indizes * self.dt\n\n def subtract_PD_from_PY(self, file):\n print(1/self.dt)\n self.PY_binary = edu.subtract_PDfromPY(self.PY_binary, self.PD_binary, 
vicinity=5, sampling_frequency=1/self.dt)\n indizes = np.where(self.PY_binary == 1)[-1]\n self.PY_spike_times = indizes * self.dt\n"
] |
[
[
"numpy.asarray",
"numpy.max",
"numpy.std",
"numpy.zeros_like",
"numpy.mean",
"numpy.digitize",
"numpy.where"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Razorbill-Instruments/razorbill-lab-python
|
[
"f1a8ae3727b16ab82dd95a802975c44c7aa32328"
] |
[
"instruments/rigol.py"
] |
[
"#\n# Copyright 2016-2021 Razorbill Instruments Ltd.\n# This file is part of the Razorbill Lab Python library which is\n# available under the MIT licence - see the LICENCE file for more.\n#\n\"\"\"\nModule for interfacing with Rigol Instruments\n\"\"\"\n\nfrom . import ScpiInstrument, ChildInstrument, _scpi_property, _logger\nimport matplotlib.pyplot as plt\nimport time\n\nclass _Ds1000_Channel(ChildInstrument):\n \"\"\"Input channel on a DS1000 series scope\"\"\"\n enable = _scpi_property(':CHAN{subaddr:}:DISP', '{:bool}')\n vert_offset = _scpi_property(':CHAN{subaddr:}:OFFS', '{:g}')\n vert_scale = _scpi_property(':CHAN{subaddr:}:SCAL', '{:g}')\n vert_vernier = _scpi_property(':CHAN{subaddr:}:VERN', '{:bool}')\n\nclass Ds1000(ScpiInstrument):\n \"\"\"DS1054 and related scopes\"\"\"\n def _setup(self):\n self.channels = {1: _Ds1000_Channel(self, 1), \n 2: _Ds1000_Channel(self, 2),\n 3: _Ds1000_Channel(self, 3),\n 4: _Ds1000_Channel(self, 4)}\n \n _idnstring = 'RIGOL TECHNOLOGIES,DS1'\n \n samp_rate = _scpi_property('ACQ:SRAT', '{:g}', can_set=False)\n memory_depth = _scpi_property('ACQ:MDEP', '{:g}', can_set=False)\n horiz_posn = _scpi_property(':TIM:OFFS', '{:g}', doc=\"Horisontal position in sec. Positive moves trigger point left.\")\n horiz_scale = _scpi_property(':TIM:SCAL', '{:g}', doc=\"Horisontal scale, in sec/div. Rounds up.\")\n trig_edge_level = _scpi_property(':TRIG:EDGE:LEV', '{:g}')\n waveform_xincrement = _scpi_property(':WAV:XINC', '{:g}', can_set=False)\n \n \n \n \n def run(self):\n \"\"\"Start acquring, same as run button the the front\"\"\"\n self.raw_write(':RUN')\n \n def single(self):\n \"\"\"Start acquring, same as single button the the front\"\"\"\n self.raw_write(':SING')\n \n def stop(self):\n \"\"\"Stop the scope, use after a run() command\"\"\"\n self.raw_write(':STOP')\n \n def _read_waveform_chunk(self, start, stop):\n time.sleep(0.01)\n self.raw_write(f':WAV:STAR {start}')\n self.raw_write(f':WAV:STOP {stop}')\n time.sleep(0.01)\n self.raw_write(':WAV:DATA?')\n time.sleep(0.05)\n resp = self.raw_read()\n data_string = resp[11:] # strip header\n return [float(i) for i in data_string.split(',')]\n \n def _read_waveform(self, channel, depth=None, chunk_size=100_000):\n # chunk size > 589792 casues empty response. 
Chunks size > 100k sometimes odd?\n _logger.info(f\"Reading channel {channel} waveform from scope.\")\n for attempt in range(3):\n failed = False\n try:\n with self.lock:\n self.stop()\n self.raw_write(f':WAV:SOUR CHAN{channel}')\n self.raw_write(':WAV:MODE RAW')\n self.raw_write(':WAV:FORM ASC')\n time.sleep(0.2)\n if depth == None:\n depth = self.memory_depth\n depth = int(depth)\n num_batches = depth // chunk_size\n data = []\n for batch in range(num_batches):\n start = batch * chunk_size + 1\n end = batch * chunk_size + chunk_size\n _logger.debug(f'reading channel {channel}: batch {batch + 1} / {num_batches} points {start}:{end}')\n data += self._read_waveform_chunk(start, end)\n time.sleep(0.2)\n if depth % chunk_size:\n _logger.debug(f'reading channel {channel}: tail')\n data += self._read_waveform_chunk(num_batches * chunk_size + 1, depth)\n time.sleep(0.2)\n return data\n except Exception as e:\n failed = True\n if attempt < 2:\n _logger.warning(f\"Failed to read from scope, trying again: {e}\")\n else:\n _logger.error(f\"Failed to read from scope, giving up: {e}\")\n raise(e)\n if not failed:\n break\n \n def read_waveforms(self, channels=[1,2,3,4], depth=None, plot=False):\n \"\"\"Read waveforms from scope.\n \n Scope must have been triggered and be displaying a waveform. Returns an\n numpy.array where hte forst column is time and the other columns are the\n voltages of the channels in the channel list. Id depth is not None,\n only the first that many points are read.\n \"\"\"\n with self.lock:\n xinc = self.waveform_xincrement\n data = [None]\n for ch in channels:\n data.append(self._read_waveform(ch, depth))\n x = list(range(len(data[1])))\n data[0] = [i * xinc for i in x]\n if plot:\n fig = plt.figure('Scope Read')\n fig.clear()\n ax = fig.add_subplot(111)\n for ix,ch in enumerate(channels):\n ax.plot(data[0], data[ch+1], label=f'Channel {ch}')\n ax.set_xlabel('Time [s]')\n ax.set_ylabel('Voltage [V]')\n return data\n "
] |
[
[
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
phorvicheka-python/BERTopic
|
[
"7813a4552c9abc3337726e0d37596900e30410db"
] |
[
"bertopic/plotting/_heatmap.py"
] |
[
"import numpy as np\nfrom typing import List\nfrom scipy.cluster.hierarchy import fcluster, linkage\nfrom sklearn.metrics.pairwise import cosine_similarity\n\nimport plotly.express as px\nimport plotly.graph_objects as go\n\n\ndef visualize_heatmap(topic_model,\n topics: List[int] = None,\n top_n_topics: int = None,\n n_clusters: int = None,\n width: int = 800,\n height: int = 800) -> go.Figure:\n \"\"\" Visualize a heatmap of the topic's similarity matrix\n\n Based on the cosine similarity matrix between topic embeddings,\n a heatmap is created showing the similarity between topics.\n\n Arguments:\n topic_model: A fitted BERTopic instance.\n topics: A selection of topics to visualize.\n top_n_topics: Only select the top n most frequent topics.\n n_clusters: Create n clusters and order the similarity\n matrix by those clusters.\n width: The width of the figure.\n height: The height of the figure.\n\n Returns:\n fig: A plotly figure\n\n Usage:\n\n To visualize the similarity matrix of\n topics simply run:\n\n ```python\n topic_model.visualize_heatmap()\n ```\n\n Or if you want to save the resulting figure:\n\n ```python\n fig = topic_model.visualize_heatmap()\n fig.write_html(\"path/to/file.html\")\n ```\n <iframe src=\"../../getting_started/visualization/heatmap.html\"\n style=\"width:1000px; height: 720px; border: 0px;\"\"></iframe>\n \"\"\"\n\n # Select topic embeddings\n if topic_model.topic_embeddings is not None:\n embeddings = np.array(topic_model.topic_embeddings)\n else:\n embeddings = topic_model.c_tf_idf\n\n # Select topics based on top_n and topics args\n if topics is not None:\n topics = list(topics)\n elif top_n_topics is not None:\n topics = sorted(topic_model.get_topic_freq().Topic.to_list()[1:top_n_topics + 1])\n else:\n topics = sorted(list(topic_model.get_topics().keys()))\n\n # Order heatmap by similar clusters of topics\n if n_clusters:\n if n_clusters >= len(set(topics)):\n raise ValueError(\"Make sure to set `n_clusters` lower than \"\n \"the total number of unique topics.\")\n\n embeddings = embeddings[[topic + 1 for topic in topics]]\n distance_matrix = cosine_similarity(embeddings)\n Z = linkage(distance_matrix, 'ward')\n clusters = fcluster(Z, t=n_clusters, criterion='maxclust')\n\n # Extract new order of topics\n mapping = {cluster: [] for cluster in clusters}\n for topic, cluster in zip(topics, clusters):\n mapping[cluster].append(topic)\n mapping = [cluster for cluster in mapping.values()]\n sorted_topics = [topic for cluster in mapping for topic in cluster]\n else:\n sorted_topics = topics\n\n # Select embeddings\n indices = np.array([topics.index(topic) for topic in sorted_topics])\n embeddings = embeddings[indices]\n distance_matrix = cosine_similarity(embeddings)\n\n # Create nicer labels\n new_labels = [[[str(topic), None]] + topic_model.get_topic(topic) for topic in sorted_topics]\n # KK_EDITED\n new_labels = [\"_\".join([label[0] for label in labels[:9]]) for labels in new_labels]\n # new_labels = [label if len(label) < 30 else label[:27] + \"...\" for label in new_labels]\n\n fig = px.imshow(distance_matrix,\n labels=dict(color=\"Similarity Score\"),\n x=new_labels,\n y=new_labels,\n color_continuous_scale='GnBu'\n )\n\n fig.update_layout(\n title={\n 'text': \"<b>Similarity Matrix\",\n 'y': .95,\n 'x': 0.55,\n 'xanchor': 'center',\n 'yanchor': 'top',\n 'font': dict(\n size=22,\n color=\"Black\")\n },\n width=width,\n height=height,\n hoverlabel=dict(\n bgcolor=\"white\",\n font_size=16,\n font_family=\"Rockwell\"\n ),\n )\n 
fig.update_layout(showlegend=True)\n fig.update_layout(legend_title_text='Trend')\n\n return fig\n"
] |
[
[
"scipy.cluster.hierarchy.linkage",
"numpy.array",
"sklearn.metrics.pairwise.cosine_similarity",
"scipy.cluster.hierarchy.fcluster"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
dajuno/tikzplotlib
|
[
"bf62ddab7c30054616b17cced02eca46ac58412e",
"bf62ddab7c30054616b17cced02eca46ac58412e",
"bf62ddab7c30054616b17cced02eca46ac58412e",
"bf62ddab7c30054616b17cced02eca46ac58412e"
] |
[
"test/test_subplots.py",
"tikzplotlib/_text.py",
"test/test_errorbar.py",
"test/test_axvline.py"
] |
[
"from helpers import assert_equality\n\n\ndef plot():\n import numpy as np\n from matplotlib import pyplot as plt\n\n def f(t):\n s1 = np.cos(2 * np.pi * t)\n e1 = np.exp(-t)\n return np.multiply(s1, e1)\n\n fig = plt.figure()\n\n t1 = np.arange(0.0, 5.0, 0.4)\n t2 = np.arange(0.0, 5.0, 0.1)\n t3 = np.arange(0.0, 2.0, 0.1)\n\n plt.subplot(211)\n plt.plot(t1, f(t1), \"bo\", t2, f(t2), \"k--\", markerfacecolor=\"green\")\n plt.grid(True)\n plt.title(\"A tale of 2 subplots\")\n plt.ylabel(\"Damped oscillation\")\n\n plt.subplot(212)\n plt.plot(t3, np.cos(2 * np.pi * t3), \"r.\")\n plt.grid(True)\n plt.xlabel(\"time (s)\")\n plt.ylabel(\"Undamped\")\n\n fig.suptitle(\"PLOT TITLE\", fontsize=18, fontweight=\"bold\")\n\n return fig\n\n\ndef test():\n assert_equality(plot, __file__[:-3] + \"_reference.tex\")\n return\n",
"import matplotlib as mpl\nfrom matplotlib.patches import ArrowStyle\n\nfrom . import _color\n\n\ndef draw_text(data, obj):\n \"\"\"Paints text on the graph.\"\"\"\n content = []\n properties = []\n style = []\n ff = data[\"float format\"]\n if isinstance(obj, mpl.text.Annotation):\n pos = _annotation(obj, data, content)\n else:\n pos = obj.get_position()\n\n if isinstance(pos, str):\n tikz_pos = pos\n else:\n # from .util import transform_to_data_coordinates\n # pos = transform_to_data_coordinates(obj, *pos)\n\n if obj.axes:\n # If the coordinates are relative to an axis, use `axis cs`.\n tikz_pos = f\"(axis cs:{pos[0]:{ff}},{pos[1]:{ff}})\"\n else:\n # relative to the entire figure, it's a getting a littler harder. See\n # <http://tex.stackexchange.com/a/274902/13262> for a solution to the\n # problem:\n tikz_pos = (\n f\"({{$(current bounding box.south west)!{pos[0]:{ff}}!\"\n \"(current bounding box.south east)$}\"\n \"|-\"\n f\"{{$(current bounding box.south west)!{pos[1]:{ff}}!\"\n \"(current bounding box.north west)$})\"\n )\n\n text = obj.get_text()\n\n if text in [\"\", data[\"current axis title\"]]:\n # Text nodes which are direct children of Axes are typically titles. They are\n # already captured by the `title` property of pgfplots axes, so skip them here.\n return data, content\n\n size = obj.get_size()\n bbox = obj.get_bbox_patch()\n converter = mpl.colors.ColorConverter()\n # without the factor 0.5, the fonts are too big most of the time.\n # TODO fix this\n scaling = 0.5 * size / data[\"font size\"]\n if scaling != 1.0:\n properties.append(f\"scale={scaling:{ff}}\")\n\n if bbox is not None:\n _bbox(bbox, data, properties, scaling)\n\n ha = obj.get_ha()\n va = obj.get_va()\n anchor = _transform_positioning(ha, va)\n if anchor is not None:\n properties.append(anchor)\n data, col, _ = _color.mpl_color2xcolor(data, converter.to_rgb(obj.get_color()))\n properties.append(f\"text={col}\")\n properties.append(f\"rotate={obj.get_rotation():.1f}\")\n\n if obj.get_style() == \"italic\":\n style.append(\"\\\\itshape\")\n else:\n assert obj.get_style() == \"normal\"\n\n # From matplotlib/font_manager.py:\n # weight_dict = {\n # 'ultralight' : 100,\n # 'light' : 200,\n # 'normal' : 400,\n # 'regular' : 400,\n # 'book' : 400,\n # 'medium' : 500,\n # 'roman' : 500,\n # 'semibold' : 600,\n # 'demibold' : 600,\n # 'demi' : 600,\n # 'bold' : 700,\n # 'heavy' : 800,\n # 'extra bold' : 800,\n # 'black' : 900}\n #\n # get_weights returns a numeric value in the range 0-1000 or one of\n # ‘light’, ‘normal’, ‘regular’, ‘book’, ‘medium’, ‘roman’, ‘semibold’,\n # ‘demibold’, ‘demi’, ‘bold’, ‘heavy’, ‘extra bold’, ‘black’\n weight = obj.get_weight()\n if (\n weight\n in [\n \"semibold\",\n \"demibold\",\n \"demi\",\n \"bold\",\n \"heavy\",\n \"extra bold\",\n \"black\",\n ]\n or (isinstance(weight, int) and weight > 550)\n ):\n style.append(\"\\\\bfseries\")\n\n # \\lfseries isn't that common yet\n # elif weight == 'light' or (isinstance(weight, int) and weight < 300):\n # style.append('\\\\lfseries')\n\n if \"\\n\" in text:\n # http://tex.stackexchange.com/a/124114/13262\n properties.append(f\"align={ha}\")\n # Manipulating the text here is actually against mpl2tikz's policy not\n # to do that. 
On the other hand, newlines should translate into\n # newlines.\n # We might want to remove this here in the future.\n text = text.replace(\"\\n \", \"\\\\\\\\\")\n\n content.append(\n \"\\\\draw {pos} node[\\n {props}\\n]{{{text}}};\\n\".format(\n pos=tikz_pos, props=\",\\n \".join(properties), text=\" \".join(style + [text])\n )\n )\n return data, content\n\n\ndef _transform_positioning(ha, va):\n \"\"\"Converts matplotlib positioning to pgf node positioning.\n Not quite accurate but the results are equivalent more or less.\"\"\"\n if ha == \"center\" and va == \"center\":\n return None\n\n ha_mpl_to_tikz = {\"right\": \"east\", \"left\": \"west\", \"center\": \"\"}\n va_mpl_to_tikz = {\n \"top\": \"north\",\n \"bottom\": \"south\",\n \"center\": \"\",\n \"baseline\": \"base\",\n }\n return \"anchor={} {}\".format(va_mpl_to_tikz[va], ha_mpl_to_tikz[ha]).strip()\n\n\ndef _parse_annotation_coords(ff, coords, xy):\n \"\"\" Convert a coordinate name and xy into a tikz coordinate string \"\"\"\n # todo: add support for all the missing ones\n if coords == \"data\":\n x, y = xy\n return f\"(axis cs:{x:{ff}},{y:{ff}})\"\n elif coords == \"figure points\":\n raise NotImplementedError\n elif coords == \"figure pixels\":\n raise NotImplementedError\n elif coords == \"figure fraction\":\n raise NotImplementedError\n elif coords == \"axes points\":\n raise NotImplementedError\n elif coords == \"axes pixels\":\n raise NotImplementedError\n elif coords == \"axes fraction\":\n raise NotImplementedError\n elif coords == \"data\":\n raise NotImplementedError\n elif coords == \"polar\":\n raise NotImplementedError\n else:\n # unknown\n raise NotImplementedError\n\n\ndef _get_arrow_style(obj, data):\n # get a style string from a FancyArrowPatch\n arrow_translate = {\n ArrowStyle._style_list[\"-\"]: [\"-\"],\n ArrowStyle._style_list[\"->\"]: [\"->\"],\n ArrowStyle._style_list[\"<-\"]: [\"<-\"],\n ArrowStyle._style_list[\"<->\"]: [\"<->\"],\n ArrowStyle._style_list[\"|-|\"]: [\"|-|\"],\n ArrowStyle._style_list[\"-|>\"]: [\"-latex\"],\n ArrowStyle._style_list[\"<|-\"]: [\"latex-\"],\n ArrowStyle._style_list[\"<|-|>\"]: [\"latex-latex\"],\n ArrowStyle._style_list[\"]-[\"]: [\"|-|\"],\n ArrowStyle._style_list[\"-[\"]: [\"-|\"],\n ArrowStyle._style_list[\"]-\"]: [\"|-\"],\n ArrowStyle._style_list[\"fancy\"]: [\"-latex\", \"very thick\"],\n ArrowStyle._style_list[\"simple\"]: [\"-latex\", \"very thick\"],\n ArrowStyle._style_list[\"wedge\"]: [\"-latex\", \"very thick\"],\n }\n style_cls = type(obj.get_arrowstyle())\n try:\n style = arrow_translate[style_cls]\n except KeyError:\n raise NotImplementedError(f\"Unknown arrow style {style_cls}\")\n else:\n data, col, _ = _color.mpl_color2xcolor(data, obj.get_ec())\n return style + [\"draw=\" + col]\n\n\ndef _annotation(obj, data, content):\n ann_xy = obj.xy\n ann_xycoords = obj.xycoords\n ann_xytext = obj.xyann\n ann_textcoords = obj.anncoords\n\n ff = data[\"float format\"]\n\n try:\n xy_pos = _parse_annotation_coords(ff, ann_xycoords, ann_xy)\n except NotImplementedError:\n # Anything else except for explicit positioning is not supported yet\n return obj.get_position()\n\n # special cases only for text_coords\n if ann_textcoords == \"offset points\":\n x, y = ann_xytext\n unit = \"pt\"\n text_pos = f\"{xy_pos} ++({x:{ff}}{unit},{y:{ff}}{unit})\"\n # elif ann_textcoords == \"offset pixels\":\n # x, y = ann_xytext\n # unit = \"px\"\n # text_pos = f\"{xy_pos} ++({x:{ff}}{unit},{y:{ff}}{unit})\"\n else:\n try:\n text_pos = _parse_annotation_coords(ff, ann_xycoords, 
ann_xytext)\n except NotImplementedError:\n # Anything else except for explicit positioning is not supported yet\n return obj.get_position()\n\n if obj.arrow_patch:\n style = \",\".join(_get_arrow_style(obj.arrow_patch, data))\n the_arrow = (\"\\\\draw[{}] {} -- {};\\n\").format(style, text_pos, xy_pos)\n content.append(the_arrow)\n return text_pos\n\n\ndef _bbox(bbox, data, properties, scaling):\n bbox_style = bbox.get_boxstyle()\n if bbox.get_fill():\n data, fc, _ = _color.mpl_color2xcolor(data, bbox.get_facecolor())\n if fc:\n properties.append(f\"fill={fc}\")\n data, ec, _ = _color.mpl_color2xcolor(data, bbox.get_edgecolor())\n if ec:\n properties.append(f\"draw={ec}\")\n # XXX: This is ugly, too\n ff = data[\"float format\"]\n line_width = bbox.get_lw() * 0.4\n properties.append(f\"line width={line_width:{ff}}pt\")\n inner_sep = bbox_style.pad * data[\"font size\"]\n properties.append(f\"inner sep={inner_sep:{ff}}pt\")\n if bbox.get_alpha():\n properties.append(f\"fill opacity={bbox.get_alpha()}\")\n # Rounded boxes\n if isinstance(bbox_style, mpl.patches.BoxStyle.Round):\n properties.append(\"rounded corners\")\n elif isinstance(bbox_style, mpl.patches.BoxStyle.RArrow):\n data[\"tikz libs\"].add(\"shapes.arrows\")\n properties.append(\"single arrow\")\n elif isinstance(bbox_style, mpl.patches.BoxStyle.LArrow):\n data[\"tikz libs\"].add(\"shapes.arrows\")\n properties.append(\"single arrow\")\n properties.append(\"shape border rotate=180\")\n elif isinstance(bbox_style, mpl.patches.BoxStyle.DArrow):\n data[\"tikz libs\"].add(\"shapes.arrows\")\n properties.append(\"double arrow\")\n elif isinstance(bbox_style, mpl.patches.BoxStyle.Circle):\n properties.append(\"circle\")\n elif isinstance(bbox_style, mpl.patches.BoxStyle.Roundtooth):\n properties.append(\"decorate\")\n properties.append(\"decoration={snake,amplitude=0.5,segment length=3}\")\n elif isinstance(bbox_style, mpl.patches.BoxStyle.Sawtooth):\n properties.append(\"decorate\")\n properties.append(\"decoration={zigzag,amplitude=0.5,segment length=3}\")\n else:\n # TODO Round4\n assert isinstance(bbox_style, mpl.patches.BoxStyle.Square)\n\n # Line style\n if bbox.get_ls() == \"dotted\":\n properties.append(\"dotted\")\n elif bbox.get_ls() == \"dashed\":\n properties.append(\"dashed\")\n # TODO Check if there is there any way to extract the dashdot\n # pattern from matplotlib instead of hardcoding\n # an approximation?\n elif bbox.get_ls() == \"dashdot\":\n properties.append(\n \"dash pattern=on {:.3g}pt off {:.3g}pt on {:.3g}pt off {:.3g}pt\".format(\n 1.0 / scaling, 3.0 / scaling, 6.0 / scaling, 3.0 / scaling\n )\n )\n else:\n assert bbox.get_ls() == \"solid\"\n\n return\n",
"import matplotlib.pyplot as plt\r\nfrom helpers import assert_equality\r\n\r\n\r\ndef plot():\r\n # plot data\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111)\r\n\r\n x = [7.14, 7.36, 7.47, 7.52]\r\n y = [3.3, 4.4, 8.8, 5.5]\r\n ystd = [0.1, 0.5, 0.8, 0.3]\r\n\r\n ax.errorbar(x, y, yerr=ystd)\r\n return fig\r\n\r\n\r\ndef test():\r\n assert_equality(plot, \"test_errorbar_reference.tex\")\r\n",
"import matplotlib.pyplot as plt\nimport numpy as np\nfrom helpers import assert_equality\n\n\ndef plot():\n fig = plt.figure()\n np.random.seed(123)\n s = np.random.normal(0, 1, 10)\n plt.gca().set_ylim(-1.0, +1.0)\n plt.hist(s, 30)\n plt.axvline(1.96)\n return fig\n\n\ndef test():\n assert_equality(plot, __file__[:-3] + \"_reference.tex\")\n return\n\n\nif __name__ == \"__main__\":\n import helpers\n\n helpers.compare_mpl_tex(plot)\n # helpers.print_tree(plot())\n"
] |
[
[
"matplotlib.pyplot.title",
"numpy.multiply",
"numpy.arange",
"numpy.cos",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"numpy.exp",
"matplotlib.pyplot.figure"
],
[
"matplotlib.colors.ColorConverter"
],
[
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.axvline",
"numpy.random.seed",
"numpy.random.normal",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
edbons/faiky-tails
|
[
"8aeae9d1335eff2ca87dfd528191baa45f73e8d6"
] |
[
"src/model/pipeline.py"
] |
[
"import copy\nimport datetime\nimport random\nimport traceback\nimport logging\nimport os\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\n\nlogger = logging.getLogger('pipeline')\nlogger.setLevel(logging.INFO)\n\n\ndef init_random_seed(value=0):\n random.seed(value)\n np.random.seed(value)\n torch.manual_seed(value)\n torch.cuda.manual_seed(value)\n torch.backends.cudnn.deterministic = True\n\n\ndef copy_data_to_device(data, device):\n if torch.is_tensor(data):\n return data.to(device)\n elif isinstance(data, (list, tuple)):\n return [copy_data_to_device(elem, device) for elem in data]\n elif isinstance(data, dict):\n return {k: copy_data_to_device(elem, device) for k, elem in data.items()}\n raise ValueError('Недопустимый тип данных {}'.format(type(data)))\n\n\ndef train_eval_loop(model, train_dataset, val_dataset, \n lr=1e-4, epoch_n=10, batch_size=32,\n device=None, early_stopping_patience=5, l2_reg_alpha=0,\n max_batches_per_epoch_train=10000,\n max_batches_per_epoch_val=1000,\n data_loader_ctor=DataLoader,\n optimizer_ctor=None,\n lr_scheduler_ctor=None,\n shuffle_train=True,\n dataloader_workers_n=0,\n log_dir=None):\n \"\"\"\n Цикл для обучения модели. После каждой эпохи качество модели оценивается по отложенной выборке.\n :param model: torch.nn.Module - обучаемая модель\n :param train_dataset: torch.utils.data.Dataset - данные для обучения\n :param val_dataset: torch.utils.data.Dataset - данные для оценки качества\n :param criterion: функция потерь для настройки модели\n :param lr: скорость обучения\n :param epoch_n: максимальное количество эпох\n :param batch_size: количество примеров, обрабатываемых моделью за одну итерацию\n :param device: cuda/cpu - устройство, на котором выполнять вычисления\n :param early_stopping_patience: наибольшее количество эпох, в течение которых допускается\n отсутствие улучшения модели, чтобы обучение продолжалось.\n :param l2_reg_alpha: коэффициент L2-регуляризации\n :param max_batches_per_epoch_train: максимальное количество итераций на одну эпоху обучения\n :param max_batches_per_epoch_val: максимальное количество итераций на одну эпоху валидации\n :param data_loader_ctor: функция для создания объекта, преобразующего датасет в батчи\n (по умолчанию torch.utils.data.DataLoader)\n :return: кортеж из двух элементов:\n - среднее значение функции потерь на валидации на лучшей эпохе\n - лучшая модель\n \"\"\"\n enable_logs = False\n if log_dir is not None:\n fh = logging.FileHandler(os.path.join(log_dir, 'train.log'), encoding='utf-8')\n fh.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n enable_logs = True\n \n if device is None:\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n device = torch.device(device)\n model.to(device)\n\n if optimizer_ctor is None:\n optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=l2_reg_alpha)\n else:\n optimizer = optimizer_ctor(model.parameters(), lr=lr)\n\n if lr_scheduler_ctor is not None:\n lr_scheduler = lr_scheduler_ctor(optimizer)\n else:\n lr_scheduler = None\n\n train_dataloader = data_loader_ctor(train_dataset, batch_size=batch_size, shuffle=shuffle_train,\n num_workers=dataloader_workers_n)\n val_dataloader = data_loader_ctor(val_dataset, batch_size=batch_size, shuffle=False,\n num_workers=dataloader_workers_n)\n\n best_val_loss = float('inf')\n best_epoch_i = 0\n best_model = copy.deepcopy(model)\n\n for epoch_i 
in range(epoch_n):\n try:\n epoch_start = datetime.datetime.now()\n print('Эпоха {}/{}'.format(epoch_i + 1, epoch_n))\n if enable_logs:\n logger.info('Эпоха {}/{}'.format(epoch_i + 1, epoch_n))\n\n model.train()\n mean_train_loss = 0\n train_batches_n = 0\n for batch_i, batch in enumerate(tqdm(train_dataloader)):\n if batch_i > max_batches_per_epoch_train:\n break\n \n batch = copy_data_to_device(batch, device)\n \n pred = model(**batch)\n loss, _ = pred[:2]\n\n model.zero_grad()\n loss.backward()\n\n optimizer.step()\n\n mean_train_loss += float(loss)\n train_batches_n += 1\n\n mean_train_loss /= train_batches_n\n print('Эпоха: {} итераций, {:0.2f} сек'.format(train_batches_n,\n (datetime.datetime.now() - epoch_start).total_seconds()))\n print('Среднее значение функции потерь на обучении', mean_train_loss)\n if enable_logs:\n logger.info('Эпоха: {} итераций, {:0.2f} сек'.format(train_batches_n,\n (datetime.datetime.now() - epoch_start).total_seconds()))\n logger.info('Среднее значение функции потерь на обучении {}'.format(mean_train_loss))\n\n model.eval()\n mean_val_loss = 0\n val_batches_n = 0\n\n with torch.no_grad():\n for batch_i, batch in enumerate(tqdm(val_dataloader)):\n if batch_i > max_batches_per_epoch_val:\n break\n \n batch = copy_data_to_device(batch, device)\n pred = model(**batch)\n\n loss, _ = pred[:2]\n\n mean_val_loss += float(loss)\n val_batches_n += 1\n\n mean_val_loss /= val_batches_n\n print('Среднее значение функции потерь на валидации {}'.format(mean_val_loss))\n if enable_logs:\n logger.info('Среднее значение функции потерь на валидации {}'.format(mean_val_loss))\n\n if mean_val_loss < best_val_loss:\n best_epoch_i = epoch_i\n best_val_loss = mean_val_loss\n best_model = copy.deepcopy(model)\n print('Новая лучшая модель!')\n elif epoch_i - best_epoch_i > early_stopping_patience:\n print('Модель не улучшилась за последние {} эпох, прекращаем обучение'.format(\n early_stopping_patience))\n if enable_logs:\n logger.info('Модель не улучшилась за последние {} эпох, прекращаем обучение'.format(\n early_stopping_patience))\n break\n\n if lr_scheduler is not None:\n lr_scheduler.step(mean_val_loss)\n\n print()\n except KeyboardInterrupt:\n print('Досрочно остановлено пользователем')\n if enable_logs:\n logger.info('Досрочно остановлено пользователем')\n break\n except Exception as ex:\n print('Ошибка при обучении: {}\\n{}'.format(ex, traceback.format_exc()))\n if enable_logs:\n logger.info('Ошибка при обучении: {}\\n{}'.format(ex, traceback.format_exc()))\n break\n\n return best_val_loss, best_model\n"
] |
[
[
"torch.cuda.manual_seed",
"numpy.random.seed",
"torch.manual_seed",
"torch.is_tensor",
"torch.no_grad",
"torch.cuda.is_available",
"torch.device"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Andy-Grigg/surveying_problem
|
[
"d8485d2294b3c6701e798aa8e12674b1cc268627"
] |
[
"src/doe.py"
] |
[
"\"\"\" Main module for surveying solution.\nCalls each separate solution and summarizes results and performance \"\"\"\n\nimport time\nimport sys\nimport pandas as pd\nimport pyjion\nfrom itertools import product\nfrom typing import Type, TYPE_CHECKING\n\nfrom orchestrator import GridOrchestrator\n\nif TYPE_CHECKING:\n from view import GridView\n\nsys.setrecursionlimit(1000000)\n\n\nclass DesignOfExperiments:\n def __init__(\n self,\n grid_sizes: list[int],\n location_probabilities: list[float],\n model_types: list,\n pyjion_state: list[bool],\n ):\n\n self.grid_sizes = grid_sizes\n self.location_probabilities = location_probabilities\n self.model_types = model_types\n self.pyjion_state = pyjion_state\n\n def run(self) -> pd.DataFrame:\n results = []\n\n for grid_size, location_probability, model_type, pyjion_state in product(\n self.grid_sizes,\n self.location_probabilities,\n self.model_types,\n self.pyjion_state,\n ):\n print(\"*\" * 30)\n print(\n f\"Grid size: {grid_size}, Probability: {location_probability}, \"\n f\"Model Type: {model_type}, Pyjion: {pyjion_state}\"\n )\n self._set_pyjion(pyjion_state)\n grid = GridOrchestrator.get_grid_view_with_parameters(\n grid_size=grid_size,\n location_probability=location_probability,\n model_type=model_type,\n )\n row_dict = self._run_method(grid, model_type)\n results.append(row_dict)\n return pd.DataFrame(results)\n\n @staticmethod\n def _set_pyjion(pyjion_state: bool):\n if pyjion_state:\n pyjion.enable()\n pyjion.config(pgc=False)\n else:\n pyjion.disable()\n\n @staticmethod\n def _run_method(grid: \"GridView\", model_type: Type) -> dict[str, int | float | str]:\n print(f\"-\" * 20)\n print(f\"Method: {grid}\")\n sim_run = SimulationRun(grid)\n sim_run.execute()\n sim_run.print_grid()\n sim_run.print_results()\n row_dict = {\n \"Grid Size\": grid.size,\n \"Probability\": grid.location_probability,\n \"Number of Sites\": grid.number_of_sites,\n \"Method\": model_type,\n \"Number of Wells\": sim_run.number_of_wells,\n \"Time\": sim_run.time_to_run,\n }\n return row_dict\n\n\nclass SimulationRun:\n def __init__(self, grid: \"GridView\"):\n self.grid = grid\n self.wells = None\n self._time_taken = None\n\n @property\n def number_of_wells(self) -> int | None:\n if self.wells is None:\n return None\n return len(self.wells)\n\n @property\n def time_to_run(self) -> float:\n return self._time_taken\n\n @property\n def average_well_size(self) -> float | None:\n if self.number_of_wells is None:\n return None\n if self.number_of_wells == 0:\n return 0\n return sum([len(well) for well in self.wells]) / self.number_of_wells\n\n def execute(self):\n \"\"\"Main module for surveying solution.\n grid_size: One-dimensional size of the grid to be used for evaluation\"\"\"\n\n start = time.perf_counter()\n self.wells = self.grid.get_reservoirs()\n stop = time.perf_counter()\n self._time_taken = stop - start\n\n def print_grid(self):\n result = self.grid.to_ascii_art()\n print(result)\n\n def print_results(self):\n if self.wells is None:\n return\n\n print(\"Results...\")\n if self.number_of_wells < 10:\n self._print_reservoir_details()\n print(f\"Average well size: {self.average_well_size}\")\n print(f\"Number of wells needed: {self.number_of_wells}\")\n print(f\"Time to run: {self.time_to_run}\")\n\n def _print_reservoir_details(self):\n if self.number_of_wells < 10:\n for reservoir in self.wells:\n reservoir = list(reservoir)\n reservoir.sort()\n reservoir_locations = \"; \".join(map(str, reservoir))\n print(f\"Well size = {len(reservoir)}, locations: 
{reservoir_locations}\")\n"
] |
[
[
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
zcyang/tensorflow
|
[
"b327647368117db19b42c6fd75d0aa67a66dbf26"
] |
[
"tensorflow/python/ops/array_ops.py"
] |
[
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"## Casting\n\nTensorFlow provides several operations that you can use to cast tensor data\ntypes in your graph.\n\n@@string_to_number\n@@to_double\n@@to_float\n@@to_bfloat16\n@@to_int32\n@@to_int64\n@@cast\n@@saturate_cast\n\n## Shapes and Shaping\n\nTensorFlow provides several operations that you can use to determine the shape\nof a tensor and change the shape of a tensor.\n\n@@shape\n@@size\n@@rank\n@@reshape\n@@squeeze\n@@expand_dims\n@@meshgrid\n\n## Slicing and Joining\n\nTensorFlow provides several operations to slice or extract parts of a tensor,\nor join multiple tensors together.\n\n@@slice\n@@split\n@@tile\n@@pad\n@@concat\n@@pack\n@@unpack\n@@reverse_sequence\n@@reverse\n@@transpose\n@@extract_image_patches\n@@space_to_batch\n@@batch_to_space\n@@space_to_depth\n@@depth_to_space\n@@gather\n@@gather_nd\n@@dynamic_partition\n@@dynamic_stitch\n@@boolean_mask\n@@one_hot\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\nimport numpy as np\n\nfrom tensorflow.python.framework import common_shapes\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\n# 'Constant' gets imported in the module 'array_ops'.\nfrom tensorflow.python.framework.constant_op import constant\nfrom tensorflow.python.ops import gen_array_ops\nfrom tensorflow.python.ops import gen_math_ops\nfrom tensorflow.python.ops import logging_ops\n# go/tf-wildcard-import\n# pylint: disable=wildcard-import\nfrom tensorflow.python.ops.gen_array_ops import *\n# pylint: enable=wildcard-import\n\n\n# Used for slicing to specify a new 1 size dimension\nnewaxis = None\n\n# We override the 'slice' for the \"slice\" op, so we keep python's\n# existing 'slice' for later use in this module.\n_baseslice = slice\n\n\n# Aliases for some automatically-generated names.\nlistdiff = gen_array_ops.list_diff\n\n\ndef shape(input, name=None):\n \"\"\"Returns the shape of a tensor.\n\n This operation returns a 1-D integer tensor representing the shape of `input`.\n\n For example:\n\n ```python\n # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]\n shape(t) ==> [2, 2, 3]\n ```\n\n Args:\n input: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `int32`.\n \"\"\"\n with ops.op_scope([input], name, \"Shape\") as name:\n if isinstance(input, ops.SparseTensor):\n return input.shape\n else:\n return gen_array_ops.shape(input, name=name)\n\n\ndef rank(input, name=None):\n \"\"\"Returns the rank of a tensor.\n\n This operation returns an integer representing the rank of `input`.\n\n For example:\n\n ```python\n # 
't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]\n # shape of tensor 't' is [2, 2, 3]\n rank(t) ==> 3\n ```\n\n **Note**: The rank of a tensor is not the same as the rank of a matrix. The\n rank of a tensor is the number of indices required to uniquely select each\n element of the tensor. Rank is also known as \"order\", \"degree\", or \"ndims.\"\n\n Args:\n input: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `int32`.\n \"\"\"\n with ops.op_scope([input], name, \"Rank\") as name:\n if isinstance(input, ops.SparseTensor):\n return gen_array_ops.size(input.shape, name=name)\n else:\n return gen_array_ops.rank(input, name=name)\n\n\n# DEPRECATED use init_ops.zeros_initializer\n# TODO(irving) Move it to init_ops.py\ndef zeros_initializer(shape, dtype=dtypes.float32):\n \"\"\"An adaptor for zeros() to match the Initializer spec.\"\"\"\n return zeros(shape, dtype)\n\n\ndef _NewSliceHelper(tensor, slice_spec):\n \"\"\"Overload for Tensor.__getitem__.\n\n This operation extracts the specified region from the tensor.\n The notation is similar to numpy with the restriction that\n currently only support basic indexing. That means that\n using a tensor as input is not currently allowed\n\n Args:\n tensor: An ops.Tensor object.\n slice_spec: The arguments to Tensor.__getitem__.\n\n Returns:\n The appropriate slice of \"tensor\", based on \"slice_spec\".\n\n Raises:\n ValueError: If a slice range is negative size.\n TypeError: If the slice indices aren't int, slice, or Ellipsis.\n \"\"\"\n\n if not isinstance(slice_spec, (list, tuple)):\n slice_spec = [slice_spec]\n\n begin, end, strides = [], [], []\n index = 0\n\n new_axis_mask, shrink_axis_mask = 0, 0\n begin_mask, end_mask = 0, 0\n ellipse_mask = 0\n for s in slice_spec:\n if isinstance(s, _baseslice):\n strides.append(s.step if s.step is not None else 1)\n # python doesn't always use None when constructing ranges\n # for example a[:] gives slice(None,sys.maxsize,None)\n # whereas a[::1] gives slice(None,None,None)\n if s.start is not None and s.start is not sys.maxsize:\n begin.append(s.start)\n else:\n begin.append(0)\n begin_mask |= (1 << index)\n if s.stop is not None and s.stop != sys.maxsize:\n end.append(s.stop)\n else:\n end.append(0)\n end_mask |= (1 << index)\n elif s is Ellipsis:\n begin.append(0)\n end.append(0)\n strides.append(1)\n ellipse_mask |= (1 << index)\n elif s is newaxis:\n begin.append(0)\n end.append(0)\n strides.append(1)\n new_axis_mask |= (1 << index)\n else:\n try:\n s = int(s)\n except TypeError:\n raise TypeError(\"Bad slice index %s of type %s\" % (s, type(s)))\n\n begin.append(s)\n end.append(s + 1)\n strides.append(1)\n shrink_axis_mask |= (1 << index)\n index += 1\n\n return strided_slice(tensor,\n pack(begin),\n pack(end),\n pack(strides),\n begin_mask=begin_mask,\n end_mask=end_mask,\n shrink_axis_mask=shrink_axis_mask,\n new_axis_mask=new_axis_mask,\n ellipse_mask=ellipse_mask)\n\n\n# pylint: disable=undefined-variable,protected-access\ndef _SliceHelper(tensor, slice_spec):\n \"\"\"Overload for Tensor.__getitem__.\n\n Currently the size of the slice must be statically known in each dimension,\n i.e. 
the \"stop\" of the slice must not be omitted.\n\n TODO(mrry): Support slices where the sizes are not specified.\n TODO(mrry): Support negative indices in slices with numpy/Python semantics.\n\n Args:\n tensor: An ops.Tensor object.\n slice_spec: The arguments to Tensor.__getitem__.\n\n Returns:\n The appropriate slice of \"tensor\", based on \"slice_spec\".\n\n Raises:\n ValueError: If a slice range is negative size.\n TypeError: If the slice indices aren't int, slice, or Ellipsis.\n \"\"\"\n if not isinstance(slice_spec, (list, tuple)):\n slice_spec = [slice_spec]\n indices = []\n sizes = []\n squeeze_dims = []\n for dim, s in enumerate(slice_spec):\n if isinstance(s, _baseslice):\n if s.step not in (None, 1):\n raise NotImplementedError(\n \"Steps other than 1 are not currently supported\")\n start = s.start if s.start is not None else 0\n if start < 0:\n raise NotImplementedError(\n \"Negative start indices are not currently supported\")\n indices.append(start)\n if s.stop is not None and s.stop < 0:\n raise NotImplementedError(\n \"Negative stop indices are not currently supported\")\n # NOTE(mrry): If the stop is not specified, Python substitutes\n # sys.maxsize, which is typically (2 ** 63) - 1. Since Slice currently\n # supports signed DT_INT32 arguments, we use -1 to specify that all\n # elements should be captured.\n if s.stop is None or s.stop == sys.maxsize:\n sizes.append(-1)\n else:\n if start > s.stop:\n raise ValueError(\"Stop must be at least start\")\n sizes.append(s.stop - start)\n elif s is Ellipsis:\n raise NotImplementedError(\"Ellipsis is not currently supported\")\n else:\n try:\n s = int(s)\n except TypeError:\n raise TypeError(\"Bad slice index %s of type %s\" % (s, type(s)))\n if s < 0:\n raise NotImplementedError(\"Negative indices are currently unsupported\")\n indices.append(s)\n sizes.append(1)\n squeeze_dims.append(dim)\n sliced = slice(tensor, indices, sizes)\n if squeeze_dims:\n return squeeze(sliced, squeeze_dims=squeeze_dims)\n else:\n return sliced\n\n\ndef slice(input_, begin, size, name=None):\n \"\"\"Extracts a slice from a tensor.\n\n This operation extracts a slice of size `size` from a tensor `input` starting\n at the location specified by `begin`. The slice `size` is represented as a\n tensor shape, where `size[i]` is the number of elements of the 'i'th dimension\n of `input` that you want to slice. The starting location (`begin`) for the\n slice is represented as an offset in each dimension of `input`. In other\n words, `begin[i]` is the offset into the 'i'th dimension of `input` that you\n want to slice from.\n\n `begin` is zero-based; `size` is one-based. If `size[i]` is -1,\n all remaining elements in dimension i are included in the\n slice. 
In other words, this is equivalent to setting:\n\n `size[i] = input.dim_size(i) - begin[i]`\n\n This operation requires that:\n\n `0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n]`\n\n For example:\n\n ```\n # 'input' is [[[1, 1, 1], [2, 2, 2]],\n # [[3, 3, 3], [4, 4, 4]],\n # [[5, 5, 5], [6, 6, 6]]]\n tf.slice(input, [1, 0, 0], [1, 1, 3]) ==> [[[3, 3, 3]]]\n tf.slice(input, [1, 0, 0], [1, 2, 3]) ==> [[[3, 3, 3],\n [4, 4, 4]]]\n tf.slice(input, [1, 0, 0], [2, 1, 3]) ==> [[[3, 3, 3]],\n [[5, 5, 5]]]\n ```\n\n Args:\n input_: A `Tensor`.\n begin: An `int32` or `int64` `Tensor`.\n size: An `int32` or `int64` `Tensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` the same type as `input`.\n \"\"\"\n return gen_array_ops._slice(input_, begin, size, name=name)\n\n\n# pylint: disable=invalid-name\ndef strided_slice(input_,\n begin,\n end,\n strides,\n begin_mask=0,\n end_mask=0,\n ellipse_mask=0,\n new_axis_mask=0,\n shrink_axis_mask=0,\n name=None):\n \"\"\"Extracts a strided slice from a tensor.\n\n To a first order, this operation extracts a slice of size `end - begin`\n from a tensor `input`\n starting at the location specified by `begin`. The slice continues by adding\n `stride` to the `begin` index until all dimensions are not less than `end`.\n Note that components of stride can be negative, which causes a reverse\n slice.\n\n This operation can be thought of an encoding of a numpy style sliced\n range. Given a python slice input[<spec0>, <spec1>, ..., <specn>]\n this function will be called as follows.\n\n `begin`, `end`, and `strides` will be all length n. n is in general\n not the same dimensionality as `input`.\n\n For the ith spec,\n `begin_mask`, `end_mask`, `ellipse_mask`, `new_axis_mask`,\n and `shrink_axis_mask` will have the ith bit corrsponding to\n the ith spec.\n\n If the ith bit of `begin_mask` is non-zero, `begin[i]` is ignored and\n the fullest possible range in that dimension is used instead.\n `end_mask` works analogously, except with the end range.\n\n `foo[5:,:,:3]` on a 7x8x9 tensor is equivalent to `foo[5:7,0:8,0:3]`.\n `foo[::-1]` reverses a tensor with shape 8.\n\n\n If the ith bit of `ellipse_mask`, as many unspecified dimensions\n as needed will be inserted between other dimensions. Only one\n non-zero bit is allowed in `ellipse_mask`.\n\n For example `foo[3:5,...,4:5]` on a shape 10x3x3x10 tensor is\n equivalent to `foo[3:5,:,:,4:5]` and\n `foo[3:5,...]` is equivalent to `foo[3:5,:,:,:]`.\n\n If the ith bit of `new_axis_mask` is one, then a `begin`,\n `end`, and `stride` are ignored and a new length 1 dimension is\n added at this point in the output tensor.\n\n For example `foo[3:5,4]` on a 10x8 tensor produces a shape 2 tensor\n whereas `foo[3:5,4:5]` produces a shape 2x1 tensor with shrink_mask\n being 1<<1 == 2.\n\n If the ith bit of `shrink_axis_mask` is one, then `begin`,\n `end[i]`, and `stride[i]` are used to do a slice in the appropriate\n dimension, but the output tensor will be reduced in dimensionality\n by one. 
This is only valid if the ith entry of slice[i]==1.\n\n NOTE: `begin` and `end` are zero-indexed`.\n `strides` entries must be non-zero.\n\n\n ```\n # 'input' is [[[1, 1, 1], [2, 2, 2]],\n # [[3, 3, 3], [4, 4, 4]],\n # [[5, 5, 5], [6, 6, 6]]]\n tf.slice(input, [1, 0, 0], [2, 1, 3], [1, 1, 1]) ==> [[[3, 3, 3]]]\n tf.slice(input, [1, 0, 0], [2, 2, 3], [1, 1, 1]) ==> [[[3, 3, 3],\n [4, 4, 4]]]\n tf.slice(input, [1, 1, 0], [2, -1, 3], [1, -1, 1]) ==>[[[4, 4, 4],\n [3, 3, 3]]]\n ```\n\n Args:\n input_: A `Tensor`.\n begin: An `int32` or `int64` `Tensor`.\n end: An `int32` or `int64` `Tensor`.\n strides: An `int32` or `int64` `Tensor`.\n begin_mask: An `int32` mask.\n end_mask: An `int32` mask.\n ellipse_mask: An `int32` mask.\n new_axis_mask: An `int32` mask.\n shrink_axis_mask: An `int32` mask.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` the same type as `input`.\n \"\"\"\n return gen_array_ops.strided_slice(input_,\n begin,\n end,\n strides,\n name=name,\n begin_mask=begin_mask,\n end_mask=end_mask,\n ellipse_mask=ellipse_mask,\n new_axis_mask=new_axis_mask,\n shrink_axis_mask=shrink_axis_mask)\n\n# TODO(aselle): When gradient is added and performance verified switch\n# ops.Tensor._override_operator(\"__getitem__\", _NewSliceHelper)\nops.Tensor._override_operator(\"__getitem__\", _SliceHelper)\n\n\ndef pack(values, name=\"pack\"):\n \"\"\"Packs a list of rank-`R` tensors into one rank-`(R+1)` tensor.\n\n Packs tensors in `values` into a tensor with rank one higher than each tensor\n in `values` and shape `[len(values)] + values[0].shape`. The output satisfies\n `output[i, ...] = values[i][...]`.\n\n This is the opposite of unpack. The numpy equivalent is\n\n tf.pack([x, y, z]) = np.asarray([x, y, z])\n\n Args:\n values: A list of `Tensor` objects with the same shape and type.\n name: A name for this operation (optional).\n\n Returns:\n output: A packed `Tensor` with the same type as `values`.\n \"\"\"\n try:\n # If the input is a constant list, it can just be converted to a constant op\n return ops.convert_to_tensor(values, name=name)\n except (TypeError, ValueError):\n # Input list contains non-constant tensors\n return gen_array_ops._pack(values, name=name)\n\n\n# pylint: disable=invalid-name\ndef _autopacking_helper(list_or_tuple, dtype, name):\n \"\"\"Converts the given list or tuple to a tensor by packing.\n\n Args:\n list_or_tuple: A (possibly nested) list or tuple containing a tensor.\n dtype: The element type of the returned tensor.\n name: A name for the returned tensor.\n\n Returns:\n A `tf.Tensor` with value equivalent to `list_or_tuple`.\n \"\"\"\n must_pack = False\n converted_elems = []\n with ops.name_scope(name) as scope:\n for i, elem in enumerate(list_or_tuple):\n if ops.is_dense_tensor_like(elem):\n if dtype is not None and elem.dtype.base_dtype != dtype:\n raise TypeError(\n \"Cannot convert a list containing a tensor of dtype \"\n \"%s to %s (Tensor is: %r)\" % (elem.dtype, dtype, elem))\n converted_elems.append(elem)\n must_pack = True\n elif isinstance(elem, (list, tuple)):\n converted_elem = _autopacking_helper(elem, dtype, str(i))\n if ops.is_dense_tensor_like(converted_elem):\n must_pack = True\n converted_elems.append(converted_elem)\n else:\n converted_elems.append(elem)\n if must_pack:\n elems_as_tensors = []\n for i, elem in enumerate(converted_elems):\n if ops.is_dense_tensor_like(elem):\n elems_as_tensors.append(elem)\n else:\n # NOTE(mrry): This is inefficient, but it enables us to\n # handle the case where the list arguments are 
other\n # convertible-to-tensor types, such as numpy arrays.\n elems_as_tensors.append(\n constant_op.constant(elem, dtype=dtype, name=str(i)))\n return gen_array_ops._pack(elems_as_tensors, name=scope)\n else:\n return converted_elems\n\n\ndef _get_dtype_from_nested_lists(list_or_tuple):\n \"\"\"Returns the dtype of any tensor-like object in `list_or_tuple`, if found.\n\n Args:\n list_or_tuple: A list or tuple representing an object that can be\n converted to a `tf.Tensor`.\n\n Returns:\n The dtype of any tensor-like object in `list_or_tuple`, or `None` if no\n such object exists.\n \"\"\"\n for elem in list_or_tuple:\n if ops.is_dense_tensor_like(elem):\n return elem.dtype.base_dtype\n elif isinstance(elem, (list, tuple)):\n maybe_dtype = _get_dtype_from_nested_lists(elem)\n if maybe_dtype is not None:\n return maybe_dtype\n return None\n\n\ndef _autopacking_conversion_function(v, dtype=None, name=None, as_ref=False):\n \"\"\"Tensor conversion function that automatically packs arguments.\"\"\"\n if as_ref:\n return NotImplemented\n inferred_dtype = _get_dtype_from_nested_lists(v)\n if inferred_dtype is None:\n # We did not find any tensor-like objects in the nested lists, so defer to\n # other conversion functions.\n return NotImplemented\n if dtype is not None and dtype != inferred_dtype:\n return NotImplemented\n return _autopacking_helper(v, inferred_dtype, name or \"packed\")\n# pylint: enable=invalid-name\n\n\n# NOTE: Register this conversion function to run *before* one that\n# assumes every element is a value.\nops.register_tensor_conversion_function(\n (list, tuple), _autopacking_conversion_function, 99)\n\n\ndef unpack(value, num=None, name=\"unpack\"):\n \"\"\"Unpacks the outer dimension of a rank-`R` tensor into rank-`(R-1)` tensors.\n\n Unpacks `num` tensors from `value` along the first dimension.\n If `num` is not specified (the default), it is inferred from `value`'s shape.\n If `value.shape[0]` is not known, `ValueError` is raised.\n\n The ith tensor in `output` is the slice `value[i, ...]`. Each tensor in\n `output` has shape `value.shape[1:]`.\n\n This is the opposite of pack. The numpy equivalent is\n\n tf.unpack(x, n) = list(x)\n\n Args:\n value: A rank `R > 0` `Tensor` to be unpacked.\n num: An `int`. The first dimension of value. Automatically inferred if\n `None` (the default).\n name: A name for the operation (optional).\n\n Returns:\n The list of `Tensor` objects unpacked from `value`.\n\n Raises:\n ValueError: If `num` is unspecified and cannot be inferred.\n \"\"\"\n if num is None:\n value = ops.convert_to_tensor(value)\n shape = value.get_shape()\n num = shape[0].value\n if num is None:\n raise ValueError(\"Cannot infer num from shape %s\" % shape)\n return gen_array_ops._unpack(value, num=num, name=name)\n\n\ndef concat(concat_dim, values, name=\"concat\"):\n \"\"\"Concatenates tensors along one dimension.\n\n Concatenates the list of tensors `values` along dimension `concat_dim`. If\n `values[i].shape = [D0, D1, ... Dconcat_dim(i), ...Dn]`, the concatenated\n result has shape\n\n [D0, D1, ... 
Rconcat_dim, ...Dn]\n\n where\n\n Rconcat_dim = sum(Dconcat_dim(i))\n\n That is, the data from the input tensors is joined along the `concat_dim`\n dimension.\n\n The number of dimensions of the input tensors must match, and all dimensions\n except `concat_dim` must be equal.\n\n For example:\n\n ```python\n t1 = [[1, 2, 3], [4, 5, 6]]\n t2 = [[7, 8, 9], [10, 11, 12]]\n tf.concat(0, [t1, t2]) ==> [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]\n tf.concat(1, [t1, t2]) ==> [[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]]\n\n # tensor t3 with shape [2, 3]\n # tensor t4 with shape [2, 3]\n tf.shape(tf.concat(0, [t3, t4])) ==> [4, 3]\n tf.shape(tf.concat(1, [t3, t4])) ==> [2, 6]\n ```\n\n Args:\n concat_dim: 0-D `int32` `Tensor`. Dimension along which to concatenate.\n values: A list of `Tensor` objects or a single `Tensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` resulting from concatenation of the input tensors.\n \"\"\"\n if not isinstance(values, (list, tuple)):\n values = [values]\n # TODO(mrry): Change to return values?\n if len(values) == 1: # Degenerate case of one tensor.\n # Make a throwaway call to convert_to_tensor to make sure\n # that concat_dim is of the correct type, and make sure that\n # the returned tensor is a scalar.\n # TODO(keveman): Implement a standalone type and shape checker.\n with ops.name_scope(name) as scope:\n ops.convert_to_tensor(concat_dim,\n name=\"concat_dim\",\n dtype=dtypes.int32).get_shape(\n ).assert_is_compatible_with(tensor_shape.scalar())\n return identity(values[0], name=scope)\n return gen_array_ops._concat(concat_dim=concat_dim,\n values=values,\n name=name)\n\n\[email protected](\"Pack\")\ndef _PackShape(op):\n input_shape = op.inputs[0].get_shape()\n for inp in op.inputs[1:]:\n input_shape = input_shape.merge_with(inp.get_shape())\n return [tensor_shape.TensorShape([len(op.inputs)]).concatenate(input_shape)]\n\n\[email protected](\"Unpack\")\ndef _UnpackShape(op):\n input_shape = op.inputs[0].get_shape()\n return [input_shape[1:]] * op.get_attr(\"num\")\n\n\[email protected](\"Concat\")\ndef _ConcatShape(op):\n concat_dim = tensor_util.constant_value(op.inputs[0])\n if concat_dim is None:\n # Return an unknown shape with the same rank as the inputs, or an\n # unknown rank if no input's rank is known.\n rank = None\n for value in op.inputs[1:]:\n if rank is not None:\n value.get_shape().assert_has_rank(rank)\n else:\n rank = value.get_shape().ndims\n if rank == 0:\n raise ValueError(\"Can't concatenate scalars (use tf.pack instead)\")\n return [tensor_shape.unknown_shape(ndims=rank)]\n\n else:\n # Merge all the non-concat dims, and sum the concat dim to make an\n # output shape.\n concat_dim = int(concat_dim)\n if concat_dim < 0:\n raise ValueError(\"Expected concat_dim >= 0, but got %d\" % concat_dim)\n\n output_shape = op.inputs[1].get_shape()\n for value in op.inputs[2:]:\n value_shape = value.get_shape()\n if value_shape.ndims is not None and concat_dim >= value_shape.ndims:\n raise ValueError(\"Expected concat_dim in range [0, %d), but got %d\" %\n (value_shape.ndims, concat_dim))\n before = output_shape[:concat_dim].merge_with(value_shape[:concat_dim])\n at = output_shape[concat_dim] + value_shape[concat_dim]\n after = output_shape[\n concat_dim + 1:].merge_with(value_shape[concat_dim + 1:])\n output_shape = before.concatenate(at).concatenate(after)\n return [output_shape]\n\n\[email protected](\"ConcatOffset\")\ndef _ConcatOffsetShape(op):\n return [x.get_shape() for x in op.inputs[1:]]\n\n\ndef 
boolean_mask(tensor, mask, name=\"boolean_mask\"):\n \"\"\"Apply boolean mask to tensor. Numpy equivalent is `tensor[mask]`.\n\n ```python\n # 1-D example\n tensor = [0, 1, 2, 3]\n mask = [True, False, True, False]\n boolean_mask(tensor, mask) ==> [0, 2]\n ```\n\n In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match\n the first K dimensions of `tensor`'s shape. We then have:\n `boolean_mask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]`\n where `(i1,...,iK)` is the ith `True` entry of `mask` (row-major order).\n\n Args:\n tensor: N-D tensor.\n mask: K-D boolean tensor, K <= N and K must be known statically.\n name: A name for this operation (optional).\n\n Returns:\n Tensor populated by entries in `tensor` corresponding to `True` values in\n `mask`.\n\n Raises:\n ValueError: If shapes do not conform.\n\n Examples:\n\n ```python\n # 2-D example\n tensor = [[1, 2], [3, 4], [5, 6]]\n mask = [True, False, True]\n boolean_mask(tensor, mask) ==> [[1, 2], [5, 6]]\n ```\n \"\"\"\n def _apply_mask_1d(reshaped_tensor, mask):\n \"\"\"Mask tensor along dimension 0 with a 1-D mask.\"\"\"\n indices = squeeze(where(mask), squeeze_dims=[1])\n return gather(reshaped_tensor, indices)\n\n with ops.op_scope([tensor, mask], name):\n tensor = ops.convert_to_tensor(tensor, name=\"tensor\")\n mask = ops.convert_to_tensor(mask, name=\"mask\")\n\n shape_mask = mask.get_shape()\n ndims_mask = shape_mask.ndims\n shape_tensor = tensor.get_shape()\n if ndims_mask == 0:\n raise ValueError(\"mask cannot be scalar.\")\n if ndims_mask is None:\n raise ValueError(\n \"mask dimensions must be specified, even if some dimensions are None\"\n \". E.g. shape=[None] is ok, but shape=None is not.\")\n shape_tensor[:ndims_mask].assert_is_compatible_with(shape_mask)\n\n tensor = reshape(tensor, concat(0, [[-1], shape(tensor)[ndims_mask:]]))\n first_dim = shape_tensor[:ndims_mask].num_elements()\n tensor.set_shape(\n tensor_shape.as_shape([first_dim])\n .concatenate(shape_tensor[ndims_mask:]))\n\n mask = reshape(mask, [-1])\n return _apply_mask_1d(tensor, mask)\n\n\ndef sparse_mask(a, mask_indices, name=None):\n \"\"\"Masks elements of `IndexedSlices`.\n\n Given an `IndexedSlices` instance `a`, returns another `IndexedSlices` that\n contains a subset of the slices of `a`. 
Only the slices at indices not\n specified in `mask_indices` are returned.\n\n This is useful when you need to extract a subset of slices in an\n `IndexedSlices` object.\n\n For example:\n\n ```python\n # `a` contains slices at indices [12, 26, 37, 45] from a large tensor\n # with shape [1000, 10]\n a.indices => [12, 26, 37, 45]\n tf.shape(a.values) => [4, 10]\n\n # `b` will be the subset of `a` slices at its second and third indices, so\n # we want to mask its first and last indices (which are at absolute\n # indices 12, 45)\n b = tf.sparse_mask(a, [12, 45])\n\n b.indices => [26, 37]\n tf.shape(b.values) => [2, 10]\n\n ```\n\n Args:\n * `a`: An `IndexedSlices` instance.\n * `mask_indices`: Indices of elements to mask.\n * `name`: A name for the operation (optional).\n\n Returns:\n The masked `IndexedSlices` instance.\n \"\"\"\n with ops.op_scope([a, mask_indices], name, \"sparse_mask\") as name:\n indices = a.indices\n out_indices, to_gather = listdiff(indices, mask_indices)\n out_values = gather(a.values, to_gather, name=name)\n return ops.IndexedSlices(out_values, out_indices, a.dense_shape)\n\n\ndef split(split_dim, num_split, value, name=\"split\"):\n \"\"\"Splits a tensor into `num_split` tensors along one dimension.\n\n Splits `value` along dimension `split_dim` into `num_split` smaller tensors.\n Requires that `num_split` evenly divide `value.shape[split_dim]`.\n\n For example:\n\n ```python\n # 'value' is a tensor with shape [5, 30]\n # Split 'value' into 3 tensors along dimension 1\n split0, split1, split2 = tf.split(1, 3, value)\n tf.shape(split0) ==> [5, 10]\n ```\n\n Args:\n split_dim: A 0-D `int32` `Tensor`. The dimension along which to split.\n Must be in the range `[0, rank(value))`.\n num_split: A Python integer. The number of ways to split.\n value: The `Tensor` to split.\n name: A name for the operation (optional).\n\n Returns:\n `num_split` `Tensor` objects resulting from splitting `value`.\n \"\"\"\n return gen_array_ops._split(split_dim=split_dim,\n num_split=num_split,\n value=value,\n name=name)\n\n\[email protected](\"Reverse\")\ndef _ReverseShape(op):\n input_shape = op.inputs[0].get_shape()\n dims_shape = op.inputs[1].get_shape().with_rank(1)\n if dims_shape[0].value is not None:\n input_shape = input_shape.with_rank(dims_shape[0])\n if input_shape.ndims is not None and input_shape.ndims > 8:\n raise ValueError(\n \"tf.reverse() does not work on tensors with more than 8 dimensions\")\n return [input_shape]\n\n\ndef transpose(a, perm=None, name=\"transpose\"):\n \"\"\"Transposes `a`. Permutes the dimensions according to `perm`.\n\n The returned tensor's dimension i will correspond to the input dimension\n `perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is\n the rank of the input tensor. 
Hence by default, this operation performs a\n regular matrix transpose on 2-D input Tensors.\n\n For example:\n\n ```python\n # 'x' is [[1 2 3]\n # [4 5 6]]\n tf.transpose(x) ==> [[1 4]\n [2 5]\n [3 6]]\n\n # Equivalently\n tf.transpose(x, perm=[1, 0]) ==> [[1 4]\n [2 5]\n [3 6]]\n\n # 'perm' is more useful for n-dimensional tensors, for n > 2\n # 'x' is [[[1 2 3]\n # [4 5 6]]\n # [[7 8 9]\n # [10 11 12]]]\n # Take the transpose of the matrices in dimension-0\n tf.transpose(x, perm=[0, 2, 1]) ==> [[[1 4]\n [2 5]\n [3 6]]\n\n [[7 10]\n [8 11]\n [9 12]]]\n ```\n\n Args:\n a: A `Tensor`.\n perm: A permutation of the dimensions of `a`.\n name: A name for the operation (optional).\n\n Returns:\n A transposed `Tensor`.\n \"\"\"\n with ops.op_scope([a], name, \"transpose\") as name:\n if perm is None:\n rank = gen_array_ops.rank(a)\n perm = (rank - 1) - gen_math_ops._range(0, rank, 1)\n ret = gen_array_ops.transpose(a, perm, name=name)\n # NOTE(mrry): Setting the shape explicitly because\n # reverse is not handled by the shape function.\n input_shape = ret.op.inputs[0].get_shape().dims\n if input_shape is not None:\n ret.set_shape(input_shape[::-1])\n else:\n ret = gen_array_ops.transpose(a, perm, name=name)\n return ret\n\n\ndef zeros(shape, dtype=dtypes.float32, name=None):\n \"\"\"Creates a tensor with all elements set to zero.\n\n This operation returns a tensor of type `dtype` with shape `shape` and\n all elements set to zero.\n\n For example:\n\n ```python\n tf.zeros([3, 4], int32) ==> [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]\n ```\n\n Args:\n shape: Either a list of integers, or a 1-D `Tensor` of type `int32`.\n dtype: The type of an element in the resulting `Tensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` with all elements set to zero.\n \"\"\"\n with ops.op_scope([shape], name, \"zeros\") as name:\n try:\n shape = tensor_shape.as_shape(shape)\n output = constant(0, shape=shape, dtype=dtype, name=name)\n except (TypeError, ValueError):\n shape = ops.convert_to_tensor(shape, dtype=dtypes.int32, name=\"shape\")\n output = fill(shape, constant(0, dtype=dtype), name=name)\n assert output.dtype.base_dtype == dtypes.as_dtype(dtype).base_dtype\n return output\n\n\ndef zeros_like(tensor, dtype=None, name=None):\n \"\"\"Creates a tensor with all elements set to zero.\n\n Given a single tensor (`tensor`), this operation returns a tensor of the\n same type and shape as `tensor` with all elements set to zero. Optionally,\n you can use `dtype` to specify a new type for the returned tensor.\n\n For example:\n\n ```python\n # 'tensor' is [[1, 2, 3], [4, 5, 6]]\n tf.zeros_like(tensor) ==> [[0, 0, 0], [0, 0, 0]]\n ```\n\n Args:\n tensor: A `Tensor`.\n dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,\n `int8`, `int16`, `int32`, `int64`, `uint8`, `complex64`, or `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` with all elements set to zero.\n \"\"\"\n with ops.op_scope([tensor], name, \"zeros_like\") as name:\n tensor = ops.convert_to_tensor(tensor, name=\"tensor\")\n if dtype is not None and tensor.dtype != dtype:\n ret = zeros(shape(tensor), dtype, name=name)\n ret.set_shape(tensor.get_shape())\n return ret\n else:\n return gen_array_ops._zeros_like(tensor, name=name)\n\n\ndef ones_like(tensor, dtype=None, name=None):\n \"\"\"Creates a tensor with all elements set to 1.\n\n Given a single tensor (`tensor`), this operation returns a tensor of the same\n type and shape as `tensor` with all elements set to 1. 
Optionally, you can\n specify a new type (`dtype`) for the returned tensor.\n\n For example:\n\n ```python\n # 'tensor' is [[1, 2, 3], [4, 5, 6]]\n tf.ones_like(tensor) ==> [[1, 1, 1], [1, 1, 1]]\n ```\n\n Args:\n tensor: A `Tensor`.\n dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,\n `int8`, `int16`, `int32`, `int64`, `uint8`, `complex64`, or `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` with all elements set to 1.\n \"\"\"\n with ops.op_scope([tensor], name, \"ones_like\") as name:\n tensor = ops.convert_to_tensor(tensor, name=\"tensor\")\n ones_shape = shape(tensor)\n if dtype is None:\n dtype = tensor.dtype\n ret = ones(ones_shape, dtype=dtype, name=name)\n ret.set_shape(tensor.get_shape())\n return ret\n\n\ndef ones(shape, dtype=dtypes.float32, name=None):\n \"\"\"Creates a tensor with all elements set to 1.\n\n This operation returns a tensor of type `dtype` with shape `shape` and all\n elements set to 1.\n\n For example:\n\n ```python\n tf.ones([2, 3], int32) ==> [[1, 1, 1], [1, 1, 1]]\n ```\n\n Args:\n shape: Either a list of integers, or a 1-D `Tensor` of type `int32`.\n dtype: The type of an element in the resulting `Tensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` with all elements set to 1.\n \"\"\"\n with ops.op_scope([shape], name, \"ones\") as name:\n try:\n shape = tensor_shape.as_shape(shape)\n output = constant(1, shape=shape, dtype=dtype, name=name)\n except (TypeError, ValueError):\n shape = ops.convert_to_tensor(shape, dtype=dtypes.int32, name=\"shape\")\n output = fill(shape, constant(1, dtype=dtype), name=name)\n assert output.dtype.base_dtype == dtypes.as_dtype(dtype).base_dtype\n return output\n\n\ndef placeholder(dtype, shape=None, name=None):\n \"\"\"Inserts a placeholder for a tensor that will be always fed.\n\n **Important**: This tensor will produce an error if evaluated. Its value must\n be fed using the `feed_dict` optional argument to `Session.run()`,\n `Tensor.eval()`, or `Operation.run()`.\n\n For example:\n\n ```python\n x = tf.placeholder(tf.float32, shape=(1024, 1024))\n y = tf.matmul(x, x)\n\n with tf.Session() as sess:\n print(sess.run(y)) # ERROR: will fail because x was not fed.\n\n rand_array = np.random.rand(1024, 1024)\n print(sess.run(y, feed_dict={x: rand_array})) # Will succeed.\n ```\n\n Args:\n dtype: The type of elements in the tensor to be fed.\n shape: The shape of the tensor to be fed (optional). 
If the shape is not\n specified, you can feed a tensor of any shape.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` that may be used as a handle for feeding a value, but not\n evaluated directly.\n \"\"\"\n shape = tensor_shape.as_shape(shape)\n if shape.is_fully_defined():\n dim_list = shape.as_list()\n else:\n dim_list = []\n ret = gen_array_ops._placeholder(\n dtype=dtype,\n shape=dim_list,\n name=name)\n ret.set_shape(shape)\n return ret\n\n\ndef sparse_placeholder(dtype, shape=None, name=None):\n \"\"\"Inserts a placeholder for a sparse tensor that will be always fed.\n\n **Important**: This sparse tensor will produce an error if evaluated.\n Its value must be fed using the `feed_dict` optional argument to\n `Session.run()`, `Tensor.eval()`, or `Operation.run()`.\n\n For example:\n\n ```python\n x = tf.sparse_placeholder(tf.float32)\n y = tf.sparse_reduce_sum(x)\n\n with tf.Session() as sess:\n print(sess.run(y)) # ERROR: will fail because x was not fed.\n\n indices = np.array([[3, 2, 0], [4, 5, 1]], dtype=np.int64)\n values = np.array([1.0, 2.0], dtype=np.float32)\n shape = np.array([7, 9, 2], dtype=np.int64)\n print(sess.run(y, feed_dict={\n x: tf.SparseTensorValue(indices, values, shape)})) # Will succeed.\n print(sess.run(y, feed_dict={\n x: (indices, values, shape)})) # Will succeed.\n\n sp = tf.SparseTensor(indices=indices, values=values, shape=shape)\n sp_value = sp.eval(session)\n print(sess.run(y, feed_dict={x: sp_value})) # Will succeed.\n ```\n\n Args:\n dtype: The type of `values` elements in the tensor to be fed.\n shape: The shape of the tensor to be fed (optional). If the shape is not\n specified, you can feed a sparse tensor of any shape.\n name: A name for prefixing the operations (optional).\n\n Returns:\n A `SparseTensor` that may be used as a handle for feeding a value, but not\n evaluated directly.\n \"\"\"\n if shape is None:\n shape = placeholder(\n dtypes.int64, name=(name + \"/shape\") if name is not None else None)\n else:\n shape = ops.convert_to_tensor(\n shape, name=(name + \"/shape\") if name is not None else None)\n return ops.SparseTensor(\n values=placeholder(\n dtype, name=(name + \"/values\") if name is not None else None),\n indices=placeholder(\n dtypes.int64,\n name=(name + \"/indices\") if name is not None else None),\n shape=shape\n )\n\n\ndef pad(tensor, paddings, mode=\"CONSTANT\", name=None): # pylint: disable=invalid-name\n \"\"\"Pads a tensor.\n\n This operation pads a `tensor` according to the `paddings` you specify.\n `paddings` is an integer tensor with shape `[n, 2]`, where n is the rank of\n `tensor`. For each dimension D of `input`, `paddings[D, 0]` indicates how\n many values to add before the contents of `tensor` in that dimension, and\n `paddings[D, 1]` indicates how many values to add after the contents of\n `tensor` in that dimension. If `mode` is \"REFLECT\" then both `paddings[D, 0]`\n and `paddings[D, 1]` must be no greater than `tensor.dim_size(D) - 1`. 
If\n `mode` is \"SYMMETRIC\" then both `paddings[D, 0]` and `paddings[D, 1]` must be\n no greater than `tensor.dim_size(D)`.\n\n The padded size of each dimension D of the output is:\n\n `paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]`\n\n For example:\n\n ```python\n # 't' is [[1, 2, 3], [4, 5, 6]].\n # 'paddings' is [[1, 1,], [2, 2]].\n # rank of 't' is 2.\n pad(t, paddings, \"CONSTANT\") ==> [[0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 2, 3, 0, 0],\n [0, 0, 4, 5, 6, 0, 0],\n [0, 0, 0, 0, 0, 0, 0]]\n\n pad(t, paddings, \"REFLECT\") ==> [[6, 5, 4, 5, 6, 5, 4],\n [3, 2, 1, 2, 3, 2, 1],\n [6, 5, 4, 5, 6, 5, 4],\n [3, 2, 1, 2, 3, 2, 1]]\n\n pad(t, paddings, \"SYMMETRIC\") ==> [[2, 1, 1, 2, 3, 3, 2],\n [2, 1, 1, 2, 3, 3, 2],\n [5, 4, 4, 5, 6, 6, 5],\n [5, 4, 4, 5, 6, 6, 5]]\n ```\n\n Args:\n tensor: A `Tensor`.\n paddings: A `Tensor` of type `int32`.\n mode: One of \"CONSTANT\", \"REFLECT\", or \"SYMMETRIC\".\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `tensor`.\n\n Raises:\n ValueError: When mode is not one of \"CONSTANT\", \"REFLECT\", or \"SYMMETRIC\".\n \"\"\"\n\n if mode == \"CONSTANT\":\n return gen_array_ops._pad(tensor, paddings, name=name)\n if mode == \"REFLECT\":\n return gen_array_ops._mirror_pad(tensor,\n paddings,\n mode=\"REFLECT\",\n name=name)\n if mode == \"SYMMETRIC\":\n return gen_array_ops._mirror_pad(tensor,\n paddings,\n mode=\"SYMMETRIC\",\n name=name)\n raise ValueError(\"Unknown padding mode: %s\" % mode)\n\n\ndef meshgrid(*args, **kwargs):\n \"\"\"Broadcasts parameters for evaluation on an N-D grid.\n\n Given N one-dimensional coordinate arrays `*args`, returns a list `outputs`\n of N-D coordinate arrays for evaluating expressions on an N-D grid.\n\n Notes:\n\n `meshgrid` supports cartesian ('xy') and matrix ('ij') indexing conventions.\n When the `indexing` argument is set to 'xy' (the default), the broadcasting\n instructions for the first two dimensions are swapped.\n\n Examples:\n\n Calling `X, Y = meshgrid(x, y)` with the tensors\n ```prettyprint\n x = [1, 2, 3]\n y = [4, 5, 6]\n ```\n results in\n ```prettyprint\n X = [[1, 1, 1],\n [2, 2, 2],\n [3, 3, 3]]\n Y = [[4, 5, 6],\n [4, 5, 6],\n [4, 5, 6]]\n ```\n\n Args:\n *args: `Tensor`s with rank 1\n indexing: Either 'xy' or 'ij' (optional, default: 'xy')\n name: A name for the operation (optional).\n\n Returns:\n outputs: A list of N `Tensor`s with rank N\n \"\"\"\n indexing = kwargs.pop(\"indexing\", \"xy\")\n name = kwargs.pop(\"name\", \"meshgrid\")\n if len(kwargs) > 0:\n key = list(kwargs.keys())[0]\n raise TypeError(\"'{}' is an invalid keyword argument \"\n \"for this function\".format(key))\n\n if indexing not in (\"xy\", \"ij\"):\n raise ValueError(\"indexing parameter must be either 'xy' or 'ij'\")\n\n with ops.op_scope(args, name, \"meshgrid\") as name:\n num_inputs = len(args)\n ones = (1,) * num_inputs\n\n asserts = [logging_ops.Assert(\n gen_math_ops.equal(rank(x), 1),\n [\"Input %d needs to have rank 1: \" % i, rank(x)],\n ) for i, x in enumerate(args)]\n\n # Prepare reshape by inserting dimensions with size 1 where needed\n shapes = [ones[:i] + (-1,) + ones[i + 1:] for i in range(num_inputs)]\n # Create parameters for broadcasting each tensor to the full size\n sizes = [size(x) for x in args]\n bcast = [sizes[:i] + [1] + sizes[i + 1:] for i in range(num_inputs)]\n\n # By default, the numpy version swaps the instructions\n # for the first and second dimension\n if indexing == \"xy\" and num_inputs > 1:\n shapes[0], shapes[1] = shapes[1], shapes[0]\n 
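# NOTE: swapping the first two reshape specs and broadcast multiples\n      # reproduces numpy.meshgrid's default indexing=\"xy\" behaviour: the first\n      # output then varies along its second axis (columns) and the second\n      # output along its first axis (rows).\n      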
bcast[0], bcast[1] = bcast[1], bcast[0]\n\n results = []\n with ops.control_dependencies(asserts):\n for a, r, e in zip(args, shapes, bcast):\n results.append(tile(reshape(a, r), e))\n\n return results\n\n\[email protected](\"Placeholder\")\ndef _PlaceholderShape(op):\n given_shape = tensor_util.TensorShapeProtoToList(op.get_attr(\"shape\"))\n if given_shape:\n return [tensor_shape.TensorShape(given_shape)]\n else:\n return [tensor_shape.unknown_shape()]\n\n\[email protected](\"CheckNumerics\")\[email protected](\"Identity\")\[email protected](\"RefIdentity\")\[email protected](\"StopGradient\")\[email protected](\"BatchMatrixBandPart\")\[email protected](\"QuantizeAndDequantize\")\ndef _UnchangedShape(op):\n return [op.inputs[0].get_shape()]\n\n\[email protected](\"Rank\")\[email protected](\"Size\")\ndef _ScalarShape(unused_op):\n return [tensor_shape.scalar()]\n\n\[email protected](\"Slice\")\ndef _SliceShape(op):\n \"\"\"Shape function for array_ops.slice.\"\"\"\n input_shape = op.inputs[0].get_shape()\n begin_shape = op.inputs[1].get_shape().with_rank(1)\n sizes_shape = op.inputs[2].get_shape().with_rank(1)\n ndims = begin_shape.merge_with(sizes_shape)[0].value\n if ndims is not None:\n input_shape.assert_has_rank(ndims)\n begin_value = tensor_util.constant_value(op.inputs[1])\n sizes_value = tensor_util.constant_value(op.inputs[2])\n if sizes_value is not None:\n returned_dims = []\n for i, slice_size in enumerate(sizes_value.ravel()):\n if slice_size != -1:\n returned_dims.append(slice_size)\n elif begin_value is not None:\n returned_dims.append(input_shape[i] - begin_value[i])\n else:\n returned_dims.append(None)\n return [tensor_shape.TensorShape(returned_dims)]\n else:\n if input_shape.ndims is not None:\n return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]\n elif ndims is not None:\n return [tensor_shape.unknown_shape(ndims=ndims)]\n else:\n return [tensor_shape.unknown_shape()]\n\n\nNEW_AXIS = -1\nSHRINK_AXIS = -2\n\n\n# PEP-8 naming\n# pylint: disable=invalid-name\ndef _compute_size_of_strided_dim(spec, size):\n unknown = None # Document what None means here.\n use_full_range = None # Document other use of None.\n\n if size is unknown or size.value is unknown:\n return unknown\n size = size.value\n stride = spec.step\n if stride is not unknown:\n if stride == 0:\n return unknown\n stride = spec.step\n valid_range = [0, size] if stride > 0 else [-1, size - 1]\n\n # PEP-8 naming\n # pylint: disable=invalid-name\n def canonical(x, c):\n if x is use_full_range:\n return valid_range[c] if stride > 0 else valid_range[(c + 1) & 1]\n else:\n x_fwd = size + x if x < 0 else x # make negative indices positive\n return max(valid_range[0], min(valid_range[1], x_fwd))\n\n begin = canonical(spec.start, 0)\n end = canonical(spec.stop, 1)\n interval_length = end - begin\n if interval_length == 0 or ((interval_length < 0) != (stride < 0)):\n return 0\n else:\n remainder = 1 if interval_length % stride != 0 else 0\n return interval_length // stride + remainder\n else:\n return unknown # unknown because stride is unknown\n\n\[email protected](\"StridedSlice\")\ndef _StridedSliceShape(op):\n \"\"\"Shape function for array_ops.slice.\"\"\"\n\n input_shape = op.inputs[0].get_shape()\n if input_shape.ndims is None:\n return [tensor_shape.unknown_shape()]\n\n ndims = len(input_shape)\n begin_shape = op.inputs[1].get_shape().with_rank(1)\n end_shape = op.inputs[2].get_shape().with_rank(1)\n strides_shape = op.inputs[3].get_shape().with_rank(1)\n # get constant values if available\n begin_value 
= tensor_util.constant_value(op.inputs[1])\n end_value = tensor_util.constant_value(op.inputs[2])\n strides_value = tensor_util.constant_value(op.inputs[3])\n\n sparse_dims = begin_shape.merge_with(end_shape).merge_with(strides_shape)[\n 0].value\n if sparse_dims is None:\n return [input_shape.unknown_shape()]\n\n begin_mask = op.get_attr(\"begin_mask\")\n end_mask = op.get_attr(\"end_mask\")\n ellipse_mask = op.get_attr(\"ellipse_mask\")\n new_axis_mask = op.get_attr(\"new_axis_mask\")\n shrink_axis_mask = op.get_attr(\"shrink_axis_mask\")\n # find the ellipsis\n ellipse_index = -1\n\n # look for ellipses\n num_add_axis_after_ellipse = 0\n for i in range(sparse_dims):\n if ellipse_index != -1 and ((1 << i) & new_axis_mask) != 0:\n num_add_axis_after_ellipse += 1\n if (1 << i) & ellipse_mask:\n if ellipse_index != -1:\n raise ValueError(\"Multiple ellipses not allowed\")\n ellipse_index = i\n # insert a virtual ellipse if not seen\n if ellipse_index == -1:\n ellipse_mask |= (1 << sparse_dims)\n sparse_dims += 1\n\n # build the dense specification\n dense_dims = ndims # not accounting for newaxis and shrink\n final_shape_gather = []\n full_index = 0\n dense_specs = []\n for dim in range(sparse_dims):\n bit = 1 << dim\n if bit & ellipse_mask:\n next_index = min(dense_dims -\n (sparse_dims - dim) + 1 + num_add_axis_after_ellipse,\n dense_dims)\n while full_index < next_index:\n dense_specs.append(_baseslice(None, None, 1))\n final_shape_gather.append(full_index)\n full_index += 1\n elif bit & new_axis_mask:\n final_shape_gather.append(NEW_AXIS)\n else:\n dense_specs.append(_baseslice(\n None if (begin_mask & bit) else begin_value[dim], None if (\n end_mask & bit) else end_value[dim], strides_value[dim]))\n if shrink_axis_mask & bit:\n final_shape_gather.append(SHRINK_AXIS)\n else:\n final_shape_gather.append(full_index)\n\n full_index += 1\n\n # Compute each dimensions contribution to the \"processing\" shape\n final_dims = []\n for dim in range(dense_dims):\n final_dims.append(_compute_size_of_strided_dim(dense_specs[dim],\n input_shape.dims[dim]))\n\n # Gather the final shape from the processing shape\n final_shape = []\n for index in final_shape_gather:\n if index == NEW_AXIS:\n final_shape.append(1)\n elif index == SHRINK_AXIS:\n pass\n else:\n final_shape.append(final_dims[index])\n\n return [tensor_shape.TensorShape(final_shape)]\n\n\[email protected](\"Gather\")\ndef _GatherShape(op):\n \"\"\"Shape function for array_ops.gather.\"\"\"\n params_shape = op.inputs[0].get_shape()\n indices_shape = op.inputs[1].get_shape()\n return [indices_shape.concatenate(params_shape[1:])]\n\n\[email protected](\"GatherNd\")\ndef _GatherNdShape(op):\n \"\"\"Shape function for array_ops.gather_nd.\"\"\"\n params_shape = op.inputs[0].get_shape()\n indices_shape = op.inputs[1].get_shape().with_rank_at_least(2)\n if indices_shape.ndims is not None:\n indices_shape[-1].merge_with(params_shape.ndims)\n return [indices_shape[:-1]]\n\n\[email protected](\"Unique\")\ndef _UniqueShape(op):\n \"\"\"Shape function for array_ops.Unique.\"\"\"\n # The output is a vector with data-dependent length.\n input_shape = op.inputs[0].get_shape()\n input_shape.assert_has_rank(1)\n return [tensor_shape.vector(None), input_shape]\n\n\[email protected](\"UniqueWithCounts\")\ndef _UniqueWithCountsShape(op):\n \"\"\"Shape function for array_ops.Unique.\"\"\"\n # The output is a vector with data-dependent length.\n input_shape = op.inputs[0].get_shape()\n input_shape.assert_has_rank(1)\n return [tensor_shape.vector(None), 
input_shape, tensor_shape.vector(None)]\n\n\[email protected](\"BatchMatrixDiag\")\ndef _BatchMatrixDiagShape(op):\n \"\"\"Shape function for array_ops.batch_matrix_diag.\"\"\"\n diag_shape = op.inputs[0].get_shape().with_rank_at_least(1)\n return [diag_shape.concatenate(diag_shape[-1])]\n\n\[email protected](\"BatchMatrixDiagPart\")\ndef _BatchMatrixDiagPartShape(op):\n \"\"\"Shape function for array_ops.batch_matrix_diag_part.\"\"\"\n input_shape = op.inputs[0].get_shape().with_rank_at_least(2)\n # Last two dims must match\n input_shape[-1].assert_is_compatible_with(input_shape[-2])\n return [input_shape[:-1]]\n\n\[email protected](\"Diag\")\ndef _DiagShape(op):\n \"\"\"Shape function for array_ops.diag.\n\n This op has one input (of rank k <= 3), and one output (of rank 2k),\n where the shape of the output is the concatenation of the input\n shape with itself.\n\n Args:\n op: A Diag Operation.\n\n Returns:\n A single-element list containing the shape of the output.\n \"\"\"\n input_shape = op.inputs[0].get_shape().with_rank_at_most(3)\n return [input_shape.concatenate(input_shape)]\n\[email protected](\"DiagPart\")\ndef _DiagPartShape(op):\n \"\"\"Shape function for array_ops.diag_part.\n\n This op has one input (of rank k = 2, 4, or 6), and one output (of rank k/2),\n where the shape of the output is the diagonal of the input shape.\n\n Args:\n op: A DiagPart Operation.\n\n Returns:\n A single-element list containing the shape of the output.\n\n Raises:\n ValueError: If input has odd rank or greater than 6, or the first and\n second halves of the shape are incompatible.\n\n \"\"\"\n input_shape = op.inputs[0].get_shape().with_rank_at_most(6)\n rank = input_shape.ndims\n if rank is None:\n return [tensor_shape.unknown_shape()]\n if rank % 2:\n raise ValueError(\"Input must be even rank, got rank = \" + str(rank) + \".\")\n mid = rank // 2\n return [input_shape[:mid].merge_with(input_shape[mid:])]\n\[email protected](\"ExpandDims\")\ndef _ExpandDimsShape(op):\n \"\"\"Determine shape for expand op's output tensor.\n\n Args:\n op: Operation for which to determine shape.\n op.inputs[0] is the input tensor.\n op.inputs[1] is the dimension in which to expand.\n Returns:\n Shape of op's output tensor.\n Raises:\n ValueError: If dim is outside of [-rank - 1, rank], where rank is the number\n of dimensions in the input tensor.\n \"\"\"\n input_shape = op.inputs[0].get_shape()\n if input_shape.dims is None:\n return [tensor_shape.unknown_shape()]\n dim = tensor_util.constant_value(op.inputs[1])\n input_ndims = input_shape.ndims\n if dim < -input_ndims - 1 or dim > input_ndims:\n raise ValueError(\n \"dim %d not in [%d, %d].\" % (dim, -input_ndims, input_ndims))\n if dim < 0:\n dim += (input_ndims + 1)\n result_shape = list(input_shape.dims)\n result_shape.insert(dim, 1)\n return [tensor_shape.TensorShape(result_shape)]\n\n\[email protected](\"Squeeze\")\ndef _SqueezeShape(op):\n \"\"\"Determine shape for squeeze op's output tensor.\n\n Args:\n op: Operation for which to determine shape.\n Returns:\n Shape of op's output tensor.\n Raises:\n ValueError: if squeeze_dims includes a dimension outside of [-rank, rank),\n where rank is the number of dimensions in the input tensor. 
Or, if\n squeeze_dims includes a dimension for which input shape has a value\n not equal to 1.\n \"\"\"\n input_shape = op.inputs[0].get_shape()\n if input_shape.dims is None:\n return [tensor_shape.unknown_shape()]\n\n squeeze_dims = op.get_attr(\"squeeze_dims\") or []\n wrapped_squeeze_dims = []\n input_ndims = input_shape.ndims\n for i, squeeze_dim in enumerate(squeeze_dims):\n if squeeze_dim < -input_ndims or squeeze_dim >= input_ndims:\n raise ValueError(\n \"squeeze_dims[%d]=%d not in [%d, %d).\" % (\n i, squeeze_dim, -input_ndims, input_ndims))\n if squeeze_dim < 0:\n squeeze_dim += input_ndims\n wrapped_squeeze_dims.append(squeeze_dim)\n\n result_shape = []\n for i, dim in enumerate([d.value for d in input_shape.dims]):\n is_explicit_match = i in wrapped_squeeze_dims\n if dim is None:\n if is_explicit_match:\n # Assume that the squeezed dimension will be 1 at runtime.\n continue\n if not wrapped_squeeze_dims:\n # If squeezing all 1 dimensions and we see a None, give up.\n return [tensor_shape.unknown_shape()]\n elif dim == 1:\n if is_explicit_match or not wrapped_squeeze_dims:\n continue\n elif is_explicit_match:\n raise ValueError(\n \"Can not squeeze dim[%d], expected a dimension of 1, got %d.\" % (\n i, dim))\n result_shape.append(dim)\n return [tensor_shape.TensorShape(result_shape)]\n\n\[email protected](\"Bitcast\")\ndef _BitcastShape(op):\n \"\"\"Shape function for Bitcast op.\"\"\"\n input_shape = op.inputs[0].get_shape()\n if input_shape == tensor_shape.unknown_shape():\n return [tensor_shape.unknown_shape()]\n input_type = op.inputs[0].dtype\n size_of_input = input_type.size\n output = dtypes.as_dtype(op.get_attr(\"type\"))\n size_of_output = output.size\n if size_of_input == size_of_output:\n return [input_shape]\n else:\n if size_of_output > size_of_input:\n new_shape = input_shape.with_rank_at_least(1).as_list()\n last_val = new_shape[-1]\n if last_val is None or last_val == (size_of_output // size_of_input):\n new_shape = new_shape[:-1]\n else:\n raise ValueError(\n \"Cannot bitcast due to shape. 
%d is not evenly divisible by %d.\" %\n (new_shape[-1], size_of_input // size_of_output))\n else:\n new_shape = input_shape\n new_shape = new_shape.concatenate([size_of_input // size_of_output])\n return [tensor_shape.TensorShape(new_shape)]\n\n\[email protected](\"Reshape\")\ndef _ReshapeShape(op):\n \"\"\"Shape function for Reshape op.\"\"\"\n input_shape = op.inputs[0].get_shape()\n if input_shape.ndims is not None:\n num_elements = tensor_shape.Dimension(1)\n for dim in input_shape.dims:\n num_elements *= dim\n else:\n num_elements = tensor_shape.Dimension(None)\n new_shape_shape = op.inputs[1].get_shape().with_rank(1)\n new_shape = tensor_util.constant_value(op.inputs[1])\n if new_shape is None:\n # Attempt to infer the rank of the output from the length of\n # new_shape.\n return [tensor_shape.unknown_shape(ndims=new_shape_shape[0].value)]\n new_shape = np.reshape(new_shape, -1).tolist()\n if -1 not in new_shape:\n # The new shape is fully defined.\n if (num_elements.value is not None\n and num_elements.value != np.prod(new_shape)):\n raise ValueError(\n \"Cannot reshape a tensor with %d elements to shape %s (%d elements)\"\n % (num_elements.value, new_shape, np.prod(new_shape)))\n return [tensor_shape.TensorShape(new_shape)]\n elif num_elements.value is not None:\n # We know the number of elements, so we can calculate the missing\n # dimension in the new_shape.\n known_elements = 1\n unknown_index = None\n for i, dim in enumerate(new_shape):\n if dim == -1:\n unknown_index = i\n else:\n known_elements *= dim\n if known_elements == 0:\n raise ValueError(\"cannot infer the missing input size for \"\n \"an empty tensor unless all specified \"\n \"input sizes are non-zero\")\n if num_elements % known_elements != 0:\n raise ValueError(\"input has %s elements, which isn't divisible by %d\" %\n (num_elements, known_elements))\n new_shape[unknown_index] = num_elements // known_elements\n return [tensor_shape.TensorShape(new_shape)]\n else:\n # We don't know the input shape, but we know n-1 of the dimensions\n # in the new shape.\n new_shape[new_shape.index(-1)] = None\n return [tensor_shape.TensorShape(new_shape)]\n\n\[email protected](\"BroadcastGradientArgs\")\ndef _BroadcastGradientArgsShape(op):\n \"\"\"Shape function for the BroadcastGradientArgs op.\"\"\"\n # TODO(mrry): Implement constant_value for BroadcastGradientArgs?\n op.inputs[0].get_shape().assert_has_rank(1)\n op.inputs[1].get_shape().assert_has_rank(1)\n return [tensor_shape.vector(None), tensor_shape.vector(None)]\n\n\[email protected](\"Fill\")\ndef _FillShape(op):\n \"\"\"Shape function for the Fill op.\n\n This op takes a vector of dimensions and a scalar, and produces a\n tensor with the given dimensions.\n\n Args:\n op: A Fill Operation.\n\n Returns:\n A single-element list containing the shape of the output.\n \"\"\"\n dimensions_shape = op.inputs[0].get_shape().with_rank(1)\n op.inputs[1].get_shape().assert_is_compatible_with(tensor_shape.scalar())\n fill_dims = tensor_util.constant_value(op.inputs[0])\n if fill_dims is None:\n # Attempt to infer the rank of the output from the length of\n # dimensions.\n return [tensor_shape.unknown_shape(ndims=dimensions_shape[0].value)]\n else:\n return [tensor_shape.TensorShape(fill_dims.tolist())]\n\n\[email protected](\"InvertPermutation\")\ndef _InvertPermutationShape(op):\n \"\"\"Shape function for the InvertPermutation op.\"\"\"\n return [op.inputs[0].get_shape().with_rank(1)]\n\n\[email protected](\"ListDiff\")\ndef _ListDiffShape(op):\n \"\"\"Shape function for the 
ListDiff op.\"\"\"\n op.inputs[0].get_shape().assert_has_rank(1)\n op.inputs[1].get_shape().assert_has_rank(1)\n # TODO(mrry): Indicate that the length falls within an interval?\n return [tensor_shape.vector(None)] * 2\n\n\[email protected](\"Pad\")\[email protected](\"MirrorPad\")\ndef _PadShape(op):\n \"\"\"Shape function for the Pad op.\n\n This op has two inputs:\n\n * input: A rank-N tensor.\n * paddings: An N-by-2 matrix, in which the i^th row contains the\n number of padding elements to add before and after `input` in the\n i^th dimension.\n\n It has one output, which has the same rank as input, and additional\n elements according to the values in paddings.\n\n Args:\n op: A Pad Operation.\n\n Returns:\n A single-element list containing the shape of the output.\n\n Raises:\n ValueError: If the input shapes are incompatible.\n \"\"\"\n paddings_shape = op.inputs[1].get_shape().with_rank(2)\n input_shape = op.inputs[0].get_shape()\n input_shape = input_shape.with_rank(paddings_shape[0].value)\n paddings_shape = paddings_shape.merge_with(\n tensor_shape.matrix(input_shape.ndims, 2))\n paddings = tensor_util.constant_value(op.inputs[1])\n if paddings is None:\n return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]\n else:\n output_dims = []\n for i, dim in enumerate(input_shape.dims):\n if paddings[i, 0] < 0 or paddings[i, 1] < 0:\n raise ValueError(\"paddings must be non-negative\")\n output_dims.append(dim + paddings[i, 0] + paddings[i, 1])\n return [tensor_shape.TensorShape(output_dims)]\n\n\[email protected](\"MirrorPadGrad\")\ndef _MirrorPadGradShape(op):\n \"\"\"Shape function for the MirrorPadGrad op.\"\"\"\n paddings_shape = op.inputs[1].get_shape().with_rank(2)\n input_shape = op.inputs[0].get_shape().with_rank(paddings_shape[0].value)\n paddings_shape = paddings_shape.merge_with(tensor_shape.matrix(\n input_shape.ndims, 2))\n paddings = tensor_util.constant_value(op.inputs[1])\n if paddings is None:\n return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]\n\n output_dims = []\n for i, dim in enumerate(input_shape.dims):\n if paddings[i, 0] < 0 or paddings[i, 1] < 0:\n raise ValueError(\"Paddings must be non-negative.\")\n if dim <= paddings[i, 0] + paddings[i, 1]:\n raise ValueError(\"Output dimension is not positive.\")\n output_dims.append(dim - paddings[i, 0] - paddings[i, 1])\n return [tensor_shape.TensorShape(output_dims)]\n\n\[email protected](\"ReverseSequence\")\ndef _ReverseSequenceShape(op):\n \"\"\"Shape function for the ReverseSequence op.\n\n This op has two inputs:\n\n * input: A rank-N tensor with size B in the 0th dimension.\n * seq_lens: A vector of length B.\n\n It has one output, with the same size as input.\n\n Args:\n op: A ReverseSequence Operation.\n\n Returns:\n A single-element list containing the shape of the output.\n\n Raises:\n ValueError: If the input shapes are incompatible or seq_dim == batch_dim.\n \"\"\"\n input_shape = op.inputs[0].get_shape()\n seq_lens_shape = op.inputs[1].get_shape().with_rank(1)\n if input_shape.ndims is None:\n return [None]\n seq_dim = op.get_attr(\"seq_dim\")\n batch_dim = op.get_attr(\"batch_dim\")\n if input_shape.ndims is not None:\n if batch_dim >= input_shape.ndims:\n raise ValueError(\"batch_dim must be < input.dims() (%d vs %d)\" %\n (batch_dim, input_shape.ndims))\n if seq_dim >= input_shape.ndims:\n raise ValueError(\"seq_dim must be < input.dims() (%d vs %d)\" %\n (seq_dim, input_shape.ndims))\n batch_size = input_shape[batch_dim].merge_with(seq_lens_shape[0])\n input_shape = 
tensor_shape.TensorShape([\n value if ix != batch_dim else batch_size\n for ix, value in enumerate(input_shape)])\n return [input_shape]\n\n\[email protected](\"Shape\")\[email protected](\"ShapeN\")\ndef _ShapeNShape(op):\n \"\"\"Shape function for the Shape/ShapeN op.\"\"\"\n return [tensor_shape.vector(x.get_shape().ndims) for x in op.inputs]\n\n\[email protected](\"Transpose\")\ndef _TransposeShape(op):\n \"\"\"Shape function for the Transpose op.\n\n This op takes two inputs:\n\n * input: a rank-N tensor of arbitrary shape.\n * shuffle: a length-N vector.\n\n Its output is the rank-N tensor computed by permuting the dimensions\n of input according to shuffle.\n\n Args:\n op: A Transpose op.\n\n Returns:\n A single-element list containing the shape of the output.\n\n Raises:\n ValueError: If the shapes of input and shuffle are incompatible.\n IndexError: If shuffle contains an index that is >= the rank of input.\n \"\"\"\n input_shape = op.inputs[0].get_shape()\n transpose_shape = op.inputs[1].get_shape().merge_with(tensor_shape.vector(\n input_shape.ndims))\n transpose_vec = tensor_util.constant_value(op.inputs[1])\n if transpose_vec is None:\n return [tensor_shape.unknown_shape(ndims=transpose_shape[0].value)]\n else:\n return [tensor_shape.TensorShape([input_shape[i]\n for i in transpose_vec.tolist()])]\n\n\[email protected](\"Split\")\ndef _SplitShape(op):\n \"\"\"Shape function for the Split op.\"\"\"\n split_dim = tensor_util.constant_value(op.inputs[0])\n num_split = len(op.outputs)\n input_shape = op.inputs[1].get_shape()\n if split_dim is None:\n return [tensor_shape.unknown_shape(ndims=input_shape.ndims)] * num_split\n else:\n split_dim = int(split_dim)\n input_shape = input_shape.with_rank_at_least(split_dim + 1)\n if not (input_shape[split_dim] % num_split).is_compatible_with(0):\n raise ValueError(\n \"Number of ways to split should evenly divide the split \"\n \"dimension but got split_dim %d (size = %d) and num_split %d\" %\n (split_dim, input_shape[split_dim].value, num_split))\n prefix = input_shape[:split_dim]\n size_in_split_dim = input_shape[split_dim] // num_split\n suffix = input_shape[split_dim + 1:]\n output_shape = prefix.concatenate(size_in_split_dim).concatenate(suffix)\n return [output_shape] * num_split\n\n\[email protected](\"Tile\")\ndef _TileShape(op):\n \"\"\"Shape function for the Tile op.\n\n This op has two inputs:\n\n * input: A rank-N tensor.\n * multiples: A length-N vector, in which the i^th element contains\n the factor by which `input` will be tiled in the i^th dimension.\n\n It has one output, which has the same rank as input, and additional\n elements according to the values in multiples\n\n Args:\n op: A Tile Operation.\n\n Returns:\n A single-element list containing the shape of the output.\n \"\"\"\n multiples_shape = op.inputs[1].get_shape().with_rank(1)\n input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0].value)\n multiples = tensor_util.constant_value(op.inputs[1])\n if multiples is None:\n return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]\n else:\n output_dims = []\n multiples = multiples.ravel()\n for i, dim in enumerate(input_shape.dims):\n output_dims.append(dim * multiples[i])\n return [tensor_shape.TensorShape(output_dims)]\n\n\[email protected](\"TileGrad\")\ndef _TileGradShape(op):\n \"\"\"Shape function for the TileGrad op.\"\"\"\n multiples_shape = op.inputs[1].get_shape().with_rank(1)\n input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0])\n multiples = 
tensor_util.constant_value(op.inputs[1])\n if multiples is None:\n return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]\n else:\n output_dims = []\n for i, dim in enumerate(input_shape.dims):\n output_dims.append(dim // multiples[i])\n return [tensor_shape.TensorShape(output_dims)]\n\n\[email protected](\"Where\")\ndef _WhereShape(op):\n \"\"\"Shape function for the Where op.\"\"\"\n input_shape = op.inputs[0].get_shape()\n return [tensor_shape.matrix(None, input_shape.ndims)]\n\n\[email protected](\"ZerosLike\")\ndef _ZerosLikeShape(op):\n \"\"\"Shape function for the ZerosLike op.\"\"\"\n return [op.inputs[0].get_shape()]\n\n\ndef edit_distance(hypothesis, truth, normalize=True, name=\"edit_distance\"):\n \"\"\"Computes the Levenshtein distance between sequences.\n\n This operation takes variable-length sequences (`hypothesis` and `truth`),\n each provided as a `SparseTensor`, and computes the Levenshtein distance.\n You can normalize the edit distance by length of `truth` by setting\n `normalize` to true.\n\n For example, given the following input:\n\n ```python\n # 'hypothesis' is a tensor of shape `[2, 1]` with variable-length values:\n # (0,0) = [\"a\"]\n # (1,0) = [\"b\"]\n hypothesis = tf.SparseTensor(\n [[0, 0, 0],\n [1, 0, 0]],\n [\"a\", \"b\"]\n (2, 1, 1))\n\n # 'truth' is a tensor of shape `[2, 2]` with variable-length values:\n # (0,0) = []\n # (0,1) = [\"a\"]\n # (1,0) = [\"b\", \"c\"]\n # (1,1) = [\"a\"]\n truth = tf.SparseTensor(\n [[0, 1, 0],\n [1, 0, 0],\n [1, 0, 1],\n [1, 1, 0]]\n [\"a\", \"b\", \"c\", \"a\"],\n (2, 2, 2))\n\n normalize = True\n ```\n\n This operation would return the following:\n\n ```python\n # 'output' is a tensor of shape `[2, 2]` with edit distances normalized\n # by 'truth' lengths.\n output ==> [[inf, 1.0], # (0,0): no truth, (0,1): no hypothesis\n [0.5, 1.0]] # (1,0): addition, (1,1): no hypothesis\n ```\n\n Args:\n hypothesis: A `SparseTensor` containing hypothesis sequences.\n truth: A `SparseTensor` containing truth sequences.\n normalize: A `bool`. If `True`, normalizes the Levenshtein distance by\n length of `truth.`\n name: A name for the operation (optional).\n\n Returns:\n A dense `Tensor` with rank `R - 1`, where R is the rank of the\n `SparseTensor` inputs `hypothesis` and `truth`.\n\n Raises:\n TypeError: If either `hypothesis` or `truth` are not a `SparseTensor`.\n \"\"\"\n if not isinstance(hypothesis, ops.SparseTensor):\n raise TypeError(\"Hypothesis must be a SparseTensor\")\n if not isinstance(truth, ops.SparseTensor):\n raise TypeError(\"Truth must be a SparseTensor\")\n\n return gen_array_ops._edit_distance(hypothesis.indices,\n hypothesis.values,\n hypothesis.shape,\n truth.indices,\n truth.values,\n truth.shape,\n normalize=normalize,\n name=name)\n\n\[email protected](\"EditDistance\")\ndef _EditDistanceShape(op):\n \"\"\"Shape function for the EditDistance op.\"\"\"\n hypothesis_shape = tensor_util.constant_value(op.inputs[2])\n truth_shape = tensor_util.constant_value(op.inputs[5])\n if hypothesis_shape is not None and truth_shape is not None:\n if len(hypothesis_shape) != len(truth_shape):\n raise ValueError(\n \"Inconsistent ranks in hypothesis and truth. 
Saw shapes: %s and %s\" %\n (str(hypothesis_shape), str(truth_shape)))\n return [tensor_shape.TensorShape(\n [max(h, t) for h, t in zip(hypothesis_shape[:-1], truth_shape[:-1])])]\n\n return [tensor_shape.unknown_shape()]\n\n\n# The remaining ops do not change the shape of their inputs.\[email protected](\"Quantize\")\[email protected](\"Dequantize\")\ndef _QuantizeDequantizeShape(op):\n unused_min_range = op.inputs[1].get_shape().merge_with(tensor_shape.scalar())\n unused_max_range = op.inputs[2].get_shape().merge_with(tensor_shape.scalar())\n return common_shapes.unchanged_shape(op)\n\n\[email protected](\"ExtractImagePatches\")\ndef _ExtractImagePatchesShape(op):\n \"\"\"Shape function for the ExtractImagePatches op.\n\n Args:\n op: An ExtractImagePatches op.\n\n Raises:\n ValueError: If the strides or padding are invalid.\n\n Returns:\n The shape of the op output.\n \"\"\"\n images_shape = op.inputs[0].get_shape().with_rank(4)\n batch = images_shape[0]\n in_rows = images_shape[1]\n in_cols = images_shape[2]\n in_depth = images_shape[3]\n\n ksize_b, ksize_r, ksize_c, ksize_d = op.get_attr(\"ksizes\")\n if ksize_b != 1 or ksize_d != 1:\n raise ValueError(\"Current implementation does not yet support \"\n \"ksizes in the batch and depth dimensions.\")\n\n stride_b, stride_r, stride_c, stride_d = op.get_attr(\"strides\")\n if stride_b != 1 or stride_d != 1:\n raise ValueError(\"Current implementation does not yet support \"\n \"strides in the batch and depth dimensions.\")\n\n rate_b, rate_r, rate_c, rate_d = op.get_attr(\"rates\")\n if rate_b != 1 or rate_d != 1:\n raise ValueError(\"Current implementation does not yet support \"\n \"rates in the batch and depth dimensions.\")\n\n # Effective patch size, taking into account filter upsampling by rates.\n ksize_r_eff = ksize_r + (ksize_r - 1) * (rate_r - 1)\n ksize_c_eff = ksize_c + (ksize_c - 1) * (rate_c - 1)\n\n padding = op.get_attr(\"padding\")\n out_rows, out_cols = common_shapes.get2d_conv_output_size(in_rows, in_cols,\n ksize_r_eff,\n ksize_c_eff,\n stride_r, stride_c,\n padding)\n\n out_depth = None if in_depth is None else ksize_r * ksize_c * int(in_depth)\n output_shape = [batch, out_rows, out_cols, out_depth]\n\n return [tensor_shape.TensorShape(output_shape)]\n\n\[email protected](\"SpaceToBatch\")\ndef _SpaceToBatchShape(op):\n \"\"\"Shape function for the SpaceToBatch op.\n\n The output shape is determined by the following inputs/ attributes:\n\n * input: A rank-4 tensor with shape [B, H, W, D]\n * paddings: A 2-by-2 matrix, specified as follows:\n\n paddings = [[pad_top, pad_bottom], [pad_left, pad_right]],\n\n implying effective padded spatial dimensions:\n\n Hp = pad_top + H + pad_bottom\n Wp = pad_left + W + pad_right\n\n Both Hp and Wp must be multiples of block_size.\n * block_size: an int.\n\n Its output is also a rank-4 tensor with shape:\n\n [B*block_size*block_size, Hp/block_size, Wp/block_size, D]\n\n Args:\n op: A SpaceToBatch op.\n\n Returns:\n A single-element list containing the shape of the output.\n\n Raises:\n ValueError: If the shapes of inputs are not as expected.\n IndexError: If block_size does not divide Wp or Hp.\n \"\"\"\n # Check that the input tensor is 4-D.\n try:\n input_shape = op.inputs[0].get_shape().with_rank(4)\n except ValueError:\n raise ValueError(\n \"tf.space_to_batch() requires 4-D input tensor.\")\n\n # Check that the paddings tensor is a matrix with shape [2, 2].\n try:\n paddings_shape = op.inputs[1].get_shape().with_rank(2)\n except ValueError:\n raise ValueError(\n 
\"tf.space_to_batch() requires 2-D paddings tensor.\")\n\n if paddings_shape[0] != 2 or paddings_shape[1] != 2:\n raise ValueError(\n \"tf.space_to_batch() requires input paddings with shape [2, 2].\")\n\n block_size = op.get_attr(\"block_size\")\n if block_size <= 1:\n raise ValueError(\"Attribute block_size has to be > 1.\")\n\n paddings = tensor_util.constant_value(op.inputs[1])\n if paddings is not None:\n if (paddings[0, 0] < 0 or paddings[0, 1] < 0 or\n paddings[1, 0] < 0 or paddings[1, 1] < 0):\n raise ValueError(\"paddings cannot be negative.\")\n\n input_height = input_shape[1] + paddings[0, 0] + paddings[0, 1]\n input_width = input_shape[2] + paddings[1, 0] + paddings[1, 1]\n\n if input_height % block_size > 0 or input_width % block_size > 0:\n raise IndexError(\"block_size needs to divide both width and height.\")\n else:\n input_height = tensor_shape.Dimension(None)\n input_width = tensor_shape.Dimension(None)\n\n batch = input_shape[0] * block_size * block_size\n height = input_height // block_size\n width = input_width // block_size\n depth = input_shape[3]\n\n return [tensor_shape.TensorShape([batch, height, width, depth])]\n\n\[email protected](\"BatchToSpace\")\ndef _BatchToSpaceShape(op):\n \"\"\"Shape function for the BatchToSpace op.\n\n The output shape is determined by the following inputs/ attributes:\n\n * input: A rank-4 tensor with shape\n\n [B*block_size*block_size, Hp/block_size, Wp/block_size, D]\n\n Note that the batch size of the input tensor must be divisible by\n `block_size * block_size`.\n * crops: A 2-by-2 matrix, specified as follows:\n\n crops = [[crop_top, crop_bottom], [crop_left, crop_right]].\n\n * block_size: an int.\n\n Its output is also a rank-4 tensor with shape [B, H, W, D], where:\n\n H = Hp - crop_top - crop_bottom\n W = Wp - crop_left - crop_right\n\n Args:\n op: A BatchToSpace op.\n\n Returns:\n A single-element list containing the shape of the output.\n\n Raises:\n ValueError: If the shapes of the inputs are not as expected.\n IndexError: If block_size*block_size does not divide the input batch size.\n \"\"\"\n # Check that the input tensor is 4-D.\n try:\n input_shape = op.inputs[0].get_shape().with_rank(4)\n except ValueError:\n raise ValueError(\"tf.batch_to_space() requires 4-D input tensor.\")\n\n # Check that the crops tensor is a matrix with shape [2, 2].\n try:\n crops_shape = op.inputs[1].get_shape().with_rank(2)\n except ValueError:\n raise ValueError(\n \"tf.space_to_batch() requires 2-D crops tensor.\")\n\n if crops_shape[0] != 2 or crops_shape[1] != 2:\n raise ValueError(\n \"tf.space_to_batch() requires input crops with shape [2, 2].\")\n\n crops = tensor_util.constant_value(op.inputs[1])\n if (crops is not None and\n (crops[0, 0] < 0 or crops[0, 1] < 0 or\n crops[1, 0] < 0 or crops[1, 1] < 0)):\n raise ValueError(\"crops cannot be negative.\")\n\n block_size = op.get_attr(\"block_size\")\n if block_size <= 1:\n raise ValueError(\"Attribute block_size has to be > 1.\")\n\n input_batch = input_shape[0]\n if input_batch % (block_size * block_size) > 0:\n raise IndexError(\"input batch must be divisible by block_size*block_size.\")\n batch = input_batch // (block_size * block_size)\n\n if crops is not None:\n height = input_shape[1] * block_size - crops[0, 0] - crops[0, 1]\n width = input_shape[2] * block_size - crops[1, 0] - crops[1, 1]\n if height <= 0 or width <= 0:\n raise ValueError(\"Output height or width is not positive.\")\n else:\n height = tensor_shape.Dimension(None)\n width = tensor_shape.Dimension(None)\n depth = 
input_shape[3]\n\n return [tensor_shape.TensorShape([batch, height, width, depth])]\n\n\[email protected](\"SpaceToDepth\")\ndef _SpaceToDepthShape(op):\n \"\"\"Shape function for the SpaceToDepth op.\n\n This op takes two inputs:\n\n * input: a tensor of shape like that [B, H, W, D]\n * block_size: an int.\n\n Its output is the same-rank tensor but with changed\n dimensions like that: [B, H/block_size, W/block_size, D*block_size*block_size]\n\n Args:\n op: A SpaceToDepth op.\n\n Returns:\n A single-element list containing the shape of the output.\n\n Raises:\n ValueError: If the shapes of input are not as expected.\n IndexError: If block_size does not divide W or H.\n \"\"\"\n # Check that the input tensor is of 4 dimensions.\n try:\n input_shape = op.inputs[0].get_shape().with_rank(4)\n except ValueError:\n raise ValueError(\n \"tf.space_to_depth() requires tensors with exactly 4 dimensions.\")\n\n block_size = op.get_attr(\"block_size\")\n if block_size <= 1:\n raise ValueError(\"Attribute block_size has to be > 1.\")\n\n input_height = input_shape[1]\n input_width = input_shape[2]\n\n if (input_width % block_size > 0) or (input_height % block_size > 0):\n raise IndexError(\n \"block_size needs to divide both width and height.\")\n\n width = input_width // block_size\n height = input_height // block_size\n new_depth = input_shape[3] * block_size * block_size\n\n return [tensor_shape.TensorShape(\n [input_shape[0], height, width, new_depth])]\n\n\[email protected](\"DepthToSpace\")\ndef _DepthToSpaceShape(op):\n \"\"\"Shape function for the DepthToSpace op.\n\n This op takes two inputs:\n\n * input: a tensor of shape like that [B, H, W, D]\n * block_size: an int.\n\n Its output is the same-rank tensor but with changed\n dimensions like that:\n [B, H*block_size, W*block_size, D/(block_size*block_size)]\n\n Args:\n op: A DepthToSpace op.\n\n Returns:\n A single-element list containing the shape of the output.\n\n Raises:\n ValueError: If the shapes of input are not as expected.\n IndexError: If block_size*block_size does not divide D.\n \"\"\"\n # Check that the input tensor is of 4 dimensions.\n try:\n input_shape = op.inputs[0].get_shape().with_rank(4)\n except ValueError:\n raise ValueError(\n \"tf.depth_to_space() requires tensors with exactly 4 dimensions.\")\n\n block_size = op.get_attr(\"block_size\")\n if block_size <= 1:\n raise ValueError(\"Attribute block_size has to be > 1.\")\n\n input_height = input_shape[1]\n input_width = input_shape[2]\n input_depth = input_shape[3]\n\n width = input_width * block_size\n height = input_height * block_size\n\n if input_depth % (block_size * block_size) > 0:\n raise IndexError(\n \"block_size*block_size needs to divide the input depth.\")\n\n new_depth = input_depth // (block_size * block_size)\n return [tensor_shape.TensorShape(\n [input_shape[0], height, width, new_depth])]\n\n\ndef one_hot(indices, depth, on_value=None, off_value=None,\n axis=None, dtype=None, name=None):\n \"\"\"Returns a one-hot tensor.\n\n The locations represented by indices in `indices` take value `on_value`,\n while all other locations take value `off_value`. \n\n `on_value` and `off_value` must have matching data types. If `dtype` is also\n provided, they must be the same data type as specified by `dtype`.\n\n If `on_value` is not provided, it will default to the value `1` with type \n `dtype`\n\n If `off_value` is not provided, it will default to the value `0` with type \n `dtype`\n\n If the input `indices` is rank `N`, the output will have rank `N+1`. 
The\n new axis is created at dimension `axis` (default: the new axis is appended\n at the end).\n\n If `indices` is a scalar the output shape will be a vector of length `depth`\n\n If `indices` is a vector of length `features`, the output shape will be:\n ```\n features x depth if axis == -1\n depth x features if axis == 0\n ```\n\n If `indices` is a matrix (batch) with shape `[batch, features]`, the output\n shape will be:\n ```\n batch x features x depth if axis == -1\n batch x depth x features if axis == 1\n depth x batch x features if axis == 0\n ```\n\n If `dtype` is not provided, it will attempt to assume the data type of \n `on_value` or `off_value`, if one or both are passed in. If none of \n `on_value`, `off_value`, or `dtype` are provided, `dtype` will default to the \n value `tf.float32`\n\n Note: If a non-numeric data type output is desired (tf.string, tf.bool, etc.),\n both `on_value` and `off_value` _must_ be provided to `one_hot`\n\n Examples\n =========\n\n Suppose that\n\n ```\n indices = [0, 2, -1, 1]\n depth = 3\n on_value = 5.0\n off_value = 0.0\n axis = -1\n ```\n\n Then output is `[4 x 3]`:\n\n ```\n output =\n [5.0 0.0 0.0] // one_hot(0)\n [0.0 0.0 5.0] // one_hot(2)\n [0.0 0.0 0.0] // one_hot(-1)\n [0.0 5.0 0.0] // one_hot(1)\n ```\n\n Suppose that\n\n ```\n indices = [[0, 2], [1, -1]]\n depth = 3\n on_value = 1.0\n off_value = 0.0\n axis = -1\n ```\n\n Then output is `[2 x 2 x 3]`:\n\n ```\n output =\n [\n [1.0, 0.0, 0.0] // one_hot(0)\n [0.0, 0.0, 1.0] // one_hot(2)\n ][\n [0.0, 1.0, 0.0] // one_hot(1)\n [0.0, 0.0, 0.0] // one_hot(-1)\n ]\n ```\n\n Using default values for `on_value` and `off_value`:\n\n ```\n indices = [0, 1, 2]\n depth = 3\n ```\n\n The output will be\n\n ```\n output = \n [[1., 0., 0.],\n [0., 1., 0.],\n [0., 0., 1.]]\n ```\n\n Args:\n indices: A `Tensor` of indices.\n depth: A scalar defining the depth of the one hot dimension.\n on_value: A scalar defining the value to fill in output when `indices[j]\n = i`. (default: 1)\n off_value: A scalar defining the value to fill in output when `indices[j]\n != i`. (default: 0)\n axis: The axis to fill (default: -1, a new inner-most axis).\n dtype: The data type of the output tensor.\n\n Returns:\n output: The one-hot tensor.\n\n Raises:\n TypeError: If dtype of either `on_value` or `off_value` don't match `dtype`\n TypeError: If dtype of `on_value` and `off_value` don't match one another\n \"\"\"\n with ops.op_scope([indices, depth, on_value, off_value,\n axis, dtype], name, \"one_hot\") as name:\n on_exists = on_value is not None\n off_exists = off_value is not None\n\n on_dtype = ops.convert_to_tensor(on_value).dtype.base_dtype if on_exists \\\n else None\n off_dtype = ops.convert_to_tensor(off_value).dtype.base_dtype if off_exists\\\n else None\n\n if on_exists or off_exists:\n if dtype is not None:\n # Ensure provided on_value and/or off_value match dtype\n if (on_exists and on_dtype != dtype):\n raise TypeError(\"dtype {0} of on_value does not match \" \\\n \"dtype parameter {1}\".format(on_dtype, dtype))\n if (off_exists and off_dtype != dtype):\n raise TypeError(\"dtype {0} of off_value does not match \" \\\n \"dtype parameter {1}\".format(off_dtype, dtype))\n else:\n # dtype not provided: automatically assign it\n dtype = on_dtype if on_exists else off_dtype\n elif dtype is None:\n # None of on_value, off_value, or dtype provided. 
Default dtype to float32\n dtype = dtypes.float32\n\n if not on_exists:\n # on_value not provided: assign to value 1 of type dtype\n on_value = ops.convert_to_tensor(1, dtype, name=\"on_value\")\n on_dtype = dtype\n if not off_exists:\n # off_value not provided: assign to value 0 of type dtype\n off_value = ops.convert_to_tensor(0, dtype, name=\"off_value\")\n off_dtype = dtype\n\n if on_dtype != off_dtype:\n raise TypeError(\"dtype {0} of on_value does not match \" \\\n \"dtype {1} of off_value\".format(on_dtype, off_dtype))\n\n return gen_array_ops._one_hot(indices, depth, on_value, off_value, axis,\n name)\n\n\[email protected](\"OneHot\")\ndef _OneHotShape(op):\n \"\"\"Shape function for the OneHot op.\n\n It closely follows the code in the .cc implementation.\n\n Args:\n op: A OneHot Operation.\n\n Returns:\n A single-element list containing the shape of the output.\n\n Raises:\n ValueError: if axis < -1.\n \"\"\"\n indices_shape = op.inputs[0].get_shape()\n indices_dims = indices_shape.ndims\n depth = tensor_util.constant_value(op.inputs[1])\n axis = op.get_attr(\"axis\")\n\n if axis < -1:\n raise ValueError(\"axis must be >= -1\")\n\n new_shape = None\n if indices_dims is not None:\n new_shape = indices_shape.as_list()\n new_shape.insert(axis % (indices_dims + 1), depth)\n\n return [tensor_shape.TensorShape(new_shape)]\n\n\[email protected](\"PlaceholderWithDefault\")\ndef _PlaceholderWithDefaultShape(op):\n \"\"\"Shape function for the PlaceholderWithDefault op.\n\n This op acts as an identity when it is not fed (passing through a\n default value), but allows the user to feed it with tensors of a\n possibly less precise shape than its default value.\n\n Args:\n op: A PlaceholderWithDefault `Operation`.\n\n Returns:\n A single-element list containing the shape of the output.\n \"\"\"\n input_shape = op.inputs[0].get_shape()\n output_shape = tensor_shape.TensorShape(op.get_attr(\"shape\"))\n # NOTE(mrry): We don't merge these shapes, because `output_shape`\n # may be *less* precise than `input_shape`.\n input_shape.assert_is_compatible_with(output_shape)\n return [output_shape]\n"
] |
[
[
"tensorflow.python.framework.tensor_shape.scalar",
"tensorflow.python.ops.gen_math_ops._range",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.ops.gen_array_ops._mirror_pad",
"tensorflow.python.framework.ops.op_scope",
"tensorflow.python.framework.tensor_shape.matrix",
"tensorflow.python.ops.gen_array_ops._pad",
"tensorflow.python.framework.ops.is_dense_tensor_like",
"tensorflow.python.ops.gen_array_ops._placeholder",
"tensorflow.python.ops.gen_array_ops.strided_slice",
"numpy.reshape",
"tensorflow.python.framework.ops.IndexedSlices",
"tensorflow.python.ops.gen_array_ops.rank",
"tensorflow.python.ops.gen_array_ops._zeros_like",
"tensorflow.python.framework.tensor_util.constant_value",
"tensorflow.python.ops.gen_array_ops._split",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.framework.ops.register_tensor_conversion_function",
"tensorflow.python.framework.common_shapes.unchanged_shape",
"tensorflow.python.ops.gen_array_ops._concat",
"tensorflow.python.framework.ops.RegisterShape",
"tensorflow.python.ops.gen_array_ops._slice",
"tensorflow.python.framework.common_shapes.get2d_conv_output_size",
"tensorflow.python.ops.gen_array_ops._pack",
"tensorflow.python.framework.dtypes.as_dtype",
"tensorflow.python.framework.ops.Tensor._override_operator",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.framework.tensor_shape.unknown_shape",
"tensorflow.python.ops.gen_array_ops.transpose",
"tensorflow.python.framework.tensor_shape.Dimension",
"tensorflow.python.ops.gen_array_ops._edit_distance",
"tensorflow.python.ops.gen_array_ops._one_hot",
"tensorflow.python.framework.tensor_shape.vector",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.gen_array_ops.shape",
"tensorflow.python.ops.gen_array_ops._unpack",
"numpy.prod",
"tensorflow.python.framework.tensor_shape.as_shape",
"tensorflow.python.ops.gen_array_ops.size",
"tensorflow.python.framework.constant_op.constant"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
}
] |
LiuChaoXD/Remote-Sensing-Image-Retrieval-Models
|
[
"c135562263102080716e35260f111dcff7762264"
] |
[
"Deep-Hash-learning-for-Remote-Sensing-Image-Retrieval/codes.py"
] |
[
"import torch\nimport torch.nn as nn\nfrom models.Net import AlexNet, Uniform_D\nfrom dataset.customData import MyCustomDataset\nfrom loss.contrast import Contrast_Loss, Quantization_Loss\nimport numpy as np\nfrom torch.autograd import Variable\nfrom torchvision import transforms\nfrom torch.utils.data import DataLoader\nfrom utils.LoadWeights import load_preweights\nfrom utils.generateUniformData import generate_binary_distribution\nfrom tqdm import tqdm\nimport os\nimport argparse\n\n\ndef extract(args):\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpus\n transform = transforms.Compose([\n transforms.Resize(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n trainset = MyCustomDataset(root_path=args.img_tr, transform=transform)\n testset = MyCustomDataset(root_path=args.img_te, transform=transform)\n trainloader = DataLoader(trainset,\n batch_size=args.batchsize,\n shuffle=False)\n testloader = DataLoader(testset, batch_size=args.batchsize, shuffle=False)\n\n G = AlexNet(num_classes=args.label_dim, Kbits=args.Kbits)\n\n G = G.cuda().float()\n G.eval()\n # parameters path\n G.load_state_dict(torch.load(args.parameters + \"/G.ckpt\"))\n print(\"sucessfully load the G parameters.\")\n code_path = args.codes_dir\n print(\"code path is : \" + str(code_path))\n if os.path.exists(code_path) is False:\n os.makedirs(code_path)\n\n traincodes, testcodes = [], []\n for batch_idx, (data, target) in enumerate(trainloader):\n data, target = data.cuda(), target.cuda()\n _, _, codes = G(data)\n codes = codes.cpu().detach().numpy()\n traincodes.extend(codes)\n\n for batch_idx, (data, target) in enumerate(testloader):\n data, target = data.cuda(), target.cuda()\n _, _, codes = G(data)\n codes = codes.cpu().detach().numpy()\n testcodes.extend(codes)\n\n traincodes, testcodes = (np.array(traincodes) >\n 0.5) / 1, (np.array(testcodes) > 0.5) / 1\n\n # generate training codes and features\n np.save(code_path + \"/traincodes.npy\", traincodes)\n print(\"sucessfully generate train codes\")\n\n # generate testing codes and feautures\n np.save(code_path + \"/testcodes.npy\", testcodes)\n print(\"sucessfully generate test codes\")\n"
] |
[
[
"numpy.array",
"torch.utils.data.DataLoader",
"numpy.save",
"torch.load"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ropg/SARveillance
|
[
"94c0b348df4fc5b9ee532aadfe3514e105441a74",
"94c0b348df4fc5b9ee532aadfe3514e105441a74"
] |
[
"sarveillance/webapp.py",
"sarveillance/utils.py"
] |
[
"import streamlit as st\nimport os\nimport sys\nimport ee\nimport sys\nimport base64\nimport geemap as gee\nfrom geemap import cartoee\nimport pandas as pd\nfrom utils import new_get_image_collection_gif\n\nst.title(\"SARveillance\")\nst.subheader(\"Sentinel-1 SAR time series analysis for OSINT use\")\n\n\nclass SAREXPLORER:\n\n north_arrow_dict1 = {\n \"text\": \"N\",\n \"xy\": (0.1, 0.3),\n \"arrow_length\": 0.15,\n \"text_color\": \"white\",\n \"arrow_color\": \"white\",\n \"fontsize\": 20,\n \"width\": 5,\n \"headwidth\": 15,\n \"ha\": \"center\",\n \"va\": \"center\",\n }\n\n def __init__(self):\n self.gee = gee\n self.bases = []\n self.col_final = None\n self.dirname = os.path.dirname(__file__)\n self.outpath = self.dirname + \"/Data/\"\n\n def run(self):\n self.auth()\n self.get_bases()\n self.get_collection()\n with st.spinner(\"Loading timeseries... this may take a couple of minutes\"):\n self.create_imagery()\n st.success(\"Done!\")\n self.display_gif()\n self.show_download()\n\n def auth(self):\n # self.gee.ee.Authenticate()\n self.gee.ee_initialize()\n\n def get_bases(self):\n self.bases = pd.read_csv(\"bases_df.csv\")\n\n def get_collection(self):\n collection = ee.ImageCollection(\"COPERNICUS/S1_GRD\")\n collection_both = collection.filter(\n ee.Filter.listContains(\"transmitterReceiverPolarisation\", \"VV\")\n ).filter(ee.Filter.eq(\"instrumentMode\", \"IW\"))\n composite_col = collection_both.map(\n lambda image: image.select(\"VH\")\n .subtract(image.select(\"VH\"))\n .rename(\"VH-VV\")\n )\n self.col_final = collection_both.map(self.band_adder)\n\n def band_adder(self, image):\n vh_vv = image.select(\"VH\").subtract(image.select(\"VH\")).rename(\"VH-VV\")\n return image.addBands(vh_vv)\n\n def generate_base_aoi(self, base_name):\n if base_name == \"Custom Location\":\n latitude = custom_lat\n longitude = custom_lon\n else:\n base_gdf = self.bases.loc[self.bases.Name == base_name]\n latitude = base_gdf.iloc[0][\"lat\"]\n longitude = base_gdf.iloc[0][\"lon\"]\n base_point = ee.Geometry.Point([float(longitude), float(latitude)])\n base_buffer = base_point.buffer(3000)\n base_bounds = base_buffer.bounds()\n return base_bounds\n\n def get_filtered_col(self, col, base_name):\n base_aoi = self.generate_base_aoi(base_name)\n filtered_col = col.filterBounds(base_aoi)\n clipped_col = filtered_col.map(lambda image: image.clip(base_aoi))\n return clipped_col\n\n def generate_timeseries_gif(self, base_name, start_date, end_date, outpath):\n col_final_recent = self.col_final.filterDate(\n start_date, end_date\n ) # .sort(\"system:time_start\")\n col_filtered = self.get_filtered_col(col_final_recent, base_name).sort(\n \"system:time_start\"\n )\n aoi = self.generate_base_aoi(base_name)\n minmax = col_filtered.first().reduceRegion(ee.Reducer.minMax(), aoi)\n max = minmax.getNumber(\"VV_max\").getInfo()\n min = minmax.getNumber(\"VV_min\").getInfo()\n if base_name == \"Custom Location\":\n lat = float(custom_lat)\n lon = float(custom_lon)\n else:\n base_gdf = self.bases.loc[self.bases.Name == base_name]\n lat = base_gdf.iloc[0][\"lat\"]\n lon = base_gdf.iloc[0][\"lon\"]\n w = 0.4\n h = 0.4\n region = [lon + w, lat - h, lon - w, lat + h]\n out_dir = os.path.expanduser(outpath)\n filename = base_name + \".gif\"\n out_gif = os.path.join(out_dir, filename)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n visParams = {\n \"bands\": [\"VV\", \"VH\", \"VH-VV\"],\n \"min\": min,\n \"max\": max,\n \"dimensions\": 500,\n \"framesPerSecond\": 2,\n \"region\": aoi,\n \"crs\": 
\"EPSG:32637\",\n }\n return cartoee.get_image_collection_gif(\n ee_ic=col_filtered, # .sort(\"system:time_start\"),\n out_dir=os.path.expanduser(outpath + \"BaseTimeseries/\" + base_name + \"/\"),\n out_gif=base_name + \".gif\",\n vis_params=visParams,\n region=region,\n fps=2,\n mp4=False,\n grid_interval=(0.2, 0.2),\n plot_title=base_name,\n date_format=\"YYYY-MM-dd\",\n fig_size=(10, 10),\n dpi_plot=100,\n file_format=\"png\",\n north_arrow_dict=self.north_arrow_dict1,\n verbose=True,\n )\n\n def create_imagery(self):\n base_name_list = self.bases[\"Name\"].tolist()\n self.generate_timeseries_gif(base_name, start_date, end_date, self.outpath)\n\n def display_gif(self):\n gif_loc = os.path.expanduser(\n self.outpath + \"BaseTimeseries/\" + base_name + \"/\" + base_name + \".gif\"\n )\n file_ = open(gif_loc, \"rb\")\n contents = file_.read()\n data_url = base64.b64encode(contents).decode(\"utf-8\")\n file_.close()\n st.markdown(\n f'<img align=\"left\" width=\"704\" height=\"704\" src=\"data:image/gif;base64,{data_url}\" alt=\"Base Timeseries\">',\n unsafe_allow_html=True,\n )\n\n def show_download(self):\n gif_loc = os.path.expanduser(\n self.outpath + \"BaseTimeseries/\" + base_name + \"/\" + base_name + \".gif\"\n )\n with open(gif_loc, \"rb\") as file:\n btn = st.download_button(\n label=\"Download image\",\n data=file,\n file_name=\"timeseries.gif\",\n mime=\"image/gif\",\n )\n\n\nif __name__ == \"__main__\":\n base_name = st.selectbox(\n \"Which location would you like to examine?\",\n (\n \"Custom Location\",\n \"Lesnovka\",\n \"Klintsy\",\n \"Unecha\",\n \"Klimovo Air Base\",\n \"Yelnya\",\n \"Kursk\",\n \"Pogonovo training ground\",\n \"Valuyki\",\n \"Soloti\",\n \"Opuk\",\n \"Bakhchysarai\",\n \"Novoozerne\",\n \"Dzhankoi\",\n \"Novorossiysk\",\n \"Raevskaya\",\n ),\n )\n st.write(\"You selected:\", base_name)\n if base_name == \"Custom Location\":\n custom_lat = st.text_input(\"Select Latitude\", \"\")\n custom_lon = st.text_input(\"Select Longitude\", \"\")\n start_date = st.text_input(\"Start Date - use format YYYY-MM-DD\", \"2021-11-01\")\n end_date = st.text_input(\"End Date - use format YYYY-MM-DD\", \"2022-01-10\")\n cartoee.get_image_collection_gif = new_get_image_collection_gif\n sar = SAREXPLORER()\n if st.button(\"Generate SAR Timeseries\"):\n sar.run()\n",
"import os\nimport subprocess\nimport ee\nfrom geemap import png_to_gif\nimport matplotlib.pyplot as plt\nfrom geemap.cartoee import get_map, add_gridlines, add_scale_bar_lite, add_north_arrow\n\n\ndef new_get_image_collection_gif(\n ee_ic,\n out_dir,\n out_gif,\n vis_params,\n region,\n cmap=None,\n proj=None,\n fps=10,\n mp4=False,\n grid_interval=None,\n plot_title=\"\",\n date_format=\"YYYY-MM-dd\",\n fig_size=(10, 10),\n dpi_plot=100,\n file_format=\"png\",\n north_arrow_dict={},\n scale_bar_dict={},\n verbose=True,\n):\n \"\"\"Download all the images in an image collection and use them to generate a gif/video.\n Args:\n ee_ic (object): ee.ImageCollection\n out_dir (str): The output directory of images and video.\n out_gif (str): The name of the gif file.\n vis_params (dict): Visualization parameters as a dictionary.\n region (list | tuple): Geospatial region of the image to render in format [E,S,W,N].\n fps (int, optional): Video frames per second. Defaults to 10.\n mp4 (bool, optional): Whether to create mp4 video.\n grid_interval (float | tuple[float]): Float specifying an interval at which to create gridlines, units are decimal degrees. lists will be interpreted a (x_interval, y_interval), such as (0.1, 0.1). Defaults to None.\n plot_title (str): Plot title. Defaults to \"\".\n date_format (str, optional): A pattern, as described at http://joda-time.sourceforge.net/apidocs/org/joda/time/format/DateTimeFormat.html. Defaults to \"YYYY-MM-dd\".\n fig_size (tuple, optional): Size of the figure.\n dpi_plot (int, optional): The resolution in dots per inch of the plot.\n file_format (str, optional): Either 'png' or 'jpg'.\n north_arrow_dict (dict, optional): Parameters for the north arrow. See https://geemap.org/cartoee/#geemap.cartoee.add_north_arrow. Defaults to {}.\n scale_bar_dict (dict, optional): Parameters for the scale bar. See https://geemap.org/cartoee/#geemap.cartoee.add_scale_bar. Defaults. to {}.\n verbose (bool, optional): Whether or not to print text when the program is running. 
Defaults to True.\n \"\"\"\n\n # from .geemap import png_to_gif\n\n out_dir = os.path.abspath(out_dir)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n out_gif = os.path.join(out_dir, out_gif)\n\n count = int(ee_ic.size().getInfo())\n names = ee_ic.aggregate_array(\"system:index\").getInfo()\n images = ee_ic.toList(count)\n\n dates = ee_ic.aggregate_array(\"system:time_start\")\n dates = dates.map(lambda d: ee.Date(d).format(date_format)).getInfo()\n\n # list of file name\n img_list = []\n\n for i, date in enumerate(dates):\n image = ee.Image(images.get(i))\n name = str(names[i])\n # name = name + \".\" + file_format\n name = str(i).zfill(3) + \"_\" + name + \".\" + file_format\n out_img = os.path.join(out_dir, name)\n img_list.append(out_img)\n\n if verbose:\n print(f\"Downloading {i+1}/{count}: {name} ...\")\n\n # Size plot\n plt.figure(figsize=fig_size)\n\n # Plot image\n ax = get_map(image, region=region, vis_params=vis_params, cmap=cmap, proj=proj)\n\n # Add grid\n if grid_interval is not None:\n add_gridlines(ax, interval=grid_interval, linestyle=\":\")\n\n # Add title\n if len(plot_title) > 0:\n ax.set_title(label=plot_title + \" \" + date + \"\\n\", fontsize=15)\n\n # Add scale bar\n if len(scale_bar_dict) > 0:\n add_scale_bar_lite(ax, **scale_bar_dict)\n # Add north arrow\n if len(north_arrow_dict) > 0:\n add_north_arrow(ax, **north_arrow_dict)\n\n # Save plot\n plt.savefig(fname=out_img, dpi=dpi_plot)\n\n plt.clf()\n plt.close()\n\n out_gif = os.path.abspath(out_gif)\n png_to_gif(out_dir, out_gif, fps)\n if verbose:\n print(f\"GIF saved to {out_gif}\")\n\n if mp4:\n\n video_filename = out_gif.replace(\".gif\", \".mp4\")\n\n try:\n import cv2\n except ImportError:\n print(\"Installing opencv-python ...\")\n subprocess.check_call([\"python\", \"-m\", \"pip\", \"install\", \"opencv-python\"])\n import cv2\n\n # Video file name\n output_video_file_name = os.path.join(out_dir, video_filename)\n\n frame = cv2.imread(img_list[0])\n height, width, _ = frame.shape\n frame_size = (width, height)\n fps_video = fps\n\n # Make mp4\n fourcc = cv2.VideoWriter_fourcc(*\"mp4v\")\n\n # Function\n def convert_frames_to_video(\n input_list, output_video_file_name, fps_video, frame_size\n ):\n\n \"\"\"Convert frames to video\n Args:\n input_list (list): Downloaded Image Name List.\n output_video_file_name (str): The name of the video file in the image directory.\n fps_video (int): Video frames per second.\n frame_size (tuple): Frame size.\n \"\"\"\n out = cv2.VideoWriter(output_video_file_name, fourcc, fps_video, frame_size)\n num_frames = len(input_list)\n\n for i in range(num_frames):\n img_path = input_list[i]\n img = cv2.imread(img_path)\n out.write(img)\n\n out.release()\n cv2.destroyAllWindows()\n\n # Use function\n convert_frames_to_video(\n input_list=img_list,\n output_video_file_name=output_video_file_name,\n fps_video=fps_video,\n frame_size=frame_size,\n )\n\n if verbose:\n print(f\"MP4 saved to {output_video_file_name}\")\n"
] |
[
[
"pandas.read_csv"
],
[
"matplotlib.pyplot.close",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
fding253/nxviz
|
[
"e52fa46a751196af9b4c4833ff7529fdda2bcaf7"
] |
[
"examples/matrix/barbell.py"
] |
[
"\"\"\"\nDisplays a NetworkX barbell graph to screen using a CircosPlot.\n\nFeatures of this example:\n- MatrixPlot\n- Styling matrix plot with different colormap.\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport networkx as nx\n\nfrom nxviz.plots import MatrixPlot\n\nG = nx.barbell_graph(m1=10, m2=3)\n\n# Instantiate a MatrixPlot with no custom styling.\nm = MatrixPlot(G)\n\n# Change the cmap prior to drawing.\nm.cmap = plt.cm.get_cmap(\"Greens\")\nm.draw()\nplt.show()\n"
] |
[
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.cm.get_cmap"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
staketd/prom_prog_submodule_main
|
[
"f6dfa5984c1b79e989686d7db124eb529eac67ad"
] |
[
"tensorflow/python/keras/engine/training.py"
] |
[
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Training-related part of the Keras engine.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport numpy as np\n\nfrom tensorflow.python import tf2\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.data.ops import iterator_ops\nfrom tensorflow.python.distribute import distribute_coordinator as dc\nfrom tensorflow.python.distribute import distribution_strategy_context\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import monitoring\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.python.keras import losses\nfrom tensorflow.python.keras import metrics as metrics_module\nfrom tensorflow.python.keras import optimizers\nfrom tensorflow.python.keras.distribute import distributed_training_utils\nfrom tensorflow.python.keras.engine import network\nfrom tensorflow.python.keras.engine import training_arrays\nfrom tensorflow.python.keras.engine import training_distributed\nfrom tensorflow.python.keras.engine import training_eager\nfrom tensorflow.python.keras.engine import training_generator\nfrom tensorflow.python.keras.engine import training_utils\nfrom tensorflow.python.keras.mixed_precision.experimental import loss_scale_optimizer\nfrom tensorflow.python.keras.saving import saving_utils\nfrom tensorflow.python.keras.utils import data_utils\nfrom tensorflow.python.keras.utils import losses_utils\nfrom tensorflow.python.keras.utils.generic_utils import slice_arrays\nfrom tensorflow.python.keras.utils.mode_keys import ModeKeys\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training.tracking import base as trackable\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util.tf_export import keras_export\n\n_keras_api_gauge = monitoring.BoolGauge('/tensorflow/api/keras',\n 'keras api usage', 'method')\n\n\n@keras_export('keras.models.Model', 'keras.Model')\nclass Model(network.Network):\n \"\"\"`Model` groups layers into an object with training and inference features.\n\n There are two ways to instantiate a `Model`:\n\n 1 - With the \"functional API\", where you start from `Input`,\n you chain layer calls to specify the model's forward pass,\n and finally you create your model from inputs and outputs:\n\n ```python\n import tensorflow as tf\n\n inputs = tf.keras.Input(shape=(3,))\n x = tf.keras.layers.Dense(4, activation=tf.nn.relu)(inputs)\n outputs = tf.keras.layers.Dense(5, 
activation=tf.nn.softmax)(x)\n model = tf.keras.Model(inputs=inputs, outputs=outputs)\n ```\n\n 2 - By subclassing the `Model` class: in that case, you should define your\n layers in `__init__` and you should implement the model's forward pass\n in `call`.\n\n ```python\n import tensorflow as tf\n\n class MyModel(tf.keras.Model):\n\n def __init__(self):\n super(MyModel, self).__init__()\n self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu)\n self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax)\n\n def call(self, inputs):\n x = self.dense1(inputs)\n return self.dense2(x)\n\n model = MyModel()\n ```\n\n If you subclass `Model`, you can optionally have\n a `training` argument (boolean) in `call`, which you can use to specify\n a different behavior in training and inference:\n\n ```python\n import tensorflow as tf\n\n class MyModel(tf.keras.Model):\n\n def __init__(self):\n super(MyModel, self).__init__()\n self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu)\n self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax)\n self.dropout = tf.keras.layers.Dropout(0.5)\n\n def call(self, inputs, training=False):\n x = self.dense1(inputs)\n if training:\n x = self.dropout(x, training=training)\n return self.dense2(x)\n\n model = MyModel()\n ```\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(Model, self).__init__(*args, **kwargs)\n # initializing _distribution_strategy here since it is possible to call\n # predict on a model without compiling it.\n self._distribution_strategy = None\n # This flag is used to track if the user is using the deprecated path of\n # passing distribution strategy to compile rather than creating the model\n # under distribution strategy scope.\n self._compile_distribution = False\n\n self._run_eagerly = None\n\n # The epoch at which the checkpoint is saved. Used for fault-tolerance.\n # See `_maybe_load_initial_epoch_from_ckpt()` for more information.\n self._ckpt_saved_epoch = None\n\n def get_weights(self):\n \"\"\"Retrieves the weights of the model.\n\n Returns:\n A flat list of Numpy arrays.\n \"\"\"\n if self._distribution_strategy:\n with self._distribution_strategy.scope():\n return super(Model, self).get_weights()\n return super(Model, self).get_weights()\n\n def load_weights(self, filepath, by_name=False):\n \"\"\"Loads all layer weights, either from a TensorFlow or an HDF5 file.\"\"\"\n if distributed_training_utils.is_tpu_strategy(self._distribution_strategy):\n if (self._distribution_strategy.extended.steps_per_run > 1 and\n (not network._is_hdf5_filepath(filepath))): # pylint: disable=protected-access\n raise ValueError('Load weights is not yet supported with TPUStrategy '\n 'with steps_per_run greater than 1.')\n return super(Model, self).load_weights(filepath, by_name)\n\n @trackable.no_automatic_dependency_tracking\n def compile(self,\n optimizer,\n loss=None,\n metrics=None,\n loss_weights=None,\n sample_weight_mode=None,\n weighted_metrics=None,\n target_tensors=None,\n distribute=None,\n **kwargs):\n \"\"\"Configures the model for training.\n\n Arguments:\n optimizer: String (name of optimizer) or optimizer instance.\n See `tf.keras.optimizers`.\n loss: String (name of objective function), objective function or\n `tf.losses.Loss` instance. See `tf.losses`. If the model has\n multiple outputs, you can use a different loss on each output by\n passing a dictionary or a list of losses. 
The loss value that will\n be minimized by the model will then be the sum of all individual\n losses.\n metrics: List of metrics to be evaluated by the model during training\n and testing. Typically you will use `metrics=['accuracy']`.\n To specify different metrics for different outputs of a\n multi-output model, you could also pass a dictionary, such as\n `metrics={'output_a': 'accuracy', 'output_b': ['accuracy', 'mse']}`.\n You can also pass a list (len = len(outputs)) of lists of metrics\n such as `metrics=[['accuracy'], ['accuracy', 'mse']]` or\n `metrics=['accuracy', ['accuracy', 'mse']]`.\n loss_weights: Optional list or dictionary specifying scalar\n coefficients (Python floats) to weight the loss contributions\n of different model outputs.\n The loss value that will be minimized by the model\n will then be the *weighted sum* of all individual losses,\n weighted by the `loss_weights` coefficients.\n If a list, it is expected to have a 1:1 mapping\n to the model's outputs. If a tensor, it is expected to map\n output names (strings) to scalar coefficients.\n sample_weight_mode: If you need to do timestep-wise\n sample weighting (2D weights), set this to `\"temporal\"`.\n `None` defaults to sample-wise weights (1D).\n If the model has multiple outputs, you can use a different\n `sample_weight_mode` on each output by passing a\n dictionary or a list of modes.\n weighted_metrics: List of metrics to be evaluated and weighted\n by sample_weight or class_weight during training and testing.\n target_tensors: By default, Keras will create placeholders for the\n model's target, which will be fed with the target data during\n training. If instead you would like to use your own\n target tensors (in turn, Keras will not expect external\n Numpy data for these targets at training time), you\n can specify them via the `target_tensors` argument. 
It can be\n a single tensor (for a single-output model), a list of tensors,\n or a dict mapping output names to target tensors.\n distribute: NOT SUPPORTED IN TF 2.0, please create and compile the\n model under distribution strategy scope instead of passing it to\n compile.\n **kwargs: Any additional arguments.\n\n Raises:\n ValueError: In case of invalid arguments for\n `optimizer`, `loss`, `metrics` or `sample_weight_mode`.\n \"\"\"\n _keras_api_gauge.get_cell('compile').set(True)\n self._run_eagerly = kwargs.pop('run_eagerly', None)\n\n if distribute is not None:\n if tf2.enabled():\n raise ValueError(\n 'Distribute argument in compile is not available in TF 2.0 please '\n 'create the model under the distribution strategy scope.')\n logging.warning('Distribute argument in compile is deprecated please '\n 'create the model under the distribution strategy scope.')\n self._distribution_strategy = distribute\n self._compile_distribution = True\n else:\n if distribution_strategy_context.has_strategy():\n # When the user builds the model in the DS scope and cross replica\n # context we want distribution strategy to be set but when building the\n # replica copies of the models internally we should not be compiling\n # with distribution strategy and use the default compilation path.\n if distribution_strategy_context.in_cross_replica_context():\n self._distribution_strategy = (\n distribution_strategy_context.get_strategy())\n\n # Check whether the experimental feature of distributing the Model without\n # cloning is requested.\n # TODO(b/124517980, b/124377929): Remove this temporary undocumented way\n # of enabling the feature and graduate it to the main distributed code path.\n self._cloning = kwargs.pop('cloning', False)\n\n self._validate_compile_param_for_distribution_strategy(self.run_eagerly,\n sample_weight_mode,\n target_tensors,\n weighted_metrics)\n self.optimizer = optimizers.get(optimizer)\n # We've disabled automatic dependency tracking for this method, but do want\n # to add a checkpoint dependency on the optimizer if it's trackable.\n if isinstance(self.optimizer, trackable.Trackable):\n self._track_trackable(\n self.optimizer, name='optimizer', overwrite=True)\n self.loss = loss or {}\n self.loss_weights = loss_weights\n self.sample_weight_mode = sample_weight_mode\n self._compile_metrics = metrics or []\n self._compile_weighted_metrics = weighted_metrics\n if self.run_eagerly and target_tensors is not None:\n raise ValueError(\n 'target_tensors argument is not supported when '\n 'running a model eagerly.')\n\n # _training_endpoints contains a list of _TrainingEndpoint object, which has\n # all the model output/target/loss and related metadata.\n self._training_endpoints = []\n\n # Set tf.distribute.Strategy specific parameters.\n self._distributed_model_cache = {}\n self._distributed_function_cache = {}\n\n if (not context.executing_eagerly() and\n self._distribution_strategy is not None):\n # Ensures a Session is created and configured correctly for Distribution\n # Strategy.\n K.configure_and_create_distributed_session(self._distribution_strategy)\n # Initialize model metric attributes.\n self._init_metric_attributes()\n if not self.built or not self.inputs or not self.outputs:\n # Model is not compilable because it does not know its number of inputs\n # and outputs, nor their shapes and names. 
We will compile after the first\n # time the model gets called on training data.\n return\n self._is_compiled = True\n\n # Prepare list of loss functions, same size of model outputs.\n self.loss_functions = training_utils.prepare_loss_functions(\n self.loss, self.output_names)\n\n target_tensors = self._process_target_tensor_for_compile(target_tensors)\n\n for o, n, l, t in zip(self.outputs, self.output_names,\n self.loss_functions, target_tensors):\n endpoint = _TrainingEndpoint(o, n, l)\n endpoint.create_training_target(t, run_eagerly=self.run_eagerly)\n self._training_endpoints.append(endpoint)\n\n # Prepare list loss weights, same size of model outputs.\n training_utils.prepare_loss_weights(self._training_endpoints, loss_weights)\n\n # Initialization for Eager mode execution.\n if self.run_eagerly:\n self._compile_eagerly(metrics, weighted_metrics, sample_weight_mode)\n return\n\n with K.get_graph().as_default():\n # Save all metric attributes per output of the model.\n self._cache_output_metric_attributes(metrics, weighted_metrics)\n\n # Set metric attributes on model.\n self._set_metric_attributes()\n\n # Invoke metric functions (unweighted) for all the outputs.\n self._handle_metrics(\n self.outputs,\n targets=self._targets,\n skip_target_masks=self._prepare_skip_target_masks(),\n masks=self._prepare_output_masks())\n\n # Prepare sample weight modes. List with the same length as model outputs.\n training_utils.prepare_sample_weight_modes(\n self._training_endpoints, sample_weight_mode)\n\n # Creates the model loss and weighted metrics sub-graphs.\n self._compile_weights_loss_and_weighted_metrics()\n\n # Functions for train, test and predict will\n # be compiled lazily when required.\n # This saves time when the user is not using all functions.\n self._function_kwargs = kwargs\n\n self.train_function = None\n self.test_function = None\n self.predict_function = None\n\n # Collected trainable weights, sorted in topological order.\n self._collected_trainable_weights = self.trainable_weights\n\n # Validate all variables were correctly created in distribution scope.\n if self._distribution_strategy and not self._compile_distribution:\n for v in self.variables:\n strategy = self._distribution_strategy\n if not strategy.extended.variable_created_in_scope(v):\n raise ValueError(\n 'Variable (%s) was not created in the distribution strategy '\n 'scope of (%s). It is most likely due to not all layers or '\n 'the model or optimizer being created outside the distribution '\n 'strategy scope. 
Try to make sure your code looks similar '\n 'to the following.\\n'\n 'with strategy.scope():\\n'\n ' model=_create_model()\\n'\n ' model.compile(...)'% (v, strategy))\n\n @property\n def metrics(self):\n \"\"\"Returns the model's metrics added using `compile`, `add_metric` APIs.\"\"\"\n metrics = []\n if self._is_compiled:\n metrics += self._compile_metric_functions\n return metrics + super(Model, self).metrics\n\n @property\n def metrics_names(self):\n \"\"\"Returns the model's display labels for all outputs.\"\"\"\n metrics_names = []\n if self._is_compiled:\n metrics_names += self._compile_metrics_names # Includes names of losses.\n\n # Add metric names from layers.\n for layer in self.layers:\n metrics_names += [m.name for m in layer._metrics] # pylint: disable=protected-access\n metrics_names += [m.name for m in self._metrics]\n return metrics_names\n\n @property\n def run_eagerly(self):\n \"\"\"Settable attribute indicating whether the model should run eagerly.\n\n Running eagerly means that your model will be run step by step,\n like Python code. Your model might run slower, but it should become easier\n for you to debug it by stepping into individual layer calls.\n\n By default, we will attempt to compile your model to a static graph to\n deliver the best execution performance.\n\n Returns:\n Boolean, whether the model should run eagerly.\n \"\"\"\n if self._run_eagerly is True and not context.executing_eagerly():\n raise ValueError('You can only set `run_eagerly=True` if eager execution '\n 'is enabled.')\n if not self.dynamic:\n if self._run_eagerly is None:\n return False\n else:\n return self._run_eagerly\n else:\n if not context.executing_eagerly():\n raise ValueError('Your model contains layers that can only be '\n 'successfully run in eager execution (layers '\n 'constructed with `dynamic=True`). '\n 'You must enable eager execution with '\n '`tf.enable_eager_execution()`.')\n if self._run_eagerly is False:\n # TODO(fchollet): consider using py_func to enable this.\n raise ValueError('Your model contains layers that can only be '\n 'successfully run in eager execution (layers '\n 'constructed with `dynamic=True`). '\n 'You cannot set `run_eagerly=False`.')\n return context.executing_eagerly()\n\n @run_eagerly.setter\n def run_eagerly(self, value):\n self._run_eagerly = value\n\n def fit(self,\n x=None,\n y=None,\n batch_size=None,\n epochs=1,\n verbose=1,\n callbacks=None,\n validation_split=0.,\n validation_data=None,\n shuffle=True,\n class_weight=None,\n sample_weight=None,\n initial_epoch=0,\n steps_per_epoch=None,\n validation_steps=None,\n validation_freq=1,\n max_queue_size=10,\n workers=1,\n use_multiprocessing=False,\n **kwargs):\n \"\"\"Trains the model for a fixed number of epochs (iterations on a dataset).\n\n Arguments:\n x: Input data. It could be:\n - A Numpy array (or array-like), or a list of arrays\n (in case the model has multiple inputs).\n - A TensorFlow tensor, or a list of tensors\n (in case the model has multiple inputs).\n - A dict mapping input names to the corresponding array/tensors,\n if the model has named inputs.\n - A `tf.data` dataset or a dataset iterator. Should return a tuple\n of either `(inputs, targets)` or\n `(inputs, targets, sample_weights)`.\n - A generator or `keras.utils.Sequence` returning `(inputs, targets)`\n or `(inputs, targets, sample weights)`.\n y: Target data. 
Like the input data `x`,\n it could be either Numpy array(s) or TensorFlow tensor(s).\n It should be consistent with `x` (you cannot have Numpy inputs and\n tensor targets, or inversely). If `x` is a dataset, dataset\n iterator, generator, or `keras.utils.Sequence` instance, `y` should\n not be specified (since targets will be obtained from `x`).\n batch_size: Integer or `None`.\n Number of samples per gradient update.\n If unspecified, `batch_size` will default to 32.\n Do not specify the `batch_size` if your data is in the\n form of symbolic tensors, dataset, dataset iterators,\n generators, or `keras.utils.Sequence` instances (since they generate\n batches).\n epochs: Integer. Number of epochs to train the model.\n An epoch is an iteration over the entire `x` and `y`\n data provided.\n Note that in conjunction with `initial_epoch`,\n `epochs` is to be understood as \"final epoch\".\n The model is not trained for a number of iterations\n given by `epochs`, but merely until the epoch\n of index `epochs` is reached.\n verbose: 0, 1, or 2. Verbosity mode.\n 0 = silent, 1 = progress bar, 2 = one line per epoch.\n Note that the progress bar is not particularly useful when\n logged to a file, so verbose=2 is recommended when not running\n interactively (eg, in a production environment).\n callbacks: List of `keras.callbacks.Callback` instances.\n List of callbacks to apply during training.\n See `tf.keras.callbacks`.\n validation_split: Float between 0 and 1.\n Fraction of the training data to be used as validation data.\n The model will set apart this fraction of the training data,\n will not train on it, and will evaluate\n the loss and any model metrics\n on this data at the end of each epoch.\n The validation data is selected from the last samples\n in the `x` and `y` data provided, before shuffling. This argument is\n not supported when `x` is a dataset, dataset iterator, generator or\n `keras.utils.Sequence` instance.\n validation_data: Data on which to evaluate\n the loss and any model metrics at the end of each epoch.\n The model will not be trained on this data.\n `validation_data` will override `validation_split`.\n `validation_data` could be:\n - tuple `(x_val, y_val)` of Numpy arrays or tensors\n - tuple `(x_val, y_val, val_sample_weights)` of Numpy arrays\n - dataset or a dataset iterator\n For the first two cases, `batch_size` must be provided.\n For the last case, `validation_steps` must be provided.\n shuffle: Boolean (whether to shuffle the training data\n before each epoch) or str (for 'batch').\n 'batch' is a special option for dealing with the\n limitations of HDF5 data; it shuffles in batch-sized chunks.\n Has no effect when `steps_per_epoch` is not `None`.\n class_weight: Optional dictionary mapping class indices (integers)\n to a weight (float) value, used for weighting the loss function\n (during training only).\n This can be useful to tell the model to\n \"pay more attention\" to samples from\n an under-represented class.\n sample_weight: Optional Numpy array of weights for\n the training samples, used for weighting the loss function\n (during training only). You can either pass a flat (1D)\n Numpy array with the same length as the input samples\n (1:1 mapping between weights and samples),\n or in the case of temporal data,\n you can pass a 2D array with shape\n `(samples, sequence_length)`,\n to apply a different weight to every timestep of every sample.\n In this case you should make sure to specify\n `sample_weight_mode=\"temporal\"` in `compile()`. 
This argument is not\n supported when `x` is a dataset, dataset iterator, generator, or\n `keras.utils.Sequence` instance, instead provide the sample_weights\n as the third element of `x`.\n initial_epoch: Integer.\n Epoch at which to start training\n (useful for resuming a previous training run).\n steps_per_epoch: Integer or `None`.\n Total number of steps (batches of samples)\n before declaring one epoch finished and starting the\n next epoch. When training with input tensors such as\n TensorFlow data tensors, the default `None` is equal to\n the number of samples in your dataset divided by\n the batch size, or 1 if that cannot be determined. If x is a\n `tf.data` dataset or a dataset iterator, and 'steps_per_epoch'\n is None, the epoch will run until the input dataset is exhausted.\n validation_steps: Only relevant if `validation_data` is provided and\n is a dataset or dataset iterator. Total number of steps (batches of\n samples) to draw before stopping when performing validation\n at the end of every epoch. If validation_data is a `tf.data` dataset\n or a dataset iterator, and 'validation_steps' is None, validation\n will run until the `validation_data` dataset is exhausted.\n validation_freq: Only relevant if validation data is provided. Integer\n or `collections.Container` instance (e.g. list, tuple, etc.). If an\n integer, specifies how many training epochs to run before a new\n validation run is performed, e.g. `validation_freq=2` runs\n validation every 2 epochs. If a Container, specifies the epochs on\n which to run validation, e.g. `validation_freq=[1, 2, 10]` runs\n validation at the end of the 1st, 2nd, and 10th epochs.\n max_queue_size: Integer. Used for generator or `keras.utils.Sequence`\n input only. Maximum size for the generator queue.\n If unspecified, `max_queue_size` will default to 10.\n workers: Integer. Used for generator or `keras.utils.Sequence` input\n only. Maximum number of processes to spin up\n when using process-based threading. If unspecified, `workers`\n will default to 1. If 0, will execute the generator on the main\n thread.\n use_multiprocessing: Boolean. Used for generator or\n `keras.utils.Sequence` input only. If `True`, use process-based\n threading. If unspecified, `use_multiprocessing` will default to\n `False`. Note that because this implementation relies on\n multiprocessing, you should not pass non-picklable arguments to\n the generator as they can't be passed easily to children processes.\n **kwargs: Used for backwards compatibility.\n\n Returns:\n A `History` object. 
Its `History.history` attribute is\n a record of training loss values and metrics values\n at successive epochs, as well as validation loss values\n and validation metrics values (if applicable).\n\n Raises:\n RuntimeError: If the model was never compiled.\n ValueError: In case of mismatch between the provided input data\n and what the model expects.\n \"\"\"\n _keras_api_gauge.get_cell('train').set(True)\n # Legacy support\n if 'nb_epoch' in kwargs:\n logging.warning(\n 'The `nb_epoch` argument in `fit` '\n 'has been renamed `epochs`.')\n epochs = kwargs.pop('nb_epoch')\n if kwargs:\n raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))\n self._assert_compile_was_called()\n\n # Case 1: distribution strategy.\n if self._distribution_strategy:\n if K.in_multi_worker_mode():\n # Multi-Worker mode runs the Keras training loop on multiple\n # servers via the Distribute Coordinator.\n def _worker_fn(_):\n \"\"\"Run training inside the distributed coordinator.\"\"\"\n filtered_callbacks = distributed_training_utils \\\n .filter_distributed_callbacks(callbacks)\n return training_distributed.fit_distributed(\n self,\n x=x,\n y=y,\n batch_size=batch_size,\n epochs=epochs,\n verbose=verbose,\n callbacks=filtered_callbacks,\n validation_split=validation_split,\n validation_data=validation_data,\n shuffle=shuffle,\n class_weight=class_weight,\n sample_weight=sample_weight,\n initial_epoch=initial_epoch,\n steps_per_epoch=steps_per_epoch,\n validation_steps=validation_steps,\n validation_freq=validation_freq)\n\n # Independent worker only for now.\n return dc.run_distribute_coordinator(\n _worker_fn,\n self._distribution_strategy,\n mode=dc.CoordinatorMode.INDEPENDENT_WORKER)\n else:\n return training_distributed.fit_distributed(\n self,\n x=x,\n y=y,\n batch_size=batch_size,\n epochs=epochs,\n verbose=verbose,\n callbacks=callbacks,\n validation_split=validation_split,\n validation_data=validation_data,\n shuffle=shuffle,\n class_weight=class_weight,\n sample_weight=sample_weight,\n initial_epoch=initial_epoch,\n steps_per_epoch=steps_per_epoch,\n validation_steps=validation_steps,\n validation_freq=validation_freq)\n\n batch_size = self._validate_or_infer_batch_size(\n batch_size, steps_per_epoch, x)\n\n # Case 2: generator-like. 
Input is Python generator, or Sequence object,\n # or a non-distributed Dataset or iterator in eager execution.\n if data_utils.is_generator_or_sequence(x):\n training_utils.check_generator_arguments(\n y, sample_weight, validation_split=validation_split)\n return self.fit_generator(\n x,\n steps_per_epoch=steps_per_epoch,\n epochs=epochs,\n verbose=verbose,\n callbacks=callbacks,\n validation_data=validation_data,\n validation_steps=validation_steps,\n validation_freq=validation_freq,\n class_weight=class_weight,\n max_queue_size=max_queue_size,\n workers=workers,\n use_multiprocessing=use_multiprocessing,\n shuffle=shuffle,\n initial_epoch=initial_epoch)\n if training_utils.is_eager_dataset_or_iterator(x):\n # Make sure that y, sample_weights, validation_split are not passed.\n training_utils.validate_dataset_input(x, y, sample_weight,\n validation_split)\n if (isinstance(x, (dataset_ops.DatasetV1, dataset_ops.DatasetV2))\n and shuffle):\n training_utils.verify_dataset_shuffled(x)\n\n return self.fit_generator(\n x,\n steps_per_epoch=steps_per_epoch,\n epochs=epochs,\n verbose=verbose,\n callbacks=callbacks,\n validation_data=validation_data,\n validation_steps=validation_steps,\n validation_freq=validation_freq,\n class_weight=class_weight,\n workers=0,\n shuffle=shuffle,\n initial_epoch=initial_epoch)\n\n # Case 3: Symbolic tensors or Numpy array-like.\n # This includes Datasets and iterators in graph mode (since they\n # generate symbolic tensors).\n x, y, sample_weights = self._standardize_user_data(\n x,\n y,\n sample_weight=sample_weight,\n class_weight=class_weight,\n batch_size=batch_size,\n check_steps=True,\n steps_name='steps_per_epoch',\n steps=steps_per_epoch,\n validation_split=validation_split,\n shuffle=shuffle)\n\n # Prepare validation data.\n if validation_data:\n val_x, val_y, val_sample_weights = self._unpack_validation_data(\n validation_data)\n val_x, val_y, val_sample_weights = self._standardize_user_data(\n val_x,\n val_y,\n sample_weight=val_sample_weights,\n batch_size=batch_size,\n steps=validation_steps,\n steps_name='validation_steps')\n elif validation_split and 0. < validation_split < 1.:\n if training_utils.has_symbolic_tensors(x):\n raise ValueError('If your data is in the form of symbolic tensors, '\n 'you cannot use `validation_split`.')\n if hasattr(x[0], 'shape'):\n split_at = int(x[0].shape[0] * (1. - validation_split))\n else:\n split_at = int(len(x[0]) * (1. 
- validation_split))\n x, val_x = (slice_arrays(x, 0, split_at), slice_arrays(x, split_at))\n y, val_y = (slice_arrays(y, 0, split_at), slice_arrays(y, split_at))\n if sample_weights:\n sample_weights, val_sample_weights = (\n slice_arrays(sample_weights, 0, split_at),\n slice_arrays(sample_weights, split_at),\n )\n else:\n val_sample_weights = None\n else:\n if validation_steps:\n raise ValueError('`validation_steps` should not be specified if '\n '`validation_data` is None.')\n val_x = None\n val_y = None\n val_sample_weights = None\n\n if self.run_eagerly:\n return training_generator.fit_generator(\n self, (x, y, sample_weights),\n steps_per_epoch=steps_per_epoch,\n batch_size=batch_size,\n epochs=epochs,\n verbose=verbose,\n callbacks=callbacks,\n validation_data=validation_data,\n validation_steps=validation_steps,\n validation_freq=validation_freq,\n workers=0,\n shuffle=shuffle,\n initial_epoch=initial_epoch,\n steps_name='steps_per_epoch')\n else:\n return training_arrays.fit_loop(\n self,\n x,\n y,\n sample_weights=sample_weights,\n batch_size=batch_size,\n epochs=epochs,\n verbose=verbose,\n callbacks=callbacks,\n val_inputs=val_x,\n val_targets=val_y,\n val_sample_weights=val_sample_weights,\n shuffle=shuffle,\n initial_epoch=initial_epoch,\n steps_per_epoch=steps_per_epoch,\n validation_steps=validation_steps,\n validation_freq=validation_freq,\n steps_name='steps_per_epoch')\n\n def evaluate(self,\n x=None,\n y=None,\n batch_size=None,\n verbose=1,\n sample_weight=None,\n steps=None,\n callbacks=None,\n max_queue_size=10,\n workers=1,\n use_multiprocessing=False):\n \"\"\"Returns the loss value & metrics values for the model in test mode.\n\n Computation is done in batches.\n\n Arguments:\n x: Input data. It could be:\n - A Numpy array (or array-like), or a list of arrays\n (in case the model has multiple inputs).\n - A TensorFlow tensor, or a list of tensors\n (in case the model has multiple inputs).\n - A dict mapping input names to the corresponding array/tensors,\n if the model has named inputs.\n - A `tf.data` dataset or a dataset iterator.\n - A generator or `keras.utils.Sequence` instance.\n y: Target data. Like the input data `x`,\n it could be either Numpy array(s) or TensorFlow tensor(s).\n It should be consistent with `x` (you cannot have Numpy inputs and\n tensor targets, or inversely).\n If `x` is a dataset, dataset iterator, generator or\n `keras.utils.Sequence` instance, `y` should not be specified (since\n targets will be obtained from the iterator/dataset).\n batch_size: Integer or `None`.\n Number of samples per gradient update.\n If unspecified, `batch_size` will default to 32.\n Do not specify the `batch_size` is your data is in the\n form of symbolic tensors, dataset, dataset iterators,\n generators, or `keras.utils.Sequence` instances (since they generate\n batches).\n verbose: 0 or 1. Verbosity mode.\n 0 = silent, 1 = progress bar.\n sample_weight: Optional Numpy array of weights for\n the test samples, used for weighting the loss function.\n You can either pass a flat (1D)\n Numpy array with the same length as the input samples\n (1:1 mapping between weights and samples),\n or in the case of temporal data,\n you can pass a 2D array with shape\n `(samples, sequence_length)`,\n to apply a different weight to every timestep of every sample.\n In this case you should make sure to specify\n `sample_weight_mode=\"temporal\"` in `compile()`. 
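The per-sample weighting described above for `evaluate` can be exercised with a minimal sketch such as the following (not part of the dumped source; the toy model, shapes, and weight values are illustrative assumptions):

```python
import numpy as np
import tensorflow as tf

# Toy regression model, illustrative only.
model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer='sgd', loss='mse')

x = np.random.rand(32, 4).astype('float32')
y = np.random.rand(32, 1).astype('float32')

# Flat (1D) per-sample weights: one weight per input sample.
w = np.linspace(0.1, 1.0, 32).astype('float32')

# With no extra metrics configured, the weighted loss comes back as a scalar.
loss = model.evaluate(x, y, sample_weight=w, batch_size=8, verbose=0)
print(loss)
```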
This argument is not\n supported when `x` is a dataset or a dataset iterator, instead pass\n sample weights as the third element of `x`.\n steps: Integer or `None`.\n Total number of steps (batches of samples)\n before declaring the evaluation round finished.\n Ignored with the default value of `None`.\n If x is a `tf.data` dataset or a dataset iterator, and `steps` is\n None, 'evaluate' will run until the dataset is exhausted.\n callbacks: List of `keras.callbacks.Callback` instances.\n List of callbacks to apply during evaluation.\n See [callbacks](/api_docs/python/tf/keras/callbacks).\n max_queue_size: Integer. Used for generator or `keras.utils.Sequence`\n input only. Maximum size for the generator queue.\n If unspecified, `max_queue_size` will default to 10.\n workers: Integer. Used for generator or `keras.utils.Sequence` input\n only. Maximum number of processes to spin up when using\n process-based threading. If unspecified, `workers` will default\n to 1. If 0, will execute the generator on the main thread.\n use_multiprocessing: Boolean. Used for generator or\n `keras.utils.Sequence` input only. If `True`, use process-based\n threading. If unspecified, `use_multiprocessing` will default to\n `False`. Note that because this implementation relies on\n multiprocessing, you should not pass non-picklable arguments to\n the generator as they can't be passed easily to children processes.\n\n Returns:\n Scalar test loss (if the model has a single output and no metrics)\n or list of scalars (if the model has multiple outputs\n and/or metrics). The attribute `model.metrics_names` will give you\n the display labels for the scalar outputs.\n\n Raises:\n ValueError: in case of invalid arguments.\n \"\"\"\n _keras_api_gauge.get_cell('evaluate').set(True)\n self._assert_compile_was_called()\n\n # Case 1: distribution strategy.\n if self._distribution_strategy:\n if K.in_multi_worker_mode():\n # Multi-Worker mode runs the Keras evaluation loop on multiple\n # servers via the Distribute Coordinator.\n def _worker_fn(_):\n \"\"\"Run evaluation inside the distributed coordinator.\"\"\"\n filtered_callbacks = distributed_training_utils \\\n .filter_distributed_callbacks(callbacks)\n return training_distributed.evaluate_distributed(\n self,\n x=x,\n y=y,\n batch_size=batch_size,\n verbose=verbose,\n sample_weight=sample_weight,\n steps=steps,\n callbacks=filtered_callbacks)\n\n # Independent worker only for now.\n return dc.run_distribute_coordinator(\n _worker_fn,\n self._distribution_strategy,\n mode=dc.CoordinatorMode.INDEPENDENT_WORKER)\n else:\n return training_distributed.evaluate_distributed(\n self,\n x=x,\n y=y,\n batch_size=batch_size,\n verbose=verbose,\n sample_weight=sample_weight,\n steps=steps,\n callbacks=callbacks)\n\n batch_size = self._validate_or_infer_batch_size(batch_size, steps, x)\n\n # Case 2: generator-like. 
Input is Python generator, or Sequence object,\n # or a non-distributed Dataset or iterator in eager execution.\n if data_utils.is_generator_or_sequence(x):\n training_utils.check_generator_arguments(y, sample_weight)\n return self.evaluate_generator(\n x,\n steps=steps,\n verbose=verbose,\n callbacks=callbacks,\n max_queue_size=max_queue_size,\n workers=workers,\n use_multiprocessing=use_multiprocessing)\n if training_utils.is_eager_dataset_or_iterator(x):\n # Make sure that y, sample_weights are not passed.\n training_utils.validate_dataset_input(x, y, sample_weight)\n return training_generator.evaluate_generator(\n self, x,\n steps=steps,\n batch_size=batch_size,\n verbose=verbose,\n workers=0,\n callbacks=callbacks)\n\n # Case 3: Symbolic tensors or Numpy array-like.\n # This includes Datasets and iterators in graph mode (since they\n # generate symbolic tensors).\n x, y, sample_weights = self._standardize_user_data(\n x,\n y,\n sample_weight=sample_weight,\n batch_size=batch_size,\n check_steps=True,\n steps_name='steps',\n steps=steps)\n\n if self.run_eagerly:\n return training_generator.evaluate_generator(\n self, (x, y, sample_weights),\n steps=steps,\n batch_size=batch_size,\n verbose=verbose,\n workers=0,\n callbacks=callbacks)\n else:\n return training_arrays.test_loop(\n self,\n inputs=x,\n targets=y,\n sample_weights=sample_weights,\n batch_size=batch_size,\n verbose=verbose,\n steps=steps,\n callbacks=callbacks)\n\n def predict(self,\n x,\n batch_size=None,\n verbose=0,\n steps=None,\n callbacks=None,\n max_queue_size=10,\n workers=1,\n use_multiprocessing=False):\n \"\"\"Generates output predictions for the input samples.\n\n Computation is done in batches.\n\n Arguments:\n x: Input samples. It could be:\n - A Numpy array (or array-like), or a list of arrays\n (in case the model has multiple inputs).\n - A TensorFlow tensor, or a list of tensors\n (in case the model has multiple inputs).\n - A `tf.data` dataset or a dataset iterator.\n - A generator or `keras.utils.Sequence` instance.\n batch_size: Integer or `None`.\n Number of samples per gradient update.\n If unspecified, `batch_size` will default to 32.\n Do not specify the `batch_size` is your data is in the\n form of symbolic tensors, dataset, dataset iterators,\n generators, or `keras.utils.Sequence` instances (since they generate\n batches).\n verbose: Verbosity mode, 0 or 1.\n steps: Total number of steps (batches of samples)\n before declaring the prediction round finished.\n Ignored with the default value of `None`. If x is a `tf.data`\n dataset or a dataset iterator, and `steps` is None, `predict` will\n run until the input dataset is exhausted.\n callbacks: List of `keras.callbacks.Callback` instances.\n List of callbacks to apply during prediction.\n See [callbacks](/api_docs/python/tf/keras/callbacks).\n max_queue_size: Integer. Used for generator or `keras.utils.Sequence`\n input only. Maximum size for the generator queue.\n If unspecified, `max_queue_size` will default to 10.\n workers: Integer. Used for generator or `keras.utils.Sequence` input\n only. Maximum number of processes to spin up when using\n process-based threading. If unspecified, `workers` will default\n to 1. If 0, will execute the generator on the main thread.\n use_multiprocessing: Boolean. Used for generator or\n `keras.utils.Sequence` input only. If `True`, use process-based\n threading. If unspecified, `use_multiprocessing` will default to\n `False`. 
Note that because this implementation relies on\n multiprocessing, you should not pass non-picklable arguments to\n the generator as they can't be passed easily to children processes.\n\n\n Returns:\n Numpy array(s) of predictions.\n\n Raises:\n ValueError: In case of mismatch between the provided\n input data and the model's expectations,\n or in case a stateful model receives a number of samples\n that is not a multiple of the batch size.\n \"\"\"\n _keras_api_gauge.get_cell('predict').set(True)\n # Case 1: distribution strategy.\n if self._distribution_strategy:\n return training_distributed.predict_distributed(self,\n x=x,\n batch_size=batch_size,\n verbose=verbose,\n steps=steps,\n callbacks=callbacks)\n\n batch_size = self._validate_or_infer_batch_size(batch_size, steps, x)\n\n # Case 2: generator-like. Input is Python generator, or Sequence object,\n # or a non-distributed Dataset or iterator in eager execution.\n if data_utils.is_generator_or_sequence(x):\n return self.predict_generator(\n x,\n steps=steps,\n verbose=verbose,\n callbacks=callbacks,\n max_queue_size=max_queue_size,\n workers=workers,\n use_multiprocessing=use_multiprocessing)\n if training_utils.is_eager_dataset_or_iterator(x):\n return training_generator.predict_generator(\n self,\n x,\n steps=steps,\n batch_size=batch_size,\n verbose=verbose,\n workers=0,\n callbacks=callbacks)\n\n # Case 3: Symbolic tensors or Numpy array-like.\n # This includes Datasets and iterators in graph mode (since they\n # generate symbolic tensors).\n x, _, _ = self._standardize_user_data(\n x, check_steps=True, steps_name='steps', steps=steps)\n\n if self.run_eagerly:\n return training_generator.predict_generator(\n self,\n x,\n steps=steps,\n batch_size=batch_size,\n verbose=verbose,\n workers=0,\n callbacks=callbacks)\n else:\n return training_arrays.predict_loop(\n self,\n x,\n batch_size=batch_size,\n verbose=verbose,\n steps=steps,\n callbacks=callbacks)\n\n def reset_metrics(self):\n \"\"\"Resets the state of metrics.\"\"\"\n if hasattr(self, 'metrics'):\n for m in self.metrics:\n m.reset_states()\n\n # Reset the state of loss metric wrappers.\n if getattr(self, '_output_loss_metrics', None) is not None:\n for m in self._output_loss_metrics:\n m.reset_states()\n\n # Reset metrics on all the distributed (cloned) models.\n if self._distribution_strategy:\n distributed_training_utils._reset_metrics(self) # pylint: disable=protected-access\n\n def train_on_batch(self,\n x,\n y=None,\n sample_weight=None,\n class_weight=None,\n reset_metrics=True):\n \"\"\"Runs a single gradient update on a single batch of data.\n\n Arguments:\n x: Input data. It could be:\n - A Numpy array (or array-like), or a list of arrays\n (in case the model has multiple inputs).\n - A TensorFlow tensor, or a list of tensors\n (in case the model has multiple inputs).\n - A dict mapping input names to the corresponding array/tensors,\n if the model has named inputs.\n - A `tf.data` dataset or a dataset iterator.\n y: Target data. Like the input data `x`, it could be either Numpy\n array(s) or TensorFlow tensor(s). It should be consistent with `x`\n (you cannot have Numpy inputs and tensor targets, or inversely). If\n `x` is a dataset or a dataset iterator, `y` should not be specified\n (since targets will be obtained from the iterator).\n sample_weight: Optional array of the same length as x, containing\n weights to apply to the model's loss for each sample. 
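As a rough illustration of how `train_on_batch` is typically driven from user code (a hedged sketch; the model, data, and batch size below are made up for the example):

```python
import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Dense(8, activation='relu', input_shape=(4,)),
    tf.keras.layers.Dense(1),
])
model.compile(optimizer='adam', loss='mse', metrics=['mae'])

x = np.random.rand(256, 4).astype('float32')
y = np.random.rand(256, 1).astype('float32')
batch_size = 32

# Manual training loop: one gradient update per call to train_on_batch.
for epoch in range(3):
    for start in range(0, len(x), batch_size):
        batch_x = x[start:start + batch_size]
        batch_y = y[start:start + batch_size]
        loss_and_metrics = model.train_on_batch(batch_x, batch_y)
    print('epoch', epoch, loss_and_metrics)  # [loss, mae] for the last batch
```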
In the case of\n temporal data, you can pass a 2D array with shape (samples,\n sequence_length), to apply a different weight to every timestep of\n every sample. In this case you should make sure to specify\n sample_weight_mode=\"temporal\" in compile(). This argument is not\n supported when `x` is a dataset or a dataset iterator.\n class_weight: Optional dictionary mapping class indices (integers) to a\n weight (float) to apply to the model's loss for the samples from this\n class during training. This can be useful to tell the model to \"pay\n more attention\" to samples from an under-represented class.\n reset_metrics: If `True`, the metrics returned will be only for this\n batch. If `False`, the metrics will be statefully accumulated across\n batches.\n\n Returns:\n Scalar training loss\n (if the model has a single output and no metrics)\n or list of scalars (if the model has multiple outputs\n and/or metrics). The attribute `model.metrics_names` will give you\n the display labels for the scalar outputs.\n\n Raises:\n ValueError: In case of invalid user-provided arguments.\n \"\"\"\n self._assert_compile_was_called()\n # If at this point we are in the replica context, then it is okay to execute\n # the Eager code path. The expected way to get here is to call `fit` that\n # calls `train_on_batch` on each replica.\n if (self._distribution_strategy and\n distribution_strategy_context.in_cross_replica_context()):\n raise NotImplementedError('`train_on_batch` is not supported for models '\n 'distributed with tf.distribute.Strategy.')\n # Validate and standardize user data.\n x, y, sample_weights = self._standardize_user_data(\n x, y, sample_weight=sample_weight, class_weight=class_weight,\n extract_tensors_from_dataset=True)\n\n # If `self._distribution_strategy` is True, then we are in a replica context\n # at this point because of the check above. `train_on_batch` is being run\n # for each replica by `self._distribution_strategy` and the same code path\n # as Eager is expected to be taken.\n if self.run_eagerly or self._distribution_strategy:\n outputs = training_eager.train_on_batch(\n self,\n x,\n y,\n sample_weights=sample_weights,\n output_loss_metrics=self._output_loss_metrics)\n else:\n x = training_utils.ModelInputs(x).as_list()\n ins = x + (y or []) + (sample_weights or [])\n\n if not isinstance(K.symbolic_learning_phase(), int):\n ins += [True] # Add learning phase value.\n\n self._update_sample_weight_modes(sample_weights=sample_weights)\n self._make_train_function()\n outputs = self.train_function(ins) # pylint: disable=not-callable\n\n if reset_metrics:\n self.reset_metrics()\n\n if len(outputs) == 1:\n return outputs[0]\n return outputs\n\n def test_on_batch(self, x, y=None, sample_weight=None, reset_metrics=True):\n \"\"\"Test the model on a single batch of samples.\n\n Arguments:\n x: Input data. It could be:\n - A Numpy array (or array-like), or a list of arrays\n (in case the model has multiple inputs).\n - A TensorFlow tensor, or a list of tensors\n (in case the model has multiple inputs).\n - A dict mapping input names to the corresponding array/tensors,\n if the model has named inputs.\n - A `tf.data` dataset or a dataset iterator.\n y: Target data. Like the input data `x`,\n it could be either Numpy array(s) or TensorFlow tensor(s).\n It should be consistent with `x` (you cannot have Numpy inputs and\n tensor targets, or inversely). 
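The `reset_metrics` flag described here can be used to accumulate metric values across several batch-level calls; a small sketch under the same toy-model assumptions (the `reset_metrics` argument exists only in `tf.keras` versions matching this source):

```python
import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer='sgd', loss='mse', metrics=['mae'])

x = np.random.rand(64, 4).astype('float32')
y = np.random.rand(64, 1).astype('float32')

model.reset_metrics()
for start in range(0, len(x), 16):
    # With reset_metrics=False the metric values are accumulated statefully
    # across batches, as the docstring above describes.
    results = model.test_on_batch(x[start:start + 16], y[start:start + 16],
                                  reset_metrics=False)
# results[0] is the loss of the last batch; results[1] is the mae aggregated
# over every batch seen since reset_metrics() was called.
print(results)
```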
If `x` is a dataset or a\n dataset iterator, `y` should not be specified\n (since targets will be obtained from the iterator).\n sample_weight: Optional array of the same length as x, containing\n weights to apply to the model's loss for each sample.\n In the case of temporal data, you can pass a 2D array\n with shape (samples, sequence_length),\n to apply a different weight to every timestep of every sample.\n In this case you should make sure to specify\n sample_weight_mode=\"temporal\" in compile(). This argument is not\n supported when `x` is a dataset or a dataset iterator.\n reset_metrics: If `True`, the metrics returned will be only for this\n batch. If `False`, the metrics will be statefully accumulated across\n batches.\n\n Returns:\n Scalar test loss (if the model has a single output and no metrics)\n or list of scalars (if the model has multiple outputs\n and/or metrics). The attribute `model.metrics_names` will give you\n the display labels for the scalar outputs.\n\n Raises:\n ValueError: In case of invalid user-provided arguments.\n \"\"\"\n self._assert_compile_was_called()\n if (self._distribution_strategy and\n distribution_strategy_context.in_cross_replica_context()):\n raise NotImplementedError('`test_on_batch` is not supported for models '\n 'distributed with tf.distribute.Strategy.')\n # Validate and standardize user data.\n x, y, sample_weights = self._standardize_user_data(\n x, y, sample_weight=sample_weight, extract_tensors_from_dataset=True)\n\n # If `self._distribution_strategy` is True, then we are in a replica context\n # at this point.\n if self.run_eagerly or self._distribution_strategy:\n outputs = training_eager.test_on_batch(\n self,\n x,\n y,\n sample_weights=sample_weights,\n output_loss_metrics=self._output_loss_metrics)\n else:\n x = training_utils.ModelInputs(x).as_list()\n inputs = x + (y or []) + (sample_weights or [])\n\n self._update_sample_weight_modes(sample_weights=sample_weights)\n self._make_test_function()\n outputs = self.test_function(inputs) # pylint: disable=not-callable\n\n if reset_metrics:\n self.reset_metrics()\n\n if len(outputs) == 1:\n return outputs[0]\n return outputs\n\n def predict_on_batch(self, x):\n \"\"\"Returns predictions for a single batch of samples.\n\n Arguments:\n x: Input data. 
It could be:\n - A Numpy array (or array-like), or a list of arrays\n (in case the model has multiple inputs).\n - A TensorFlow tensor, or a list of tensors\n (in case the model has multiple inputs).\n - A `tf.data` dataset or a dataset iterator.\n\n Returns:\n Numpy array(s) of predictions.\n\n Raises:\n ValueError: In case of mismatch between given number of inputs and\n expectations of the model.\n \"\"\"\n if (self._distribution_strategy and\n distribution_strategy_context.in_cross_replica_context()):\n raise NotImplementedError(\n '`predict_on_batch` is not supported for models distributed with'\n ' tf.distribute.Strategy.')\n # Validate and standardize user data.\n inputs, _, _ = self._standardize_user_data(\n x, extract_tensors_from_dataset=True)\n # If `self._distribution_strategy` is True, then we are in a replica context\n # at this point.\n if self.run_eagerly or self._distribution_strategy:\n inputs = training_utils.cast_if_floating_dtype(inputs)\n if isinstance(inputs, collections.Sequence):\n # Unwrap lists with only one input, as we do when training on batch\n if len(inputs) == 1:\n inputs = inputs[0]\n\n return self(inputs) # pylint: disable=not-callable\n\n self._make_predict_function()\n outputs = self.predict_function(inputs)\n\n if len(outputs) == 1:\n return outputs[0]\n return outputs\n\n def fit_generator(self,\n generator,\n steps_per_epoch=None,\n epochs=1,\n verbose=1,\n callbacks=None,\n validation_data=None,\n validation_steps=None,\n validation_freq=1,\n class_weight=None,\n max_queue_size=10,\n workers=1,\n use_multiprocessing=False,\n shuffle=True,\n initial_epoch=0):\n \"\"\"Fits the model on data yielded batch-by-batch by a Python generator.\n\n The generator is run in parallel to the model, for efficiency.\n For instance, this allows you to do real-time data augmentation\n on images on CPU in parallel to training your model on GPU.\n\n The use of `keras.utils.Sequence` guarantees the ordering\n and guarantees the single use of every input per epoch when\n using `use_multiprocessing=True`.\n\n Arguments:\n generator: A generator or an instance of `Sequence`\n (`keras.utils.Sequence`)\n object in order to avoid duplicate data\n when using multiprocessing.\n The output of the generator must be either\n - a tuple `(inputs, targets)`\n - a tuple `(inputs, targets, sample_weights)`.\n This tuple (a single output of the generator) makes a single batch.\n Therefore, all arrays in this tuple must have the same length (equal\n to the size of this batch). Different batches may have different\n sizes.\n For example, the last batch of the epoch is commonly smaller than\n the\n others, if the size of the dataset is not divisible by the batch\n size.\n The generator is expected to loop over its data\n indefinitely. An epoch finishes when `steps_per_epoch`\n batches have been seen by the model.\n steps_per_epoch: Total number of steps (batches of samples)\n to yield from `generator` before declaring one epoch\n finished and starting the next epoch. 
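For the `Sequence` path mentioned in the `fit_generator` docstring, a minimal `keras.utils.Sequence` implementation might look like this (the `ToyBatches` class and its data are hypothetical, shown only to illustrate the `__len__`/`__getitem__` contract that `fit_generator` relies on):

```python
import numpy as np
import tensorflow as tf

class ToyBatches(tf.keras.utils.Sequence):
    """Yields (inputs, targets) batches; len() gives the steps per epoch."""

    def __init__(self, x, y, batch_size):
        self.x, self.y, self.batch_size = x, y, batch_size

    def __len__(self):
        # Number of batches per epoch, used when steps_per_epoch is omitted.
        return int(np.ceil(len(self.x) / self.batch_size))

    def __getitem__(self, idx):
        sl = slice(idx * self.batch_size, (idx + 1) * self.batch_size)
        return self.x[sl], self.y[sl]

x = np.random.rand(100, 4).astype('float32')
y = np.random.rand(100, 1).astype('float32')

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer='sgd', loss='mse')
model.fit_generator(ToyBatches(x, y, batch_size=10), epochs=2, verbose=0)
```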
It should typically\n be equal to the number of samples of your dataset\n divided by the batch size.\n Optional for `Sequence`: if unspecified, will use\n the `len(generator)` as a number of steps.\n epochs: Integer, total number of iterations on the data.\n verbose: Verbosity mode, 0, 1, or 2.\n callbacks: List of callbacks to be called during training.\n validation_data: This can be either\n - a generator for the validation data\n - a tuple (inputs, targets)\n - a tuple (inputs, targets, sample_weights).\n validation_steps: Only relevant if `validation_data`\n is a generator. Total number of steps (batches of samples)\n to yield from `generator` before stopping.\n Optional for `Sequence`: if unspecified, will use\n the `len(validation_data)` as a number of steps.\n validation_freq: Only relevant if validation data is provided. Integer\n or `collections.Container` instance (e.g. list, tuple, etc.). If an\n integer, specifies how many training epochs to run before a new\n validation run is performed, e.g. `validation_freq=2` runs\n validation every 2 epochs. If a Container, specifies the epochs on\n which to run validation, e.g. `validation_freq=[1, 2, 10]` runs\n validation at the end of the 1st, 2nd, and 10th epochs.\n class_weight: Dictionary mapping class indices to a weight\n for the class.\n max_queue_size: Integer. Maximum size for the generator queue.\n If unspecified, `max_queue_size` will default to 10.\n workers: Integer. Maximum number of processes to spin up\n when using process-based threading.\n If unspecified, `workers` will default to 1. If 0, will\n execute the generator on the main thread.\n use_multiprocessing: Boolean.\n If `True`, use process-based threading.\n If unspecified, `use_multiprocessing` will default to `False`.\n Note that because this implementation relies on multiprocessing,\n you should not pass non-picklable arguments to the generator\n as they can't be passed easily to children processes.\n shuffle: Boolean. Whether to shuffle the order of the batches at\n the beginning of each epoch. 
Only used with instances\n of `Sequence` (`keras.utils.Sequence`).\n Has no effect when `steps_per_epoch` is not `None`.\n initial_epoch: Epoch at which to start training\n (useful for resuming a previous training run)\n\n Returns:\n A `History` object.\n\n Example:\n\n ```python\n def generate_arrays_from_file(path):\n while 1:\n f = open(path)\n for line in f:\n # create numpy arrays of input data\n # and labels, from each line in the file\n x1, x2, y = process_line(line)\n yield ({'input_1': x1, 'input_2': x2}, {'output': y})\n f.close()\n\n model.fit_generator(generate_arrays_from_file('/my_file.txt'),\n steps_per_epoch=10000, epochs=10)\n ```\n Raises:\n ValueError: In case the generator yields data in an invalid format.\n \"\"\"\n if self._distribution_strategy:\n raise NotImplementedError('`fit_generator` is not supported for '\n 'models compiled with tf.distribute.Strategy.')\n _keras_api_gauge.get_cell('train').set(True)\n return training_generator.fit_generator(\n self,\n generator,\n steps_per_epoch=steps_per_epoch,\n epochs=epochs,\n verbose=verbose,\n callbacks=callbacks,\n validation_data=validation_data,\n validation_steps=validation_steps,\n validation_freq=validation_freq,\n class_weight=class_weight,\n max_queue_size=max_queue_size,\n workers=workers,\n use_multiprocessing=use_multiprocessing,\n shuffle=shuffle,\n initial_epoch=initial_epoch,\n steps_name='steps_per_epoch')\n\n def evaluate_generator(self,\n generator,\n steps=None,\n callbacks=None,\n max_queue_size=10,\n workers=1,\n use_multiprocessing=False,\n verbose=0):\n \"\"\"Evaluates the model on a data generator.\n\n The generator should return the same kind of data\n as accepted by `test_on_batch`.\n\n Arguments:\n generator: Generator yielding tuples (inputs, targets)\n or (inputs, targets, sample_weights)\n or an instance of `keras.utils.Sequence`\n object in order to avoid duplicate data\n when using multiprocessing.\n steps: Total number of steps (batches of samples)\n to yield from `generator` before stopping.\n Optional for `Sequence`: if unspecified, will use\n the `len(generator)` as a number of steps.\n callbacks: List of `keras.callbacks.Callback` instances.\n List of callbacks to apply during evaluation.\n See [callbacks](/api_docs/python/tf/keras/callbacks).\n max_queue_size: maximum size for the generator queue\n workers: Integer. Maximum number of processes to spin up\n when using process-based threading.\n If unspecified, `workers` will default to 1. If 0, will\n execute the generator on the main thread.\n use_multiprocessing: Boolean.\n If `True`, use process-based threading.\n If unspecified, `use_multiprocessing` will default to `False`.\n Note that because this implementation relies on multiprocessing,\n you should not pass non-picklable arguments to the generator\n as they can't be passed easily to children processes.\n verbose: Verbosity mode, 0 or 1.\n\n Returns:\n Scalar test loss (if the model has a single output and no metrics)\n or list of scalars (if the model has multiple outputs\n and/or metrics). 
The attribute `model.metrics_names` will give you\n the display labels for the scalar outputs.\n\n Raises:\n ValueError: in case of invalid arguments.\n\n Raises:\n ValueError: In case the generator yields data in an invalid format.\n \"\"\"\n if self._distribution_strategy:\n raise NotImplementedError('`evaluate_generator` is not supported for '\n 'models compiled with tf.distribute.Strategy.')\n _keras_api_gauge.get_cell('evaluate').set(True)\n return training_generator.evaluate_generator(\n self,\n generator,\n steps=steps,\n max_queue_size=max_queue_size,\n workers=workers,\n use_multiprocessing=use_multiprocessing,\n verbose=verbose,\n callbacks=callbacks)\n\n def predict_generator(self,\n generator,\n steps=None,\n callbacks=None,\n max_queue_size=10,\n workers=1,\n use_multiprocessing=False,\n verbose=0):\n \"\"\"Generates predictions for the input samples from a data generator.\n\n The generator should return the same kind of data as accepted by\n `predict_on_batch`.\n\n Arguments:\n generator: Generator yielding batches of input samples\n or an instance of `keras.utils.Sequence` object in order to\n avoid duplicate data when using multiprocessing.\n steps: Total number of steps (batches of samples)\n to yield from `generator` before stopping.\n Optional for `Sequence`: if unspecified, will use\n the `len(generator)` as a number of steps.\n callbacks: List of `keras.callbacks.Callback` instances.\n List of callbacks to apply during prediction.\n See [callbacks](/api_docs/python/tf/keras/callbacks).\n max_queue_size: Maximum size for the generator queue.\n workers: Integer. Maximum number of processes to spin up\n when using process-based threading.\n If unspecified, `workers` will default to 1. If 0, will\n execute the generator on the main thread.\n use_multiprocessing: Boolean.\n If `True`, use process-based threading.\n If unspecified, `use_multiprocessing` will default to `False`.\n Note that because this implementation relies on multiprocessing,\n you should not pass non-picklable arguments to the generator\n as they can't be passed easily to children processes.\n verbose: verbosity mode, 0 or 1.\n\n Returns:\n Numpy array(s) of predictions.\n\n Raises:\n ValueError: In case the generator yields data in an invalid format.\n \"\"\"\n if self._distribution_strategy:\n raise NotImplementedError('`predict_generator` is not supported for '\n 'models compiled with tf.distribute.Strategy.')\n _keras_api_gauge.get_cell('predict').set(True)\n return training_generator.predict_generator(\n self,\n generator,\n steps=steps,\n max_queue_size=max_queue_size,\n workers=workers,\n use_multiprocessing=use_multiprocessing,\n verbose=verbose,\n callbacks=callbacks)\n\n def _validate_compile_param_for_distribution_strategy(\n self, run_eagerly, sample_weight_mode, target_tensors, weighted_metrics):\n # Validate that arguments passed by the user to `compile` are supported by\n # tf.distribute.Strategy.\n if self._distribution_strategy:\n if sample_weight_mode:\n raise NotImplementedError('sample_weight_mode is not supported with '\n 'tf.distribute.Strategy.')\n if weighted_metrics:\n raise NotImplementedError('weighted_metrics is not supported with '\n 'tf.distribute.Strategy.')\n if target_tensors:\n raise ValueError('target_tensors is not supported with '\n 'tf.distribute.Strategy.')\n\n if run_eagerly:\n raise ValueError(\n 'We currently do not support enabling `run_eagerly` with '\n 'distribution strategy.')\n\n if (distributed_training_utils.is_distributing_by_cloning(self) and\n (not 
self.built or not self.inputs or not self.outputs)):\n raise ValueError(\n 'We currently do not support distribution strategy with a '\n '`Sequential` model that is created without `input_shape`/'\n '`input_dim` set in its first layer or a subclassed model.')\n\n def _process_target_tensor_for_compile(self, target_tensors):\n if self.run_eagerly:\n # target tensor is not supported with run_eagerly. Create a list with None\n # as placeholder for each output.\n return [None for _ in self.output_names]\n\n if target_tensors not in (None, []):\n if isinstance(target_tensors, list):\n if len(target_tensors) != len(self.outputs):\n raise ValueError(\n 'When passing a list as `target_tensors`, '\n 'it should have one entry per model output. '\n 'The model has %s outputs, but you passed target_tensors=%s' %\n (len(self.outputs), target_tensors))\n elif isinstance(target_tensors, dict):\n unexpected_target_tensor_names = set(target_tensors.keys()).difference(\n self.output_names)\n if unexpected_target_tensor_names:\n raise ValueError(\n 'Unknown entry in `target_tensors` dictionary: \"{name}\". '\n 'Only expected the following keys: {keys}'.format(\n name=unexpected_target_tensor_names,\n keys=str(self.output_names)))\n tmp_target_tensors = []\n for name in self.output_names:\n tmp_target_tensors.append(target_tensors.get(name, None))\n target_tensors = tmp_target_tensors\n elif tensor_util.is_tensor(target_tensors):\n target_tensors = [target_tensors]\n else:\n raise TypeError('Expected `target_tensors` to be a list or tuple or '\n 'dict or a single tensor, but got:', target_tensors)\n else:\n # In case target tensor is empty or None, create a list with Nones\n # that has same length as self.output_names. With that, the None check of\n # target tensor can be skipped downstream.\n target_tensors = [None for _ in self.output_names]\n return target_tensors\n\n def _compile_eagerly(self, metrics, weighted_metrics, sample_weight_mode):\n if isinstance(self.optimizer, loss_scale_optimizer.LossScaleOptimizer):\n # TODO(reedwm): Support this.\n raise ValueError('We currently do not support enabling `run_eagerly` '\n 'with a LossScaleOptimizer.')\n\n # Prepare sample weight modes. List with the same length as model outputs.\n training_utils.prepare_sample_weight_modes(\n self._training_endpoints, sample_weight_mode)\n # Prepare sample weights.\n self._prepare_sample_weights()\n # Save all metric attributes per output of the model.\n self._cache_output_metric_attributes(metrics, weighted_metrics)\n self.total_loss = None\n # Set metric attributes on model.\n self._set_metric_attributes()\n\n self._collected_trainable_weights = self.trainable_weights\n\n def _update_sample_weight_modes(self, sample_weights=None):\n \"\"\"Updates sample weight modes based on training/eval inputs.\n\n If model contains `_sample_weight_modes` we check if the input\n `sample_weights` corresponds to the sample weight modes.\n 1. If sample weight mode for output i is 'temporal', we do not\n change it as the `temporal` mode has been set by the user.\n 2. Set sample weight mode to be 'samplewise' for output i if sample\n weight mode was not set before and sample weight inputs are given.\n 3. 
Reset sample weight mode to None for output i if sample weight mode\n was set to 'samplewise' but there is no sample weight input.\n\n Args:\n sample_weights: List of sample weights of the same length as model outputs\n or None.\n \"\"\"\n if not self._is_compiled:\n return\n if not sample_weights:\n sample_weights = [None] * len(self._training_endpoints)\n for endpoint, sample_weight in zip(self._training_endpoints,\n sample_weights):\n if endpoint.sample_weight_mode == 'temporal':\n # If sample weight mode for endpoint is 'temporal', do nothing.\n continue\n if endpoint.sample_weight_mode is None and sample_weight is not None:\n # Set sample weight mode to be 'samplewise' for output i if sample\n # weight mode was not set before and sample weight inputs are given.\n endpoint.sample_weight_mode = 'samplewise'\n elif (endpoint.sample_weight_mode == 'samplewise' and\n sample_weight is None):\n # Reset sample weight mode to None for output i if sample weight mode\n # was set to 'samplewise' but there is no sample weight input.\n endpoint.sample_weight_mode = None\n\n def _recompile_weights_loss_and_weighted_metrics(self):\n if not self._is_compiled:\n return False\n recompile = any([e.sample_weights_mismatch()\n for e in self._training_endpoints])\n\n if recompile:\n self._compile_weights_loss_and_weighted_metrics()\n return recompile\n\n @trackable.no_automatic_dependency_tracking\n def _compile_weights_loss_and_weighted_metrics(self):\n \"\"\"Compiles the model loss and weighted metric sub-graphs.\"\"\"\n\n with K.get_graph().as_default():\n self._prepare_sample_weights()\n\n masks = self._prepare_output_masks()\n\n # Compute weighted metrics.\n self._handle_metrics(\n self.outputs,\n targets=self._targets,\n skip_target_masks=self._prepare_skip_target_masks(),\n sample_weights=self.sample_weights,\n masks=masks,\n return_weighted_metrics=True)\n\n # Compute total loss.\n # Used to keep track of the total loss value (stateless).\n # eg., total_loss = loss_weight_1 * output_1_loss_fn(...) +\n # loss_weight_2 * output_2_loss_fn(...) 
+\n # layer losses.\n self.total_loss = self._prepare_total_loss(masks)\n\n def _prepare_skip_target_masks(self):\n \"\"\"Boolean mask for whether the target in the output list should be skipped.\n\n If the loss function corresponding to a model output is None, then this\n output will be skipped during total loss calculation and feed targets\n preparation.\n\n Returns:\n A boolean list for whether the corresponding target in the output list\n should be skipped during loss calculation.\n \"\"\"\n return [l is None for l in self.loss_functions]\n\n def _prepare_output_masks(self):\n \"\"\"Returns masks corresponding to model outputs.\"\"\"\n return [getattr(x, '_keras_mask', None) for x in self.outputs]\n\n def _prepare_total_loss(self, masks):\n \"\"\"Computes total loss from loss functions.\n\n Arguments:\n masks: List of mask values corresponding to each model output.\n\n Returns:\n A list of loss weights of python floats.\n\n Raises:\n TypeError: If model run_eagerly is True.\n \"\"\"\n if self.run_eagerly:\n raise TypeError('total loss can not be computed when compiled with '\n 'run_eagerly = True.')\n total_loss = None\n with K.name_scope('loss'):\n for endpoint, mask in zip(self._training_endpoints, masks):\n if endpoint.should_skip_target():\n continue\n y_true = endpoint.training_target.target\n y_pred = endpoint.output\n loss_fn = endpoint.loss_fn\n loss_weight = endpoint.loss_weight\n loss_name = endpoint.loss_name()\n sample_weight = endpoint.sample_weight\n\n with K.name_scope(loss_name):\n if mask is not None:\n mask = math_ops.cast(mask, y_pred.dtype)\n # Update weights with mask.\n if sample_weight is None:\n sample_weight = mask\n else:\n # Update dimensions of weights to match with mask if possible.\n mask, _, sample_weight = (\n losses_utils.squeeze_or_expand_dimensions(\n mask, None, sample_weight))\n sample_weight *= mask\n\n if hasattr(loss_fn, 'reduction'):\n per_sample_losses = loss_fn.call(y_true, y_pred)\n weighted_losses = losses_utils.compute_weighted_loss(\n per_sample_losses,\n sample_weight=sample_weight,\n reduction=losses_utils.ReductionV2.NONE)\n loss_reduction = loss_fn.reduction\n\n # `AUTO` loss reduction defaults to `SUM_OVER_BATCH_SIZE` for all\n # compile use cases.\n if loss_reduction == losses_utils.ReductionV2.AUTO:\n loss_reduction = losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE\n\n # Compute the stateless loss value.\n output_loss = losses_utils.reduce_weighted_loss(\n weighted_losses, reduction=loss_reduction)\n else:\n # Compute the stateless loss value for a custom loss class.\n # Here we assume that the class takes care of loss reduction\n # because if this class returns a vector value we cannot\n # differentiate between use case where a custom optimizer\n # expects a vector loss value vs unreduced per-sample loss value.\n output_loss = loss_fn(y_true, y_pred, sample_weight=sample_weight)\n\n if len(self.outputs) > 1:\n # Keep track of stateful result tensor for the loss.\n aggregated_output_loss = endpoint.output_loss_metric(output_loss)\n self._compile_metrics_tensors[loss_name] = aggregated_output_loss\n\n # Scale output loss for distribution. 
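The total-loss assembly above (each output's loss scaled by its loss weight, plus layer and regularization losses) corresponds to what users configure through `compile`; a hedged sketch with made-up output names `a` and `b`:

```python
import numpy as np
import tensorflow as tf

inputs = tf.keras.Input(shape=(8,))
hidden = tf.keras.layers.Dense(16, activation='relu')(inputs)
out_a = tf.keras.layers.Dense(1, name='a')(hidden)
out_b = tf.keras.layers.Dense(1, name='b')(hidden)
model = tf.keras.Model(inputs, [out_a, out_b])

# total loss = 1.0 * mse(a) + 0.2 * mae(b) + any layer regularization losses.
model.compile(optimizer='adam',
              loss={'a': 'mse', 'b': 'mae'},
              loss_weights={'a': 1.0, 'b': 0.2})

x = np.random.rand(32, 8).astype('float32')
ya = np.random.rand(32, 1).astype('float32')
yb = np.random.rand(32, 1).astype('float32')
model.fit(x, {'a': ya, 'b': yb}, epochs=1, verbose=0)
```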
For custom losses we assume\n # reduction was mean.\n if (getattr(loss_fn, 'reduction',\n losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE) ==\n losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE):\n output_loss = losses_utils.scale_loss_for_distribution(output_loss)\n\n if total_loss is None:\n total_loss = loss_weight * output_loss\n else:\n total_loss += loss_weight * output_loss\n if total_loss is None:\n if not self.losses:\n raise ValueError('The model cannot be compiled '\n 'because it has no loss to optimize.')\n else:\n total_loss = 0.\n\n # Add regularization penalties and other layer-specific losses.\n custom_losses = self.get_losses_for(None) + self.get_losses_for(\n self.inputs)\n if custom_losses:\n total_loss += losses_utils.scale_loss_for_distribution(\n math_ops.add_n(custom_losses))\n return total_loss\n\n def _get_callback_model(self):\n \"\"\"Returns the Callback Model for this Model.\"\"\"\n\n if hasattr(self, '_replicated_model') and self._replicated_model:\n # When using training_distributed, we set the callback model\n # to an instance of the `DistributedModel` that we create in\n # the `compile` call. The `DistributedModel` is initialized\n # with the first replicated model. We need to set the callback\n # model to a DistributedModel to allow us to override saving\n # and loading weights when we checkpoint the model during training.\n return self._replicated_model\n if hasattr(self, 'callback_model') and self.callback_model:\n return self.callback_model\n return self\n\n def _make_callback_model(self, grouped_model):\n first_replicated_model = self._distribution_strategy.unwrap(\n grouped_model)[0]\n # We initialize the callback model with the first replicated model.\n self._replicated_model = DistributedCallbackModel(first_replicated_model)\n self._replicated_model.set_original_model(self)\n\n def _validate_or_infer_batch_size(self, batch_size, steps, x):\n \"\"\"Validates that the `batch_size` provided is consistent with InputLayer.\n\n It's possible that the user specified a static batch size in their\n InputLayer. If so, this method checks the provided `batch_size` and `x`\n arguments are consistent with this static batch size. Also, if\n `batch_size` is `None`, this method will attempt to infer the batch size\n from the static batch size of the InputLayer. 
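To illustrate the static-batch-size inference described in this docstring (a sketch with arbitrary shapes, relying on the behaviour of the code dumped here): when the first layer carries a static batch size and neither `batch_size` nor `steps` is passed, that static value is used, and a conflicting explicit `batch_size` raises a `ValueError`.

```python
import numpy as np
import tensorflow as tf

# InputLayer with a static batch size of 8.
inputs = tf.keras.Input(shape=(4,), batch_size=8)
outputs = tf.keras.layers.Dense(1)(inputs)
model = tf.keras.Model(inputs, outputs)
model.compile(optimizer='sgd', loss='mse')

x = np.random.rand(64, 4).astype('float32')
y = np.random.rand(64, 1).astype('float32')

# batch_size is omitted, so it is inferred from the InputLayer (8 here).
model.fit(x, y, epochs=1, verbose=0)

# An explicit batch_size that contradicts the static one is rejected.
try:
    model.fit(x, y, batch_size=12, epochs=1, verbose=0)
except ValueError as e:
    print(e)
```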
Lastly, ValueError will be\n raised if `x` is a tf.data.Dataset and `batch_size` is specified as we\n expect users to provide batched datasets.\n\n Arguments:\n batch_size: The batch_size provided as an argument to\n fit/evaluate/predict.\n steps: The steps provided as an argument to fit/evaluate/predict.\n x: The data passed as `x` to fit/evaluate/predict.\n\n Returns:\n The validated batch_size, auto-inferred from the first layer if not\n provided.\n \"\"\"\n if batch_size is not None and isinstance(x, dataset_ops.DatasetV2):\n raise ValueError('The `batch_size` argument must not be specified when'\n ' using dataset as an input.')\n\n layers = super(Model, self).layers # Avoids the override in Sequential.\n if layers:\n first_layer = layers[0]\n static_batch_size = training_utils.get_static_batch_size(first_layer)\n if static_batch_size is not None:\n split_batch_size = self._distribution_strategy and \\\n distributed_training_utils.global_batch_size_supported(\n self._distribution_strategy)\n if split_batch_size:\n num_replicas = self._distribution_strategy.num_replicas_in_sync\n\n # Check `batch_size` argument is consistent with InputLayer.\n if batch_size is not None:\n if split_batch_size:\n if batch_size % num_replicas != 0:\n raise ValueError('The `batch_size` argument value {} cannot be '\n 'divisible by number of replicas {}'.format(\n batch_size, num_replicas))\n per_replica_batch_size = batch_size // num_replicas\n else:\n per_replica_batch_size = batch_size\n\n if per_replica_batch_size != static_batch_size:\n raise ValueError('The `batch_size` argument value {} is '\n 'incompatible with the specified batch size of '\n 'your Input Layer: {}'.format(\n per_replica_batch_size, static_batch_size))\n\n # Check Dataset/Iterator batch size is consistent with InputLayer.\n if isinstance(x, (dataset_ops.DatasetV2, iterator_ops.Iterator,\n iterator_ops.IteratorV2)):\n ds_batch_size = tensor_shape.as_dimension(\n nest.flatten(dataset_ops.get_legacy_output_shapes(x))[0][0]).value\n if ds_batch_size is not None:\n if split_batch_size:\n if ds_batch_size % num_replicas != 0:\n raise ValueError(\n 'The batch output shape of your `Dataset` {} '\n 'cannot be divisible by number of replicas {}'.format(\n ds_batch_size, num_replicas))\n ds_batch_size = ds_batch_size // num_replicas\n\n if ds_batch_size != static_batch_size:\n raise ValueError('The batch output shape of your `Dataset` is '\n '{}, which is incompatible with the specified '\n 'batch size of your Input Layer: {}'.format(\n ds_batch_size, static_batch_size))\n\n # Set inferred batch size from the InputLayer.\n if steps is None:\n batch_size = static_batch_size\n\n if batch_size is None and steps is None:\n # Backwards compatibility\n batch_size = 32\n return batch_size\n\n def _list_functions_for_serialization(self):\n \"\"\"If available, saves a trace of call using self.inputs.\"\"\"\n all_functions = super(Model, self)._list_functions_for_serialization()\n try:\n # pylint:disable=pointless-statement\n self.inputs\n self.input_names\n # pylint:enable=pointless-statement\n except AttributeError:\n # If the model does not have inputs set, because it was not called or its\n # input shapes were not recorded, we won't have a signature so can't trace\n # a function. 
But the user may still save an object with this Model\n # attached; we won't fail the whole tf.saved_model.save.\n pass\n else:\n if '_default_save_signature' not in all_functions:\n all_functions['_default_save_signature'] = (\n saving_utils.trace_model_call(self))\n return all_functions\n\n def _prepare_sample_weights(self):\n \"\"\"Sets sample weight attribute on the model.\"\"\"\n # List with the same length as model outputs.\n for endpoint in self._training_endpoints:\n endpoint.populate_sample_weight()\n\n def _cache_output_metric_attributes(self, metrics, weighted_metrics):\n \"\"\"Caches metric name and function attributes for every model output.\"\"\"\n output_shapes = []\n for output in self.outputs:\n if output is None or output.shape.rank is None:\n output_shapes.append(None)\n else:\n output_shapes.append(output.shape.as_list())\n self._per_output_metrics = training_utils.collect_per_output_metric_info(\n metrics, self.output_names, output_shapes, self.loss_functions)\n self._per_output_weighted_metrics = (\n training_utils.collect_per_output_metric_info(\n weighted_metrics,\n self.output_names,\n output_shapes,\n self.loss_functions,\n is_weighted=True))\n\n def _add_unique_metric_name(self, metric_name, output_index):\n \"\"\"Makes the metric name unique and adds it to the model's metric name list.\n\n If there are multiple outputs for which the metrics are calculated, the\n metric names have to be made unique by appending an integer.\n\n Arguments:\n metric_name: Metric name that corresponds to the metric specified by the\n user. For example: 'acc'.\n output_index: The index of the model output for which the metric name is\n being added.\n\n Returns:\n string, name of the model's unique metric name\n \"\"\"\n if len(self.output_names) > 1:\n metric_name = '%s_%s' % (self.output_names[output_index], metric_name)\n j = 1\n base_metric_name = metric_name\n while metric_name in self._compile_metrics_names:\n metric_name = '%s_%d' % (base_metric_name, j)\n j += 1\n\n return metric_name\n\n @property\n def _all_metrics_tensors(self):\n \"\"\"Returns a dictionary that maps metric names to metric result tensors.\n\n This maps metric names from `model.metric_names` to result tensors.\n Just like model.metric_names, this includes loss names and tensors.\n \"\"\"\n metrics_tensors = {}\n if self._is_compiled:\n metrics_tensors.update(self._compile_metrics_tensors)\n metrics_tensors.update(super(Model, self)._all_metrics_tensors)\n return metrics_tensors\n\n def _init_metric_attributes(self):\n \"\"\"Initialized model metric attributes.\"\"\"\n # List of all metric names in the model. This includes loss metrics.\n self._compile_metrics_names = ['loss']\n # List of stateful metric functions. Used for resetting metric state during\n # training/eval. This includes loss metric functions.\n self._compile_metric_functions = []\n # Dict of all aggregated metric result tensors. 
This includes aggregated\n # loss result tensors.\n self._compile_metrics_tensors = {}\n\n def _set_per_output_metric_attributes(self, metrics_dict, output_index):\n \"\"\"Sets the metric attributes on the model for the given output.\n\n Arguments:\n metrics_dict: A dict with metric names as keys and metric fns as values.\n output_index: The index of the model output for which the metric\n attributes are added.\n\n Returns:\n Metrics dict updated with unique metric names as keys.\n \"\"\"\n updated_metrics_dict = collections.OrderedDict()\n for metric_name, metric_fn in metrics_dict.items():\n metric_name = self._add_unique_metric_name(metric_name, output_index)\n\n # Update the name on the metric class to be the unique generated name.\n metric_fn._name = metric_name # pylint: disable=protected-access\n updated_metrics_dict[metric_name] = metric_fn\n # Keep track of metric name and function.\n self._compile_metrics_names.append(metric_name)\n self._compile_metric_functions.append(metric_fn)\n return updated_metrics_dict\n\n def _set_metric_attributes(self):\n \"\"\"Sets the metric attributes on the model for all the model outputs.\"\"\"\n # Add loss metric names to the model metric names list.\n if len(self._training_endpoints) > 1:\n metric_names = [\n e.loss_name() for e in self._training_endpoints\n if not e.should_skip_target()\n ]\n self._compile_metrics_names.extend(metric_names)\n\n updated_per_output_metrics = []\n updated_per_output_weighted_metrics = []\n for i, endpoint in enumerate(self._training_endpoints):\n if endpoint.should_skip_target():\n updated_per_output_metrics.append(self._per_output_metrics[i])\n updated_per_output_weighted_metrics.append(\n self._per_output_weighted_metrics[i])\n continue\n updated_per_output_metrics.append(\n self._set_per_output_metric_attributes(self._per_output_metrics[i],\n i))\n updated_per_output_weighted_metrics.append(\n self._set_per_output_metric_attributes(\n self._per_output_weighted_metrics[i], i))\n\n # Create a metric wrapper for each output loss. 
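The per-output metric bookkeeping above is what produces the prefixed names reported by `model.metrics_names` for multi-output models; a small hedged example (the exact metric name strings, and whether they are fully populated before the first train or evaluate call, can differ between versions):

```python
import tensorflow as tf

inputs = tf.keras.Input(shape=(8,))
out_a = tf.keras.layers.Dense(1, name='a')(inputs)
out_b = tf.keras.layers.Dense(1, name='b')(inputs)
model = tf.keras.Model(inputs, [out_a, out_b])

model.compile(optimizer='adam', loss='mse', metrics=['mae'])

# With more than one output, per-output loss and metric names are prefixed
# with the output name, e.g. 'a_loss', 'b_loss', plus per-output 'mae'-style
# metric names, alongside the overall 'loss'.
print(model.metrics_names)
```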
This computes mean of an\n # output loss across mini-batches (irrespective of how we reduce within a\n # batch).\n if len(self._training_endpoints) > 1:\n for endpoint in self._training_endpoints:\n endpoint.output_loss_metric = metrics_module.Mean()\n\n self._per_output_metrics = updated_per_output_metrics\n self._per_output_weighted_metrics = updated_per_output_weighted_metrics\n\n def _handle_per_output_metrics(self,\n metrics_dict,\n y_true,\n y_pred,\n mask,\n weights=None):\n \"\"\"Calls metric functions for a single output.\n\n Arguments:\n metrics_dict: A dict with metric names as keys and metric fns as values.\n y_true: Target output.\n y_pred: Predicted output.\n mask: Computed mask value for the current output.\n weights: Weights to be applied on the current output.\n\n Returns:\n A list of metric result tensors.\n \"\"\"\n metric_results = []\n for metric_name, metric_fn in metrics_dict.items():\n with K.name_scope(metric_name):\n metric_result = training_utils.call_metric_function(\n metric_fn, y_true, y_pred, weights=weights, mask=mask)\n metric_results.append(metric_result)\n if not self.run_eagerly:\n self._compile_metrics_tensors[metric_name] = metric_result\n\n return metric_results\n\n def _handle_metrics(self,\n outputs,\n targets=None,\n skip_target_masks=None,\n sample_weights=None,\n masks=None,\n return_weighted_metrics=False,\n return_weighted_and_unweighted_metrics=False):\n \"\"\"Handles calling metric functions.\n\n Arguments:\n outputs: List of outputs (predictions).\n targets: List of targets.\n skip_target_masks: Optional. List of boolean for whether the corresponding\n target should be ignored or not.\n sample_weights: Optional list of sample weight arrays.\n masks: List of computed output mask values.\n return_weighted_metrics: Flag that indicates whether weighted metrics\n should be computed instead of unweighted metrics. This flag is ignored\n when `return_weighted_and_unweighted_metrics` is enabled.\n return_weighted_and_unweighted_metrics: Flag that is used to indicate\n whether both weighted and unweighted metrics should be computed. When\n this is not enabled, we use `return_weighted_metrics` param to indicate\n whether weighted or unweighted metrics should be returned.\n\n Returns:\n A list of metric result tensors.\n \"\"\"\n # TODO(scottzhu): Update this to use the new training_endpoints. Currently\n # the eager and graph logic is bit different.\n skip_target_masks = skip_target_masks or [False] * len(outputs)\n metric_results = []\n with K.name_scope('metrics'):\n # Invoke all metrics added using `compile`.\n for i in range(len(outputs)):\n if skip_target_masks[i]:\n continue\n output = outputs[i] if outputs else None\n target = targets[i] if targets else None\n output_mask = masks[i] if masks else None\n\n if (return_weighted_and_unweighted_metrics or\n not return_weighted_metrics):\n metric_results.extend(\n self._handle_per_output_metrics(self._per_output_metrics[i],\n target, output, output_mask))\n if return_weighted_and_unweighted_metrics or return_weighted_metrics:\n metric_results.extend(\n self._handle_per_output_metrics(\n self._per_output_weighted_metrics[i],\n target,\n output,\n output_mask,\n weights=sample_weights[i] if sample_weights else None))\n return metric_results\n\n def _check_trainable_weights_consistency(self):\n \"\"\"Check trainable weights count consistency.\n\n This will raise a warning if `trainable_weights` and\n `_collected_trainable_weights` are inconsistent (i.e. 
have different\n number of parameters).\n Inconsistency will typically arise when one modifies `model.trainable`\n without calling `model.compile` again.\n \"\"\"\n if not hasattr(self, '_collected_trainable_weights'):\n return\n\n if len(self.trainable_weights) != len(self._collected_trainable_weights):\n logging.log_first_n(\n logging.WARN, 'Discrepancy between trainable weights and collected'\n ' trainable weights, did you set `model.trainable`'\n ' without calling `model.compile` after ?', 1)\n\n def _make_train_function(self):\n has_recompiled = self._recompile_weights_loss_and_weighted_metrics()\n metrics_tensors = [\n self._all_metrics_tensors[m] for m in self.metrics_names[1:]\n ]\n self._check_trainable_weights_consistency()\n # If we have re-compiled the loss/weighted metric sub-graphs then create\n # train function even if one exists already. This is because\n # `_feed_sample_weights` list has been updated on re-copmpile.\n if getattr(self, 'train_function') is None or has_recompiled:\n inputs = (self._feed_inputs +\n self._feed_targets +\n self._feed_sample_weights)\n if not isinstance(K.symbolic_learning_phase(), int):\n inputs += [K.symbolic_learning_phase()]\n\n with K.get_graph().as_default():\n with K.name_scope('training'):\n # Training updates\n updates = self.optimizer.get_updates(\n params=self._collected_trainable_weights, loss=self.total_loss)\n # Unconditional updates\n updates += self.get_updates_for(None)\n # Conditional updates relevant to this model\n updates += self.get_updates_for(self.inputs)\n\n with K.name_scope('training'):\n # Gets loss and metrics. Updates weights at each call.\n fn = K.function(\n inputs, [self.total_loss] + metrics_tensors,\n updates=updates,\n name='train_function',\n **self._function_kwargs)\n setattr(self, 'train_function', fn)\n\n def _make_test_function(self):\n has_recompiled = self._recompile_weights_loss_and_weighted_metrics()\n metrics_tensors = [\n self._all_metrics_tensors[m] for m in self.metrics_names[1:]\n ]\n # If we have re-compiled the loss/weighted metric sub-graphs then create\n # test function even if one exists already. This is because\n # `_feed_sample_weights` list has been updated on re-copmpile.\n if getattr(self, 'test_function') is None or has_recompiled:\n inputs = (self._feed_inputs +\n self._feed_targets +\n self._feed_sample_weights)\n\n with K.name_scope('evaluation'):\n updates = self.state_updates\n # Return loss and metrics, no gradient updates.\n # Does update the network states.\n fn = K.function(\n inputs, [self.total_loss] + metrics_tensors,\n updates=updates,\n name='test_function',\n **self._function_kwargs)\n setattr(self, 'test_function', fn)\n\n def _make_predict_function(self):\n if not hasattr(self, 'predict_function'):\n self.predict_function = None\n if self.predict_function is None:\n inputs = self._feed_inputs\n # Gets network outputs. 
Does not update weights.\n # Does update the network states.\n kwargs = getattr(self, '_function_kwargs', {})\n with K.name_scope(ModeKeys.PREDICT):\n self.predict_function = K.function(\n inputs,\n self.outputs,\n updates=self.state_updates,\n name='predict_function',\n **kwargs)\n\n def _make_execution_function(self, mode):\n if mode == ModeKeys.TRAIN:\n self._make_train_function()\n return self.train_function\n if mode == ModeKeys.TEST:\n self._make_test_function()\n return self.test_function\n if mode == ModeKeys.PREDICT:\n self._make_predict_function()\n return self.predict_function\n\n def _distribution_standardize_user_data(self,\n x,\n y=None,\n sample_weight=None,\n class_weight=None,\n batch_size=None,\n validation_split=0,\n shuffle=False,\n epochs=1,\n allow_partial_batch=False):\n \"\"\"Runs validation checks on input and target data passed by the user.\n\n This is called when using tf.distribute.Strategy to train, evaluate or serve\n the model.\n\n Args:\n x: Input data. A numpy array or `tf.data` dataset.\n y: Target data. A numpy array or None if x is a `tf.data` dataset.\n sample_weight: An optional sample-weight array passed by the user to\n weight the importance of each sample in `x`.\n class_weight: An optional class-weight array by the user to\n weight the importance of samples in `x` based on the class they belong\n to, as conveyed by `y`.\n batch_size: Integer batch size. If provided, it is used to run additional\n validation checks on stateful models.\n validation_split: Float between 0 and 1.\n Fraction of the training data to be used as validation data.\n shuffle: Boolean whether to shuffle the training data before each epoch.\n epochs: Integer epochs. If > 1, repeat the numpy training data epochs\n times when converting to training dataset.\n allow_partial_batch: Boolean whether to enforce that all batches have the\n same size.\n\n Returns:\n Dataset instance.\n\n Raises:\n ValueError: In case of invalid user-provided data.\n RuntimeError: If the model was never compiled.\n \"\"\"\n if class_weight:\n raise NotImplementedError('`class_weight` is currently not supported '\n 'when using tf.distribute.Strategy.')\n\n if (sample_weight is not None and sample_weight.all() and\n distributed_training_utils.is_tpu_strategy(\n self._distribution_strategy)):\n raise NotImplementedError('`sample_weight` is currently not supported '\n 'when using TPUStrategy.')\n\n if (self.stateful and distributed_training_utils.is_tpu_strategy(\n self._distribution_strategy) and self._distribution_strategy.\n num_replicas_in_sync != 1):\n raise ValueError('Single core must be used for computation on '\n 'stateful models. Consider adding `device_assignment` '\n 'parameter to TPUStrategy using\\n'\n 'topology = tf.contrib.distribute.'\n 'initialize_tpu_system()\\n'\n 'device_assignment = tf.contrib.tpu.DeviceAssignment('\n 'topology, core_assignment=tf.contrib.tpu.'\n 'SINGLE_CORE_ASSIGNMENT)\\n'\n 'tpu_strategy = tf.contrib.distribute.TPUStrategy('\n 'device_assignment=device_assignment)')\n\n # Validates `steps` and `shuffle` arguments right at the beginning\n # since we use it to construct the dataset object.\n # TODO(anjalisridhar): Remove this check once we refactor the\n # _standardize_user_data code path. 
This check is already present elsewhere\n # in the codebase.\n if isinstance(x, dataset_ops.DatasetV2):\n if shuffle:\n training_utils.verify_dataset_shuffled(x)\n\n strategy = self._distribution_strategy\n with strategy.scope():\n # We should be sure to call get_session() inside the strategy.scope()\n # so the strategy can affect the session options.\n if ops.executing_eagerly_outside_functions():\n session = None\n else:\n session = K.get_session()\n\n first_x_value = nest.flatten(x)[0]\n if isinstance(first_x_value, np.ndarray):\n x = distributed_training_utils.list_to_tuple(x)\n if y is not None:\n y = distributed_training_utils.list_to_tuple(y)\n if sample_weight is not None:\n sample_weight = distributed_training_utils.list_to_tuple(\n sample_weight)\n in_tuple = (x, y, sample_weight)\n else:\n in_tuple = (x, y)\n else:\n in_tuple = x\n\n ds = strategy.extended.experimental_make_numpy_dataset(in_tuple,\n session=session)\n if shuffle:\n # We want a buffer size that is larger than the batch size provided by\n # the user and provides sufficient randomness. Note that larger\n # numbers introduce more memory usage based on the size of each\n # sample.\n ds = ds.shuffle(max(1024, batch_size * 8))\n if epochs > 1:\n ds = ds.repeat(epochs)\n\n # We need to use the drop_remainder argument to get a known static\n # input shape which is required for TPUs.\n drop_remainder = (not allow_partial_batch and\n strategy.extended.experimental_require_static_shapes)\n\n # TODO(b/131720208): We still drop remainder here if number of examples\n # is divisible by batch size, as sometimes dynamic padder will time out\n # with keras.metrics.CategoricalAccuracy() metric.\n if distributed_training_utils.is_tpu_strategy(\n strategy) and not drop_remainder:\n dataset_size = first_x_value.shape[0]\n if dataset_size % batch_size == 0:\n drop_remainder = True\n\n x = ds.batch(batch_size, drop_remainder=drop_remainder)\n else:\n assert isinstance(x, dataset_ops.DatasetV2)\n training_utils.validate_dataset_input(x, y, sample_weight,\n validation_split)\n return x\n\n def _standardize_user_data(self,\n x,\n y=None,\n sample_weight=None,\n class_weight=None,\n batch_size=None,\n check_steps=False,\n steps_name='steps',\n steps=None,\n validation_split=0,\n shuffle=False,\n extract_tensors_from_dataset=False):\n \"\"\"Runs validation checks on input and target data passed by the user.\n\n Also standardizes the data to lists of arrays, in order.\n\n Also builds and compiles the model on the fly if it is a subclassed model\n that has never been called before (and thus has no inputs/outputs).\n\n This is a purely internal method, subject to refactoring at any time.\n\n Args:\n x: Input data. It could be:\n - A Numpy array (or array-like), or a list of arrays\n (in case the model has multiple inputs).\n - A TensorFlow tensor, or a list of tensors\n (in case the model has multiple inputs).\n - A dict mapping input names to the corresponding array/tensors,\n if the model has named inputs.\n - A `tf.data` dataset or a dataset iterator.\n y: Target data. Like the input data `x`,\n it could be either Numpy array(s) or TensorFlow tensor(s).\n It should be consistent with `x` (you cannot have Numpy inputs and\n tensor targets, or inversely). 
If `x` is a dataset or a\n dataset iterator, `y` should not be specified\n (since targets will be obtained from the iterator).\n sample_weight: An optional sample-weight array passed by the user to\n weight the importance of each sample in `x`.\n class_weight: An optional class-weight array by the user to\n weight the importance of samples in `x` based on the class they belong\n to, as conveyed by `y`. If both `sample_weight` and `class_weight` are\n provided, the weights are multiplied.\n batch_size: Integer batch size. If provided, it is used to run additional\n validation checks on stateful models.\n check_steps: boolean, True if we want to check for validity of `steps` and\n False, otherwise. For example, when we are standardizing one batch of\n data for train_on_batch/predict_on_batch/test_on_batch APIs, `steps`\n value is not required and we should not check for its validity in these\n cases.\n steps_name: The public API's parameter name for `steps`.\n steps: Integer or `None`. Total number of steps (batches of samples) to\n execute.\n validation_split: Float between 0 and 1.\n Fraction of the training data to be used as validation data.\n shuffle: Boolean whether to shuffle the training data before each epoch.\n extract_tensors_from_dataset: Boolean. When `x` is a dataset instance,\n this indicates whether to extract actual tensors from the dataset or\n instead output the dataset instance itself.\n Set to True when calling from `train_on_batch`/etc.\n\n Returns:\n A tuple of 3: inputs (arrays or dicts, depending on whether `x` was a dict\n or not), target arrays, sample-weight arrays.\n If the model's input and targets are symbolic, these lists are empty\n (since the model takes no user-provided data, instead the data comes\n from the symbolic inputs/targets).\n\n Raises:\n ValueError: In case of invalid user-provided data.\n RuntimeError: If the model was never compiled.\n \"\"\"\n if isinstance(x, (dataset_ops.DatasetV1, dataset_ops.DatasetV2)):\n # Graph mode dataset. We'll pass the dataset as-is (unless\n # `extract_tensors_from_dataset` is True, in which case we extract\n # the tensors from the dataset and we output them.\n training_utils.validate_dataset_input(x, y, sample_weight,\n validation_split)\n if shuffle:\n training_utils.verify_dataset_shuffled(x)\n\n is_dataset = True\n if extract_tensors_from_dataset:\n # We do this for `train_on_batch`/etc.\n x, y, sample_weight = training_utils.extract_tensors_from_dataset(x)\n elif isinstance(x, iterator_ops.Iterator):\n # Graph mode iterator. We extract the symbolic tensors.\n training_utils.validate_dataset_input(x, y, sample_weight,\n validation_split)\n iterator = x\n x, y, sample_weight = training_utils.unpack_iterator_input(iterator)\n is_dataset = True\n else:\n is_dataset = False\n\n # Validates `steps` argument based on x's type.\n if check_steps:\n training_utils.check_steps_argument(x, steps, steps_name)\n\n # First, we build/compile the model on the fly if necessary.\n all_inputs = []\n is_build_called = False\n is_compile_called = False\n # Whether this is a subclassed model that expects dictionary inputs\n # rather than list inputs (e.g. 
FeatureColumn-based models).\n dict_inputs = False\n if not self.inputs:\n # We need to use `x_input` to set the model inputs.\n\n # If input data is a dataset iterator in graph mode or if it is an eager\n # iterator and only one batch of samples is required, we fetch the data\n # tensors from the iterator and then standardize them.\n if isinstance(x, (dataset_ops.DatasetV1, dataset_ops.DatasetV2)):\n x_input, y_input, _ = training_utils.extract_tensors_from_dataset(x)\n else:\n x_input = x\n y_input = y\n # We type-check that `x_input` and `y_input` are either single arrays\n # or lists of arrays.\n if isinstance(x_input, (list, tuple)):\n if not all(isinstance(v, np.ndarray) or\n tensor_util.is_tensor(v) for v in x_input):\n raise ValueError('Please provide as model inputs either a single '\n 'array or a list of arrays. You passed: x=' + str(x))\n all_inputs += list(x_input)\n elif isinstance(x_input, dict):\n dict_inputs = True\n keys = sorted(x_input.keys())\n all_inputs = [x_input[k] for k in keys]\n else:\n if (not isinstance(x_input, np.ndarray) and\n not tensor_util.is_tensor(x_input)):\n raise ValueError('Please provide as model inputs either a single '\n 'array or a list of arrays. You passed: x=' + str(x))\n all_inputs.append(x_input)\n\n # Build the model using the retrieved inputs (value or symbolic).\n # If values or generated from a dataset, then in symbolic-mode\n # placeholders will be created to match the value shapes.\n is_build_called = True\n if is_dataset:\n cast_inputs = nest.map_structure(lambda v: v.shape, x_input)\n elif training_utils.has_tensors(x_input):\n cast_inputs = training_utils.cast_if_floating_dtype(x_input)\n else:\n cast_inputs = x_input\n self._set_inputs(cast_inputs)\n else:\n y_input = y\n dict_inputs = isinstance(self.inputs, dict)\n\n if not self._is_compiled and self.optimizer:\n # On-the-fly compilation of the model.\n if y_input is not None:\n # We need to use `y` to set the model targets.\n if training_utils.has_tensors(y_input):\n y_input = training_utils.cast_if_floating_dtype(y_input)\n if isinstance(y_input, (list, tuple)):\n if not all(isinstance(v, np.ndarray) or\n tensor_util.is_tensor(v) for v in y_input):\n raise ValueError('Please provide as model targets either a single '\n 'array or a list of arrays. '\n 'You passed: y=' + str(y))\n all_inputs += list(y_input)\n elif isinstance(y_input, dict):\n raise ValueError('You cannot pass a dictionary as model targets.')\n else:\n if (not isinstance(y_input, np.ndarray) and\n not tensor_util.is_tensor(y_input)):\n raise ValueError('Please provide as model targets either a single '\n 'array or a list of arrays. '\n 'You passed: y=' + str(y))\n all_inputs.append(y_input)\n\n # Typecheck that all inputs are *either* value *or* symbolic.\n # TODO(fchollet): this check could be removed in Eager mode?\n if any(tensor_util.is_tensor(v) for v in all_inputs):\n if not all(tensor_util.is_tensor(v) for v in all_inputs):\n raise ValueError('Do not pass inputs that mix Numpy arrays and '\n 'TensorFlow tensors. 
'\n 'You passed: x=' + str(x) + '; y=' + str(y))\n\n if is_dataset or context.executing_eagerly():\n target_tensors = None\n else:\n # Handle target tensors if any passed.\n if y_input is not None:\n if not isinstance(y_input, (list, tuple)):\n y_input = [y_input]\n target_tensors = [v for v in y_input if _is_symbolic_tensor(v)]\n else:\n target_tensors = None\n is_compile_called = True\n self.compile(\n optimizer=self.optimizer,\n loss=self.loss,\n metrics=self._compile_metrics,\n weighted_metrics=self._compile_weighted_metrics,\n loss_weights=self.loss_weights,\n target_tensors=target_tensors,\n run_eagerly=self.run_eagerly,\n cloning=self._cloning)\n\n # In graph mode, if we had just set inputs and targets as symbolic tensors\n # by invoking build and compile on the model respectively, we do not have to\n # feed anything to the model. Model already has input and target data as\n # part of the graph.\n # Note: in this case, `any` and `all` are equivalent since we disallow\n # mixed symbolic/value inputs.\n if (not self.run_eagerly and is_build_called and is_compile_called and\n not is_dataset and any(_is_symbolic_tensor(v) for v in all_inputs)):\n return [], [], None\n\n # What follows is input validation and standardization to list format,\n # in the case where all inputs are value arrays.\n\n if self.run_eagerly:\n # In eager mode, do not do shape validation\n # since the network has no input nodes (placeholders) to be fed.\n feed_input_names = self.input_names\n feed_input_shapes = None\n elif not self._is_graph_network:\n # Case: symbolic-mode subclassed network. Do not do shape validation.\n feed_input_names = self._feed_input_names\n feed_input_shapes = None\n else:\n # Case: symbolic-mode graph network.\n # In this case, we run extensive shape validation checks.\n feed_input_names = self._feed_input_names\n feed_input_shapes = self._feed_input_shapes\n\n # Standardize the inputs.\n if not isinstance(x, (dataset_ops.DatasetV1, dataset_ops.DatasetV2)):\n # TODO(fchollet): run static checks with dataset output shape(s).\n x = training_utils.standardize_input_data(\n x,\n feed_input_names,\n feed_input_shapes,\n check_batch_axis=False, # Don't enforce the batch size.\n exception_prefix='input')\n\n if y is not None:\n if not self._is_graph_network:\n feed_output_names = self._feed_output_names\n feed_output_shapes = None\n # Sample weighting not supported in this case.\n # TODO(fchollet): consider supporting it.\n feed_sample_weight_modes = [None for _ in self.outputs]\n else:\n feed_output_names = self._feed_output_names\n feed_output_shapes = self._feed_output_shapes\n feed_sample_weight_modes = self._sample_weight_modes\n\n # Standardize the outputs.\n y = training_utils.standardize_input_data(\n y,\n feed_output_names,\n # Don't enforce target shapes to match output shapes.\n # Precise checks will be run in `check_loss_and_target_compatibility`.\n shapes=None,\n check_batch_axis=False, # Don't enforce the batch size.\n exception_prefix='target')\n\n # Generate sample-wise weight values given the `sample_weight` and\n # `class_weight` arguments.\n sample_weights = training_utils.standardize_sample_weights(\n sample_weight, feed_output_names)\n class_weights = training_utils.standardize_class_weights(\n class_weight, feed_output_names)\n sample_weights = [\n training_utils.standardize_weights(ref, sw, cw, mode)\n for (ref, sw, cw, mode) in zip(y, sample_weights, class_weights,\n feed_sample_weight_modes)\n ]\n # Check that all arrays have the same length.\n if not 
self._distribution_strategy:\n training_utils.check_array_lengths(x, y, sample_weights)\n if self._is_graph_network and not self.run_eagerly:\n # Additional checks to avoid users mistakenly using improper loss fns.\n training_utils.check_loss_and_target_compatibility(\n y, self._feed_loss_fns, feed_output_shapes)\n\n # If sample weight mode has not been set and weights are None for all the\n # model outputs, return None (we do not create placeholders for\n # sample weights) so we do not want to feed any value.\n is_sample_weight_mode_set = any(\n s is not None for s in feed_sample_weight_modes)\n if (not is_sample_weight_mode_set and\n all(s is None for s in sample_weights)):\n sample_weights = None # If the list contains only None, return None\n else:\n y = []\n sample_weights = None\n\n if self.stateful and batch_size:\n # Check that for stateful networks, number of samples is a multiple\n # of the static batch size.\n if x[0].shape[0] % batch_size != 0:\n raise ValueError('In a stateful network, '\n 'you should only pass inputs with '\n 'a number of samples that can be '\n 'divided by the batch size. Found: ' +\n str(x[0].shape[0]) + ' samples')\n\n # If dictionary inputs were provided, we return a dictionary as well.\n if dict_inputs and not isinstance(x, (dataset_ops.DatasetV1,\n dataset_ops.DatasetV2)):\n x = dict(zip(feed_input_names, x))\n return x, y, sample_weights\n\n def _unpack_validation_data(self, validation_data):\n if (isinstance(validation_data, (iterator_ops.Iterator,\n iterator_ops.IteratorV2,\n dataset_ops.DatasetV2))):\n val_x = validation_data\n val_y = None\n val_sample_weight = None\n elif len(validation_data) == 2:\n val_x, val_y = validation_data # pylint: disable=unpacking-non-sequence\n val_sample_weight = None\n elif len(validation_data) == 3:\n val_x, val_y, val_sample_weight = validation_data # pylint: disable=unpacking-non-sequence\n else:\n raise ValueError(\n 'When passing a `validation_data` argument, '\n 'it must contain either 2 items (x_val, y_val), '\n 'or 3 items (x_val, y_val, val_sample_weights), '\n 'or alternatively it could be a dataset or a '\n 'dataset or a dataset iterator. '\n 'However we received `validation_data=%s`' % validation_data)\n return val_x, val_y, val_sample_weight\n\n # TODO(omalleyt): Consider changing to a more descriptive function name.\n def _set_inputs(self, inputs, outputs=None, training=None):\n \"\"\"Set model's input and output specs based on the input data received.\n\n This is to be used for Model subclasses, which do not know at instantiation\n time what their inputs look like.\n\n Args:\n inputs: Single array, or list of arrays. The arrays could be placeholders,\n Numpy arrays, data tensors, or TensorShapes.\n - if placeholders: the model is built on top of these placeholders,\n and we expect Numpy data to be fed for them when calling `fit`/etc.\n - if Numpy data or TensorShapes: we create placeholders matching the\n TensorShapes or shapes of the Numpy arrays. We expect Numpy data to be\n fed for these placeholders when calling `fit`/etc.\n - if data tensors: the model is built on top of these tensors.\n We do not expect any Numpy data to be provided when calling `fit`/etc.\n outputs: None, a data tensor, or a list of tensors. If None, the\n outputs will be determined by invoking `self.call()`, otherwise the\n provided value will be used.\n training: Boolean or None. Only relevant in symbolic mode. 
Specifies\n whether to build the model's graph in inference mode (False), training\n mode (True), or using the Keras learning phase (None).\n Raises:\n ValueError: If dict inputs are passed to a Sequential Model where the\n first layer isn't FeatureLayer.\n \"\"\"\n inputs = self._set_input_attrs(inputs)\n\n if outputs is None:\n kwargs = {'training': training} if self._expects_training_arg else {}\n try:\n outputs = self(inputs, **kwargs)\n except NotImplementedError:\n # This Model or a submodel is dynamic and hasn't overridden\n # `compute_output_shape`.\n outputs = None\n\n self._set_output_attrs(outputs)\n\n @trackable.no_automatic_dependency_tracking\n def _set_input_attrs(self, inputs):\n \"\"\"Sets attributes related to the inputs of the Model.\"\"\"\n if self.inputs:\n raise ValueError('Model inputs are already set.')\n\n if self.__class__.__name__ == 'Sequential' and not self.built:\n if tensor_util.is_tensor(inputs):\n input_shape = (None,) + tuple(inputs.shape.as_list()[1:])\n elif isinstance(inputs, tensor_shape.TensorShape):\n input_shape = (None,) + tuple(inputs.as_list()[1:])\n elif isinstance(inputs, dict):\n # We assert that the first layer is a FeatureLayer.\n if not training_utils.is_feature_layer(self.layers[0]):\n raise ValueError('Passing a dictionary input to a Sequential Model '\n 'which doesn\\'t have FeatureLayer as the first layer'\n ' is an error.')\n input_shape = (None,)\n else:\n input_shape = (None,) + tuple(inputs.shape[1:])\n self._build_input_shape = input_shape\n\n # On-the-fly setting of symbolic model inputs (either by using the tensor\n # provided, or by creating a placeholder if Numpy data was provided).\n model_inputs = training_utils.ModelInputs(inputs)\n inputs = model_inputs.get_symbolic_inputs()\n self.inputs = model_inputs.get_symbolic_inputs(return_single_as_list=True)\n self.input_names = model_inputs.get_input_names()\n\n self._feed_inputs = []\n self._feed_input_names = []\n self._feed_input_shapes = []\n\n for k, v in model_inputs.as_dict():\n if K.is_placeholder(v):\n self._feed_input_names.append(k)\n self._feed_inputs.append(v)\n self._feed_input_shapes.append(K.int_shape(v))\n\n return inputs\n\n @trackable.no_automatic_dependency_tracking\n def _set_output_attrs(self, outputs):\n \"\"\"Sets attributes related to the outputs of the Model.\"\"\"\n outputs = nest.flatten(outputs)\n self.outputs = outputs\n self.output_names = training_utils.generic_output_names(outputs)\n # TODO(scottzhu): Should we cleanup the self._training_endpoints here?\n self.built = True\n\n @property\n def _targets(self):\n \"\"\"The output target tensors for the model.\"\"\"\n return [\n e.training_target.target\n for e in self._training_endpoints\n if e.has_training_target()\n ]\n\n @property\n def _feed_targets(self):\n return [\n e.training_target.target\n for e in self._training_endpoints\n if e.has_feedable_training_target()\n ]\n\n @property\n def _feed_output_names(self):\n return [\n e.output_name\n for e in self._training_endpoints\n if e.has_feedable_training_target()\n ]\n\n @property\n def _feed_output_shapes(self):\n return [\n e.feed_output_shape\n for e in self._training_endpoints\n if e.has_feedable_training_target()\n ]\n\n @property\n def _feed_loss_fns(self):\n return [\n e.loss_fn\n for e in self._training_endpoints\n if e.has_feedable_training_target()\n ]\n\n @property\n def _loss_weights_list(self):\n return [e.loss_weight for e in self._training_endpoints]\n\n @property\n def _output_loss_metrics(self):\n if hasattr(self, 
'_training_endpoints'):\n return [\n e.output_loss_metric\n for e in self._training_endpoints\n if e.output_loss_metric is not None\n ]\n return None\n\n @property\n def sample_weights(self):\n return [e.sample_weight for e in self._training_endpoints]\n\n @property\n def _sample_weight_modes(self):\n return [e.sample_weight_mode for e in self._training_endpoints]\n\n @property\n def _feed_sample_weights(self):\n return [e.sample_weight for e in self._training_endpoints\n if e.sample_weight is not None]\n\n def _maybe_load_initial_epoch_from_ckpt(self, initial_epoch, mode):\n \"\"\"Maybe load initial epoch from ckpt considering possible worker recovery.\n\n When `_ckpt_saved_epoch` attribute is not None in a `Model` object at the\n time the training starts, this is under multi-worker training setting and\n indicates the worker is recovering from previous failure. In this case,\n infer `initial_epoch` from `self._ckpt_saved_epoch` to continue previous\n unfinished training from certain epoch.\n\n Arguments:\n initial_epoch: The original initial_epoch user passes in in `fit()`.\n mode: The training mode.\n\n Returns:\n If the training is recovering from previous failure under multi-worker\n training setting, return the epoch the training is supposed to continue\n at. Otherwise, return the `initial_epoch` the user passes in.\n \"\"\"\n # TODO(rchao): Add recovery for validation case\n # (when mode == ModeKeys.TEST).\n if mode == ModeKeys.TRAIN and self._ckpt_saved_epoch is not None:\n # The most recently saved epoch is one epoch prior to the epoch it failed\n # at, so return '_ckpt_saved_epoch' plus one.\n return int(self._ckpt_saved_epoch) + 1\n return initial_epoch\n\n def _assert_compile_was_called(self):\n # Checks whether `compile` has been called. If it has been called,\n # then the optimizer is set. This is different from whether the\n # model is compiled\n # (i.e. whether the model is built and its inputs/outputs are set).\n if not self.optimizer:\n raise RuntimeError('You must compile your model before '\n 'training/testing. 
'\n 'Use `model.compile(optimizer, loss)`.')\n\n\nclass DistributedCallbackModel(Model):\n \"\"\"Model that is used for callbacks with tf.distribute.Strategy.\"\"\"\n\n def __init__(self, model):\n super(DistributedCallbackModel, self).__init__()\n self.optimizer = model.optimizer\n\n def set_original_model(self, orig_model):\n self._original_model = orig_model\n\n def save_weights(self, filepath, overwrite=True, save_format=None):\n self._replicated_model.save_weights(filepath, overwrite=overwrite,\n save_format=save_format)\n\n def save(self, filepath, overwrite=True, include_optimizer=True):\n # save weights from the distributed model to the original model\n distributed_model_weights = self.get_weights()\n self._original_model.set_weights(distributed_model_weights)\n # TODO(anjalisridhar): Do we need to save the original model here?\n # Saving the first replicated model works as well.\n self._original_model.save(filepath, overwrite=True, include_optimizer=False)\n\n def load_weights(self, filepath, by_name=False):\n self._original_model.load_weights(filepath, by_name=False)\n # Copy the weights from the original model to each of the replicated models.\n orig_model_weights = self._original_model.get_weights()\n distributed_training_utils.set_weights(\n self._original_model._distribution_strategy, self, # pylint: disable=protected-access\n orig_model_weights)\n\n def __getattr__(self, item):\n # Whitelisted atttributes of the model that can be accessed by the user\n # during a callback.\n if item not in ('_setattr_tracking', '_layers'):\n logging.warning('You are accessing attribute ' + item + ' of the '\n 'DistributedCallbackModel that may not have been set '\n 'correctly.')\n return super(DistributedCallbackModel, self).__getattr__(item)\n\n\nclass _TrainingEndpoint(object):\n \"\"\"A container for the training output/target and related entities.\n\n In the case of model with multiple outputs, there is a one-to-one mapping\n between model output (y_pred), model target (y_true), loss, metrics etc.\n By unifying these entities into one class, different entity can access\n information between each other, rather than currently access different list of\n attributes of the model.\n \"\"\"\n\n def __init__(self,\n output,\n output_name,\n loss_fn,\n loss_weight=None,\n training_target=None,\n output_loss_metric=None,\n sample_weight=None,\n sample_weight_mode=None):\n \"\"\"Initialize the _TrainingEndpoint.\n\n Note that the output and output_name should be stable as long as the model\n structure doesn't change. The training_target suppose to be mutable since\n the information is provided via `compile()`\n\n Args:\n output: the output tensor of the model.\n output_name: the unique name of the output tensor.\n loss_fn: the loss function for the output tensor.\n loss_weight: float, the weights for the loss.\n training_target: the _TrainingTarget for the model.\n output_loss_metric: the metric object for the loss function.\n sample_weight: the weights for how a sample is weighted during metric and\n loss calculation. Could be None.\n sample_weight_mode: string, 'temporal', 'samplewise' or None. 
The mode for\n how the sample_weight is populated.\n \"\"\"\n self._output = output\n self._output_name = output_name\n self._loss_fn = loss_fn\n self._loss_weight = loss_weight\n self._training_target = training_target\n self._output_loss_metric = output_loss_metric\n self._sample_weight = sample_weight\n self._sample_weight_mode = sample_weight_mode\n\n @property\n def output(self):\n return self._output\n\n @property\n def output_name(self):\n return self._output_name\n\n @property\n def shape(self):\n return K.int_shape(self.output)\n\n @property\n def loss_fn(self):\n return self._loss_fn\n\n @property\n def loss_weight(self):\n return self._loss_weight\n\n @loss_weight.setter\n def loss_weight(self, value):\n self._loss_weight = value\n\n @property\n def training_target(self):\n return self._training_target\n\n @training_target.setter\n def training_target(self, value):\n self._training_target = value\n\n def create_training_target(self, target, run_eagerly=False):\n \"\"\"Create training_target instance and update the self.training_target.\n\n Note that the input target should just be a tensor or None, and\n corresponding training target will be created based on the output and\n loss_fn.\n\n Args:\n target: the target tensor for the current output. Could be None.\n run_eagerly: boolean, whether the model is in run_eagerly mode.\n\n Raises:\n ValueError if the training_target field for the current instance has\n already been populated.\n \"\"\"\n if self.has_training_target():\n raise ValueError('The training_target field for the _TrainingEndpoint '\n 'instance has already been populated')\n if run_eagerly:\n # When run_eagerly, the target tensor is ignored, and the None placeholder\n # is created instead.\n self.training_target = _TrainingTarget(\n None, feedable=True, skip_target_weights=False)\n return\n\n if self.should_skip_target():\n self.training_target = _TrainingTarget(None)\n else:\n if target is not None and not K.is_placeholder(target):\n feedable = False\n skip_target_weights = True\n else:\n feedable = True\n skip_target_weights = False\n\n if target is None:\n target_dtype = losses.LABEL_DTYPES_FOR_LOSSES.get(\n self.loss_fn, K.dtype(self.output))\n\n target = K.placeholder(\n ndim=len(self.shape),\n name=self.output_name + '_target',\n sparse=K.is_sparse(self.output),\n dtype=target_dtype)\n\n self.training_target = _TrainingTarget(\n target,\n feedable=feedable,\n skip_target_weights=skip_target_weights)\n\n @property\n def output_loss_metric(self):\n return self._output_loss_metric\n\n @output_loss_metric.setter\n def output_loss_metric(self, value):\n self._output_loss_metric = value\n\n @property\n def sample_weight(self):\n return self._sample_weight\n\n @sample_weight.setter\n def sample_weight(self, value):\n self._sample_weight = value\n\n @property\n def sample_weight_mode(self):\n return self._sample_weight_mode\n\n @sample_weight_mode.setter\n def sample_weight_mode(self, value):\n self._sample_weight_mode = value\n\n def should_skip_target(self):\n return self._loss_fn is None\n\n def should_skip_target_weights(self):\n return (self.should_skip_target() or self.training_target is None or\n self.training_target.skip_target_weights)\n\n def has_training_target(self):\n return self.training_target is not None\n\n def has_feedable_training_target(self):\n return (not self.should_skip_target() and\n self.training_target is not None and self.training_target.feedable)\n\n def loss_name(self):\n if self._loss_fn is not None:\n return self._output_name + '_loss'\n 
return None\n\n @property\n def feed_output_shape(self):\n \"\"\"The output shape for the feedable target.\"\"\"\n if not self.has_feedable_training_target():\n return None\n\n if ((isinstance(self.loss_fn, losses.LossFunctionWrapper) and\n self.loss_fn.fn == losses.sparse_categorical_crossentropy)) or (\n isinstance(self.loss_fn, losses.SparseCategoricalCrossentropy)):\n if K.image_data_format() == 'channels_first':\n return (self.shape[0], 1) + self.shape[2:]\n else:\n return self.shape[:-1] + (1,)\n elif (not isinstance(self.loss_fn, losses.Loss) or\n (isinstance(self.loss_fn, losses.LossFunctionWrapper) and\n (getattr(losses, self.loss_fn.fn.__name__, None) is None))):\n # If the given loss is not an instance of the `Loss` class (custom\n # class) or if the loss function that is wrapped is not in the\n # `losses` module, then it is a user-defined loss and we make no\n # assumptions about it.\n return None\n else:\n return self.shape\n\n def sample_weights_mismatch(self):\n \"\"\"Check if the sample weight and the mode match or not.\"\"\"\n # If there is a mismatch between sample weight mode and the placeholders\n # created, then recompile the sub-graphs that depend on sample weights.\n return (\n (self.sample_weight_mode is not None and self.sample_weight is None) or\n (self.sample_weight_mode is None and self.sample_weight is not None))\n\n def populate_sample_weight(self):\n \"\"\"Populate the sample weight and based on the sample weight mode.\"\"\"\n if (self.should_skip_target_weights() or\n self.sample_weight_mode is None or context.executing_eagerly()):\n self._sample_weight = None\n return\n\n assert self.sample_weight_mode in ['temporal', 'samplewise']\n if self.sample_weight_mode == 'temporal':\n default_value = [[1.]]\n shape = [None, None]\n else:\n # self.sample_weight_mode == 'samplewise'\n default_value = [1.]\n shape = [None]\n\n self._sample_weight = array_ops.placeholder_with_default(\n constant_op.constant(default_value, dtype=K.floatx()),\n shape=shape,\n name=self.output_name + '_sample_weights')\n\n\nclass _TrainingTarget(object):\n \"\"\"Container for a target tensor (y_true) and its metadata (shape, loss...).\n\n Arguments:\n target: A target tensor for the model. It may be `None` if the\n output is excluded from loss computation. It is still kept as None\n since each output of the model should have a corresponding target. If\n the target is None, the rest of the attributes will be None as well.\n feedable: Boolean, whether the target is feedable (requires data to be\n passed in `fit` or `train_on_batch`), or not (model compiled with\n `target_tensors` argument).\n skip_target_weights: Boolean, whether the target should be skipped during\n weights calculation.\n \"\"\"\n\n def __init__(self, target, feedable=False, skip_target_weights=True):\n self._target = target\n self._feedable = feedable\n self._skip_target_weights = skip_target_weights\n\n @property\n def target(self):\n return self._target\n\n @property\n def feedable(self):\n return self._feedable\n\n @property\n def skip_target_weights(self):\n return self._skip_target_weights\n\n\ndef _is_symbolic_tensor(x):\n return tensor_util.is_tensor(x) and not isinstance(x, ops.EagerTensor)\n"
] |
[
[
"tensorflow.python.keras.distribute.distributed_training_utils.global_batch_size_supported",
"tensorflow.python.framework.ops.executing_eagerly_outside_functions",
"tensorflow.python.keras.backend.function",
"tensorflow.python.keras.backend.symbolic_learning_phase",
"tensorflow.python.keras.engine.training_utils.unpack_iterator_input",
"tensorflow.python.keras.engine.training_utils.collect_per_output_metric_info",
"tensorflow.python.keras.engine.training_utils.check_generator_arguments",
"tensorflow.python.keras.engine.training_generator.fit_generator",
"tensorflow.python.keras.engine.training_utils.generic_output_names",
"tensorflow.python.platform.tf_logging.warning",
"tensorflow.python.keras.utils.generic_utils.slice_arrays",
"tensorflow.python.keras.engine.training_utils.check_loss_and_target_compatibility",
"tensorflow.python.keras.engine.training_utils.check_array_lengths",
"tensorflow.python.keras.engine.training_distributed.predict_distributed",
"tensorflow.python.keras.engine.training_generator.predict_generator",
"tensorflow.python.keras.engine.training_utils.get_static_batch_size",
"tensorflow.python.ops.math_ops.add_n",
"tensorflow.python.keras.engine.training_utils.prepare_loss_weights",
"tensorflow.python.keras.backend.is_sparse",
"tensorflow.python.keras.engine.training_arrays.test_loop",
"tensorflow.python.keras.engine.training_utils.is_feature_layer",
"tensorflow.python.keras.backend.get_graph",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.keras.engine.training_eager.test_on_batch",
"tensorflow.python.data.ops.dataset_ops.get_legacy_output_shapes",
"tensorflow.python.keras.optimizers.get",
"tensorflow.python.keras.backend.image_data_format",
"tensorflow.python.keras.backend.configure_and_create_distributed_session",
"tensorflow.python.keras.engine.training_utils.prepare_sample_weight_modes",
"tensorflow.python.keras.engine.training_arrays.fit_loop",
"tensorflow.python.platform.tf_logging.log_first_n",
"tensorflow.python.keras.engine.training_utils.standardize_sample_weights",
"tensorflow.python.keras.engine.training_utils.verify_dataset_shuffled",
"tensorflow.python.keras.distribute.distributed_training_utils.is_distributing_by_cloning",
"tensorflow.python.keras.backend.in_multi_worker_mode",
"tensorflow.python.distribute.distribution_strategy_context.in_cross_replica_context",
"tensorflow.python.keras.engine.training_utils.call_metric_function",
"tensorflow.python.distribute.distribution_strategy_context.get_strategy",
"tensorflow.python.keras.distribute.distributed_training_utils.filter_distributed_callbacks",
"tensorflow.python.keras.engine.training_utils.cast_if_floating_dtype",
"tensorflow.python.keras.distribute.distributed_training_utils.set_weights",
"tensorflow.python.keras.engine.training_utils.check_steps_argument",
"tensorflow.python.keras.engine.training_utils.has_tensors",
"tensorflow.python.keras.engine.training_generator.evaluate_generator",
"tensorflow.python.distribute.distribute_coordinator.run_distribute_coordinator",
"tensorflow.python.keras.utils.losses_utils.reduce_weighted_loss",
"tensorflow.python.keras.utils.data_utils.is_generator_or_sequence",
"tensorflow.python.keras.backend.dtype",
"tensorflow.python.keras.metrics.Mean",
"tensorflow.python.keras.engine.training_arrays.predict_loop",
"tensorflow.python.keras.engine.training_distributed.evaluate_distributed",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.keras.utils.losses_utils.scale_loss_for_distribution",
"tensorflow.python.keras.engine.training_utils.prepare_loss_functions",
"tensorflow.python.util.tf_export.keras_export",
"tensorflow.python.keras.engine.training_utils.has_symbolic_tensors",
"tensorflow.python.keras.engine.training_utils.ModelInputs",
"tensorflow.python.keras.backend.is_placeholder",
"tensorflow.python.keras.engine.training_utils.validate_dataset_input",
"tensorflow.python.keras.backend.floatx",
"tensorflow.python.keras.engine.training_utils.is_eager_dataset_or_iterator",
"tensorflow.python.keras.engine.training_distributed.fit_distributed",
"tensorflow.python.framework.tensor_util.is_tensor",
"tensorflow.python.keras.engine.network._is_hdf5_filepath",
"tensorflow.python.keras.utils.losses_utils.compute_weighted_loss",
"tensorflow.python.keras.engine.training_utils.standardize_input_data",
"tensorflow.python.keras.engine.training_utils.standardize_weights",
"tensorflow.python.tf2.enabled",
"tensorflow.python.keras.distribute.distributed_training_utils.is_tpu_strategy",
"tensorflow.python.keras.backend.name_scope",
"tensorflow.python.keras.backend.int_shape",
"tensorflow.python.distribute.distribution_strategy_context.has_strategy",
"tensorflow.python.eager.monitoring.BoolGauge",
"tensorflow.python.keras.backend.get_session",
"tensorflow.python.util.nest.map_structure",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.keras.engine.training_utils.standardize_class_weights",
"tensorflow.python.keras.engine.training_utils.extract_tensors_from_dataset",
"tensorflow.python.keras.engine.training_eager.train_on_batch",
"tensorflow.python.keras.distribute.distributed_training_utils.list_to_tuple",
"tensorflow.python.keras.utils.losses_utils.squeeze_or_expand_dimensions",
"tensorflow.python.keras.saving.saving_utils.trace_model_call",
"tensorflow.python.keras.distribute.distributed_training_utils._reset_metrics"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
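Not part of the dataset row above: a minimal, hedged sketch of the public counterparts of two internal APIs listed in this row's `apis` column (`tensorflow.python.util.nest.flatten` and `nest.map_structure`), assuming a TensorFlow 2.x install; the example structure is a placeholder.

import tensorflow as tf

# A nested structure like the ones training.py flattens internally.
structure = {"x": [1, 2], "y": (3,)}
flat = tf.nest.flatten(structure)                          # [1, 2, 3]
doubled = tf.nest.map_structure(lambda v: v * 2, structure)  # same nesting, values doubled
print(flat, doubled)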
hany606/PettingZoo
|
[
"ef958ff3dfb3759e980759b507448ea96ac8ba28"
] |
[
"pettingzoo/test/parallel_test.py"
] |
[
"import random\nimport warnings\n\nimport numpy as np\n\nfrom pettingzoo.utils.conversions import (aec_to_parallel_wrapper, parallel_to_aec_wrapper,\n turn_based_aec_to_parallel_wrapper)\nfrom pettingzoo.utils.wrappers import BaseWrapper\n\nfrom .api_test import missing_attr_warning\n\n\ndef sample_action(env, obs, agent):\n agent_obs = obs[agent]\n if isinstance(agent_obs, dict) and \"action_mask\" in agent_obs:\n legal_actions = np.flatnonzero(agent_obs[\"action_mask\"])\n if len(legal_actions) == 0:\n return 0\n return random.choice(legal_actions)\n return env.action_space(agent).sample()\n\n\ndef parallel_api_test(par_env, num_cycles=1000):\n par_env.max_cycles = num_cycles\n\n if not hasattr(par_env, 'possible_agents'):\n warnings.warn(missing_attr_warning.format(name='possible_agents'))\n\n assert not isinstance(par_env.unwrapped, aec_to_parallel_wrapper)\n assert not isinstance(par_env.unwrapped, parallel_to_aec_wrapper)\n assert not isinstance(par_env.unwrapped, turn_based_aec_to_parallel_wrapper)\n assert not isinstance(par_env.unwrapped, BaseWrapper)\n\n # checks that reset takes an argument seed\n par_env.reset(seed=0)\n\n MAX_RESETS = 2\n for _ in range(MAX_RESETS):\n obs = par_env.reset()\n assert isinstance(obs, dict)\n assert set(obs.keys()) == (set(par_env.agents))\n done = {agent: False for agent in par_env.agents}\n live_agents = set(par_env.agents[:])\n has_finished = set()\n for _ in range(num_cycles):\n actions = {agent: sample_action(par_env, obs, agent) for agent in par_env.agents if agent in done and not done[agent]}\n obs, rew, done, info = par_env.step(actions)\n for agent in par_env.agents:\n assert agent not in has_finished, \"agent cannot be revived once done\"\n\n if agent not in live_agents:\n live_agents.add(agent)\n\n assert isinstance(obs, dict)\n assert isinstance(rew, dict)\n assert isinstance(done, dict)\n assert isinstance(info, dict)\n\n agents_set = set(live_agents)\n keys = 'observation reward done info'.split()\n vals = [obs, rew, done, info]\n for k, v in zip(keys, vals):\n key_set = set(v.keys())\n if key_set == agents_set:\n continue\n if len(key_set) < len(agents_set):\n warnings.warn('Live agent was not given {}'.format(k))\n else:\n warnings.warn('Agent was given {} but was done last turn'.format(k))\n\n if hasattr(par_env, 'possible_agents'):\n assert set(par_env.agents).issubset(set(par_env.possible_agents)), \"possible_agents defined but does not contain all agents\"\n\n has_finished |= {agent for agent, d in done.items() if d}\n if not par_env.agents and has_finished != set(par_env.possible_agents):\n warnings.warn('No agents present but not all possible_agents are done')\n elif not par_env.agents:\n warnings.warn('No agents present')\n\n for agent in par_env.agents:\n assert par_env.observation_space(agent) is par_env.observation_space(agent), \"observation_space should return the exact same space object (not a copy) for an agent. Consider decorating your observation_space(self, agent) method with @functools.lru_cache(maxsize=None)\"\n assert par_env.action_space(agent) is par_env.action_space(agent), \"action_space should return the exact same space object (not a copy) for an agent (ensures that action space seeding works as expected). Consider decorating your action_space(self, agent) method with @functools.lru_cache(maxsize=None)\"\n\n for agent, d in done.items():\n if d:\n live_agents.remove(agent)\n\n assert set(par_env.agents) == live_agents\n\n if len(live_agents) == 0:\n break\n"
] |
[
[
"numpy.flatnonzero"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
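Illustrative only (not from the dataset): how `numpy.flatnonzero`, the single API recorded for this row, selects legal actions from an action mask, loosely mirroring `sample_action` in pettingzoo/test/parallel_test.py; the mask values are made up.

import random
import numpy as np

action_mask = np.array([0, 1, 0, 1, 1])
legal_actions = np.flatnonzero(action_mask)          # indices of nonzero entries: array([1, 3, 4])
action = random.choice(legal_actions) if len(legal_actions) > 0 else 0
print(action)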
rjzamora/dask-sql
|
[
"c3ad6a9f6b01ce02127fde7501eaf322c8160f7e"
] |
[
"tests/integration/test_select.py"
] |
[
"import dask.dataframe as dd\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom pandas.testing import assert_frame_equal\n\nfrom dask_sql.utils import ParsingException\n\n\ndef test_select(c, df):\n result_df = c.sql(\"SELECT * FROM df\")\n result_df = result_df.compute()\n\n assert_frame_equal(result_df, df)\n\n\ndef test_select_alias(c, df):\n result_df = c.sql(\"SELECT a as b, b as a FROM df\")\n result_df = result_df.compute()\n\n expected_df = pd.DataFrame(index=df.index)\n expected_df[\"b\"] = df.a\n expected_df[\"a\"] = df.b\n\n assert_frame_equal(result_df[[\"a\", \"b\"]], expected_df[[\"a\", \"b\"]])\n\n\ndef test_select_column(c, df):\n result_df = c.sql(\"SELECT a FROM df\")\n result_df = result_df.compute()\n\n assert_frame_equal(result_df, df[[\"a\"]])\n\n\ndef test_select_different_types(c):\n expected_df = pd.DataFrame(\n {\n \"date\": pd.to_datetime([\"2022-01-21 17:34\", \"2022-01-21\", \"17:34\", pd.NaT]),\n \"string\": [\"this is a test\", \"another test\", \"äölüć\", \"\"],\n \"integer\": [1, 2, -4, 5],\n \"float\": [-1.1, np.NaN, pd.NA, np.sqrt(2)],\n }\n )\n c.create_table(\"df\", expected_df)\n df = c.sql(\n \"\"\"\n SELECT *\n FROM df\n \"\"\"\n )\n df = df.compute()\n\n assert_frame_equal(df, expected_df)\n\n\ndef test_select_expr(c, df):\n result_df = c.sql(\"SELECT a + 1 AS a, b AS bla, a - 1 FROM df\")\n result_df = result_df.compute()\n\n expected_df = pd.DataFrame(\n {\"a\": df[\"a\"] + 1, \"bla\": df[\"b\"], '\"df\".\"a\" - 1': df[\"a\"] - 1,}\n )\n assert_frame_equal(result_df, expected_df)\n\n\ndef test_select_of_select(c, df):\n result_df = c.sql(\n \"\"\"\n SELECT 2*c AS e, d - 1 AS f\n FROM\n (\n SELECT a - 1 AS c, 2*b AS d\n FROM df\n ) AS \"inner\"\n \"\"\"\n )\n result_df = result_df.compute()\n\n expected_df = pd.DataFrame({\"e\": 2 * (df[\"a\"] - 1), \"f\": 2 * df[\"b\"] - 1})\n assert_frame_equal(result_df, expected_df)\n\n\ndef test_select_of_select_with_casing(c, df):\n result_df = c.sql(\n \"\"\"\n SELECT AAA, aaa, aAa\n FROM\n (\n SELECT a - 1 AS aAa, 2*b AS aaa, a + b AS AAA\n FROM df\n ) AS \"inner\"\n \"\"\"\n )\n result_df = result_df.compute()\n\n expected_df = pd.DataFrame(\n {\"AAA\": df[\"a\"] + df[\"b\"], \"aaa\": 2 * df[\"b\"], \"aAa\": df[\"a\"] - 1}\n )\n\n assert_frame_equal(result_df, expected_df)\n\n\ndef test_wrong_input(c):\n with pytest.raises(ParsingException):\n c.sql(\"\"\"SELECT x FROM df\"\"\")\n\n with pytest.raises(ParsingException):\n c.sql(\"\"\"SELECT x FROM df\"\"\")\n\n\ndef test_timezones(c, datetime_table):\n result_df = c.sql(\n \"\"\"\n SELECT * FROM datetime_table\n \"\"\"\n )\n result_df = result_df.compute()\n\n assert_frame_equal(result_df, datetime_table)\n\n\[email protected](\n \"input_table\",\n [\"datetime_table\", pytest.param(\"gpu_datetime_table\", marks=pytest.mark.gpu),],\n)\ndef test_date_casting(c, input_table, request):\n datetime_table = request.getfixturevalue(input_table)\n result_df = c.sql(\n f\"\"\"\n SELECT\n CAST(timezone AS DATE) AS timezone,\n CAST(no_timezone AS DATE) AS no_timezone,\n CAST(utc_timezone AS DATE) AS utc_timezone\n FROM {input_table}\n \"\"\"\n )\n\n expected_df = datetime_table\n expected_df[\"timezone\"] = (\n expected_df[\"timezone\"].astype(\"<M8[ns]\").dt.floor(\"D\").astype(\"<M8[ns]\")\n )\n expected_df[\"no_timezone\"] = (\n expected_df[\"no_timezone\"].astype(\"<M8[ns]\").dt.floor(\"D\").astype(\"<M8[ns]\")\n )\n expected_df[\"utc_timezone\"] = (\n expected_df[\"utc_timezone\"].astype(\"<M8[ns]\").dt.floor(\"D\").astype(\"<M8[ns]\")\n )\n\n 
dd.assert_eq(result_df, expected_df)\n\n\[email protected](\n \"input_table\",\n [\"datetime_table\", pytest.param(\"gpu_datetime_table\", marks=pytest.mark.gpu),],\n)\ndef test_timestamp_casting(c, input_table, request):\n datetime_table = request.getfixturevalue(input_table)\n result_df = c.sql(\n f\"\"\"\n SELECT\n CAST(timezone AS TIMESTAMP) AS timezone,\n CAST(no_timezone AS TIMESTAMP) AS no_timezone,\n CAST(utc_timezone AS TIMESTAMP) AS utc_timezone\n FROM {input_table}\n \"\"\"\n )\n\n expected_df = datetime_table.astype(\"<M8[ns]\")\n dd.assert_eq(result_df, expected_df)\n\n\ndef test_multi_case_when(c):\n df = pd.DataFrame({\"a\": [1, 6, 7, 8, 9]})\n c.create_table(\"df\", df)\n\n actual_df = c.sql(\n \"\"\"\n SELECT\n CASE WHEN a BETWEEN 6 AND 8 THEN 1 ELSE 0 END AS C\n FROM df\n \"\"\"\n ).compute()\n\n expected_df = pd.DataFrame({\"C\": [0, 1, 1, 1, 0]}, dtype=np.int32)\n assert_frame_equal(actual_df, expected_df)\n"
] |
[
[
"pandas.to_datetime",
"pandas.testing.assert_frame_equal",
"numpy.sqrt",
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
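Illustrative only: a minimal use of the pandas/numpy APIs recorded for this row (`pandas.DataFrame`, `pandas.to_datetime`, `numpy.sqrt`, `pandas.testing.assert_frame_equal`), assuming a reasonably recent pandas; the sample values are placeholders.

import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal

df = pd.DataFrame({
    "date": pd.to_datetime(["2022-01-21 17:34", "2022-01-21", None]),  # None becomes NaT
    "float": [np.sqrt(2), -1.1, np.nan],
})
assert_frame_equal(df, df.copy())  # identical frames compare equal, so this passes silently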
dpetersonVT23/RA_Duty_Scheduler
|
[
"6fec38bbb96e878a78b9247c6a74e73132da519a"
] |
[
"mplcal.py"
] |
[
"# https://github.com/meta4/mplcal\n# Copyright (c) 2020, Peter Wilson\n#\n# All rights reserved.\n\n# import statements\nimport calendar\nimport matplotlib.pyplot as plt\n\ncalendar.setfirstweekday(6) # Sunday is 1st day in US\nw_days = 'Sun Mon Tue Wed Thu Fri Sat'.split()\nm_names = '''January February March April May June July August September October November December'''.split()\n\n\nclass DayNotInMonthError(ValueError):\n pass\n\n\nclass MplCalendar(object):\n def __init__(self, year, month):\n self.year = year\n self.month = month\n self.cal = calendar.monthcalendar(year, month)\n # A month of events are stored as a list of lists of list.\n # Nesting, from outer to inner, Week, Day, Event_str\n # Save the events data in the same format\n self.events = [[[] for day in week] for week in self.cal]\n\n def _monthday_to_index(self, day):\n \"\"\"\n The 2-d index of the day in the list of lists.\n If the day is not in the month raise a DayNotInMonthError,\n which is a subclass of ValueError.\n \"\"\"\n for week_n, week in enumerate(self.cal):\n try:\n i = week.index(day)\n return week_n, i\n except ValueError:\n pass\n raise DayNotInMonthError(\"There aren't {} days in the month\".format(day))\n\n # method to add an event string for the specified day\n def add_event(self, day, event_str):\n week, w_day = self._monthday_to_index(day)\n self.events[week][w_day].append(event_str)\n\n # method to create the calendar figure\n def _render(self, **kwargs):\n plot_defaults = dict(sharex=True, sharey=True, figsize=(11, 8.5), dpi=80,)\n plot_defaults.update(kwargs)\n f, axs = plt.subplots(len(self.cal), 7, **plot_defaults)\n for week, ax_row in enumerate(axs):\n for week_day, ax in enumerate(ax_row):\n ax.set_xticks([])\n ax.set_yticks([])\n if self.cal[week][week_day] != 0:\n ax.text(.02, .98, str(self.cal[week][week_day]), verticalalignment='top', horizontalalignment='left')\n contents = \"\\n\".join(self.events[week][week_day])\n ax.text(.03, .85, contents, verticalalignment='top', horizontalalignment='left', fontsize=9)\n\n # use the titles of the first row as the weekdays\n for n, day in enumerate(w_days):\n axs[0][n].set_title(day)\n\n # place subplots in a close grid\n f.subplots_adjust(hspace=0)\n f.subplots_adjust(wspace=0)\n f.suptitle(m_names[self.month-1] + ' ' + str(self.year),\n fontsize=20, fontweight='bold')\n\n # method to display the calendar\n def show(self, **kwargs):\n self._render(**kwargs)\n plt.show()\n\n # method to save the calendar to the specified image file\n def save(self, filename, **kwargs):\n self._render(**kwargs)\n plt.savefig(filename)\n"
] |
[
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.savefig"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
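Illustrative only: the two matplotlib calls recorded for this row (`matplotlib.pyplot.savefig`, `matplotlib.pyplot.show`) on a bare calendar-style cell; the Agg backend and the output filename are assumptions so the sketch runs without a display.

import matplotlib
matplotlib.use("Agg")  # headless backend (assumption) so savefig works without a GUI
import matplotlib.pyplot as plt

fig, ax = plt.subplots(figsize=(4, 3))
ax.set_xticks([])
ax.set_yticks([])
ax.text(0.02, 0.98, "1", verticalalignment="top", horizontalalignment="left")
plt.savefig("calendar_cell.png")
# plt.show()  # only meaningful with an interactive backend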
rom1mouret/ml-essentials
|
[
"27220fd65bffa350d75aec76592ad5d786108d41"
] |
[
"examples/linreg.py"
] |
[
"#!/usr/bin/env python3\n\nfrom contextlib import contextmanager\nimport pandas as pd\nimport numpy as np\nimport random\nimport torch\nimport time\nimport os\nimport argparse\nfrom scipy import sparse\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.preprocessing import OneHotEncoder\n\n@contextmanager\ndef timeit(name: str) -> None:\n before = time.time()\n try:\n yield\n finally:\n duration = time.time() - before\n print(\"%s: %.3f sec.\" % (name, duration))\n\nparser = argparse.ArgumentParser(description='Linear Regression')\nparser.add_argument('csv_file', metavar='csv-file', type=str, nargs=1)\nparser.add_argument('target', metavar='target-column', type=str, nargs=1)\nparser.add_argument('exclude', metavar='excluded-columns', type=str, nargs='*')\nparser.add_argument('-testratio', metavar='ratio', type=float, default=0.5, nargs=None)\nparser.add_argument('-epochs', metavar='epochs', type=int, default=1, nargs=None)\nparser.add_argument('-batchsize', metavar='batch size', type=int, default=256, nargs=None)\nparser.add_argument('-lr', metavar='learning rate', type=float, default=0.001, nargs=None)\nparser.add_argument('-decay', metavar='weight decay', type=float, default=0.0, nargs=None)\nparser.add_argument('-momentum', metavar='gradient momentum', type=float, default=0.1, nargs=None)\nparser.add_argument('-sep', metavar='separator', type=str, default=\",\", nargs=None)\nargs = parser.parse_args()\n\ntarget_col = args.target[0]\n\nwith timeit(\"CSV parsing\"):\n excluded = set(args.exclude)\n df = pd.read_csv(args.csv_file[0], sep=args.sep[0], header=0, na_values=[\"\", \" \", \"NA\", \"-\"])\n numerical = [\n col for col, t in df.dtypes.iteritems()\n if t in (np.int64, np.float64) and t not in excluded\n ]\n categorical = [\n col for col, t in df.dtypes.iteritems()\n if t == np.object and t not in excluded\n ]\n numerical.remove(target_col)\n df[categorical] = df[categorical].astype(str) # required for one-hot\n\nwith timeit(\"set split\"):\n train_set, test_set = train_test_split(df, shuffle=True, test_size=args.testratio)\n\nwith timeit(\"training+running imputer\"):\n X_num = train_set[numerical].values # already makes a copy\n imputer = SimpleImputer(copy=False)\n X_num = imputer.fit_transform(X_num)\n\nwith timeit(\"training+running scaler\"):\n scaler = StandardScaler(copy=False)\n X_num = scaler.fit_transform(X_num)\n\n# with timeit(\"hash encoding\"):\n# X_cat = df[categorical]\n# hash = HashingEncoder(n_components=32).fit(X_cat)\n# X_cat = hash.transform(X_cat)\n\nif len(categorical) > 0:\n with timeit(\"one-hot encoding\"):\n X_cat = train_set[categorical].values\n #cat_imputer = SimpleImputer(copy=False, strategy='most_frequent')\n #X_cat = cat_imputer.fit_transform(X_cat)\n one_hot = OneHotEncoder(sparse=True, handle_unknown='ignore')\n X_cat = one_hot.fit_transform(X_cat)\n dim = X_cat.shape[1] + X_num.shape[1]\nelse:\n dim = X_num.shape[1]\n\nprint(\"dimensions:\", dim)\n\ny_true = train_set[args.target[0]].values.astype(np.float32)\ny_scale = y_true.std()\ny_true /= y_scale\nregressor = torch.nn.Linear(dim, 1)\ntorch.nn.init.kaiming_normal_(regressor.weight, nonlinearity='linear')\noptimizer = torch.optim.SGD(\n regressor.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.decay\n)\n\nwith timeit(\"training\"):\n indices = list(range(len(X_num)))\n n_sections = 1 + len(X_num) // args.batchsize\n for epoch in range(args.epochs):\n 
print(\"epoch\", epoch)\n random.shuffle(indices)\n for idx in np.array_split(indices, n_sections):\n y_batch = torch.Tensor(y_true[idx])\n num = torch.Tensor(X_num[idx, :])\n if len(categorical) > 0:\n cat = torch.Tensor(X_cat[idx, :].todense())\n batch = torch.cat([num, cat], dim=1)\n else:\n batch = num\n optimizer.zero_grad()\n y_pred = regressor(batch).squeeze(1)\n loss = (y_batch - y_pred).pow(2).sum()\n loss.backward()\n optimizer.step()\n\n regressor.eval()\n\nwith timeit(\"running imputer on testing data\"):\n X_num = test_set[numerical].values\n X_num = imputer.transform(X_num)\n\nwith timeit(\"running scaler on testing data\"):\n X_num = scaler.transform(X_num)\n\nif len(categorical) > 0:\n with timeit(\"running one-hot on testing data\"):\n X_cat = test_set[categorical].values\n X_cat = one_hot.transform(X_cat)\n\nwith timeit(\"predicting\"):\n batch_size = 4096\n y = []\n for i in range(0, len(X_num), batch_size):\n end = min(len(X_num), i+batch_size)\n num = torch.Tensor(X_num[i:end, :])\n if len(categorical) > 0:\n cat = torch.Tensor(X_cat[i:end, :].todense())\n batch = torch.cat([num, cat], dim=1)\n else:\n batch = num\n y += regressor(batch).squeeze(1).tolist()\n\n y = np.array(y) * y_scale\n y_true = test_set[args.target[0]].values.astype(np.float32)\n mae = np.abs(y_true - y).mean()\n print(\"MAE\", mae)\n ref = np.abs(y_true - y_true.mean()).mean()\n print(\"Baseline\", ref)\n\n\noutdir = \"outdir\"\n\n# with timeit(\"writing\"):\n# batch_size = 1024\n# for j, i in enumerate(range(0, len(X_num), batch_size)):\n# d = X_cat[i:i+batch_size, :].todense()\n# X = np.concatenate([X_num[i:i+batch_size], d], axis=1)\n# print(\"X dim\", X.shape[1])\n# pd.DataFrame(X).to_csv(\"%s/output%i.csv\" % (outdir, j), index=False)\n\nwith timeit(\"reading again\"):\n n = 0\n for filename in os.listdir(outdir):\n df = pd.read_csv(os.path.join(outdir, filename))\n n += len(df)\n print(\"number of rows:\", n)\n"
] |
[
[
"numpy.array",
"pandas.read_csv",
"numpy.abs",
"torch.Tensor",
"torch.cat",
"sklearn.preprocessing.OneHotEncoder",
"sklearn.impute.SimpleImputer",
"sklearn.model_selection.train_test_split",
"torch.nn.Linear",
"sklearn.preprocessing.StandardScaler",
"numpy.array_split",
"torch.nn.init.kaiming_normal_"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
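Illustrative only: a minimal sketch wiring together a few APIs recorded for this row (`sklearn.preprocessing.StandardScaler`, `torch.nn.Linear`, `torch.Tensor`) into a single SGD step, loosely following examples/linreg.py; the random data and hyperparameters are placeholders, not values from the original script.

import numpy as np
import torch
from sklearn.preprocessing import StandardScaler

X = StandardScaler().fit_transform(np.random.rand(32, 4))   # standardized features
y = torch.Tensor(np.random.rand(32))                        # placeholder targets
regressor = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(regressor.parameters(), lr=0.001, momentum=0.1)

optimizer.zero_grad()
loss = (y - regressor(torch.Tensor(X)).squeeze(1)).pow(2).sum()  # sum of squared errors
loss.backward()
optimizer.step()
print(float(loss))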
Dimkarpenko/Minecraft
|
[
"c51e1faa20a4ab08b516bff1d3fcd62f20d3474d"
] |
[
"minecraft.py"
] |
[
"from ursina import *\nfrom ursina.prefabs.first_person_controller import FirstPersonController\nfrom ursina.prefabs.panel import Panel\nfrom ursina.shaders import *\nfrom perlin_noise import PerlinNoise\nfrom numpy import floor,abs\nfrom random import randint,randrange\nimport psutil\nimport os\nimport json\n\nversion = '0.0.4a'\n\napp = Ursina()\n\nglobal block_pick\nglobal spawnpoint\n\napplication.development_mode = False\n\nwith open('launch.json') as json_file:\n\tdata = json.load(json_file)\n\trender_fog = data[\"scene\"][0][\"render_fog\"]\n\tterrainWidth = data[\"scene\"][0][\"terrainwidth\"]\n\tenable_shaders = data[\"scene\"][0][\"enable_shaders\"]\n\twindow.borderless = data[\"window\"][0][\"borderless\"]\n\twindow.fullscreen = data[\"window\"][0][\"fullscreen\"]\n\twindow.render_mode = data[\"window\"][0][\"render_mode\"]\n\tnickname = data[\"scene\"][0][\"nickname\"]\n\ngrass_texture = load_texture('assets/textures/grass_block.png')\nstone_texture = load_texture('assets/textures/stone_block.png')\nbrick_texture = load_texture('assets/textures/brick_block.png')\ndirt_texture = load_texture('assets/textures/dirt_block.png')\nplank_texture = load_texture('assets/textures/planks_block.png')\nsand_texture = load_texture('assets/textures/sand_block.png')\ngravel_texture = load_texture('assets/textures/gravel_block.png')\noak_texture = load_texture('assets/textures/oak_block.png')\noak_leaf_texture = load_texture('assets/textures/oak_leaf_block.png')\ncobblestone_texture = load_texture('assets/textures/cobblestone_block.png')\ngranite_texture = load_texture('assets/textures/granite_block.png')\nbedrock_texture = load_texture('assets/textures/bedrock_block.png')\nsky_texture = load_texture('assets/skybox.png')\narm_texture = load_texture('assets/hand.png')\ninventory_texture = load_texture('assets/inventory_point.png')\ncursor_texture = load_texture('assets/crosshair.png')\npunch_sound = Audio('assets/audio/break1',loop = False, autoplay = False)\nblock_pick = 0\nspawnpoint = (0,0,0)\n\ngrass_texture_2 = load_texture('assets/textures/grass_texture.png')\nstone_texture_2 = load_texture('assets/textures/stone_texture.png')\nbrick_texture_2 = load_texture('assets/textures/brick_texture.png')\ndirt_texture_2 = load_texture('assets/textures/dirt_texture.png')\nplank_texture_2 = load_texture('assets/textures/planks_texture.png')\nsand_texture_2 = load_texture('assets/textures/sand_texture.png')\ngravel_texture_2 = load_texture('assets/textures/gravel_texture.png')\noak_texture_2 = load_texture('assets/textures/oak_texture.png')\noak_leaf_texture_2 = load_texture('assets/textures/oak_leaf_texture.png')\ncobblestone_texture_2 = load_texture('assets/textures/cobblestone_texture.png')\ngranite_texture_2 = load_texture('assets/textures/granite_texture.png')\n\nwindow.title = f\"Minecraft {version}\"\nwindow.icon = \"assets/icon.ico\"\nbutton_font = 'assets/fonts/minecraft.ttf'\nwindow.exit_button.visible = False\nwindow.fps_counter.visible = False\nwindow.center_on_screen()\n\nif render_fog == True:\n\tscene.fog_color = color.white\n\tscene.fog_density = 0.02\n\nif enable_shaders == True:\n\tEntity.default_shader = lit_with_shadows_shader\n\tsun = DirectionalLight()\n\tsun.look_at(Vec3(1,-1,-1))\n\no = Panel(scale=5)\no.visible = False\n\nversion = Text(position=Vec3(-.87,0.48,0),font = button_font,text=version)\ncoordinates = Text(position=Vec3(-.87,0.44,0),font = button_font)\ncpu_panel = Text(position=Vec3(-.87,0.40,0),font = button_font,ignore_paused=True)\nblock_name_l = 
Text(position=Vec3(-0.05,-.4,0),font = button_font)\nconsole_output = Text(position=Vec3(-.87,-.4,0),font = button_font)\n\nterrain = Entity(model=None,collider=None)\nnoise = PerlinNoise(octaves=2,seed=int(randrange(100,999)))\n\namp = 6\nfreq = 24\nfps=60\ni=0\ncpu = 0\nram = 0\nmax_y = 0\nblock_names = {}\nblock_ids = {}\n\ndef check_fall():\n\tif player.y < -110.0:\n\t\tPlayerCommands.kill(message=f'Player {nickname} fell out of the world')\n\ndef update():\n\tglobal block_pick,fps,i,max_y,cpu,ram,block_names,block_name_l,block_ids\n\tcheck_fall()\n\n\tif i > 60:\n\t\tfps = str(int(1//time.dt))\n\t\tcpu = psutil.cpu_percent()\n\t\tram = psutil.virtual_memory().percent\n\t\ti = 0\n\ti += 1\n\n\tpid = os.getpid()\n\tpython_process = psutil.Process(pid)\n\tmemoryUse = python_process.memory_info()[0]/2.**30\n\n\tcoordinates.text = f'Position: {round(player.x)},{round(player.y)},{round(player.z)} / {fps} fps'\n\tcpu_panel.text = f'CPU: {cpu}% / RAM: {ram}% / Memory use: {round(memoryUse,2)} GB'\n\n\tif held_keys['left mouse'] or held_keys['right mouse']:\n\t\thand.active()\n\telse:\n\t\thand.passive()\n\n\tif held_keys['escape']: application.quit()\n\tif held_keys['e']:\n\t\tapplication.pause()\n\t\to.visible = True\n\t\tinventory.visible = True\n\t\tmouse.locked = False\n\t\tmouse.visible = True\n\tif held_keys['p']:PlayerCommands.setspawn(player.x,player.y,player.z)\n\tif held_keys['r']:PlayerCommands.move_random()\n\tif held_keys['k']:PlayerCommands.kill(message=f'Player {nickname} killed')\n\nclass PlayerCommands():\n\tglobal spawnpoint\n\tdef __init__(self,**kwargs):\n\t\tpass\n\t\n\tdef kill(message):\n\t\tplayer.position=spawnpoint\n\t\tconsole_output.text = message\n\n\tdef setspawn(x,y,z):\n\t\tglobal spawnpoint\n\t\tspawnpoint = (x,y,z)\n\t\tconsole_output.text = f'Spawnpoint set {round(x)},{round(y)},{round(z)}'\n\n\tdef move_random():\n\t\trand_x = randint(0,terrainWidth-2)\n\t\trand_z = randint(0,terrainWidth-2)\n\t\tplayer.position = (rand_x,max_y,rand_z)\n\t\tconsole_output.text = f'Player {nickname} moved to {round(rand_x)},{round(max_y)},{round(rand_z)}'\n\ncurrent_block = Button(position=Vec2(.82,0.42),scale=.1,model='quad',color=color.white,disabled=True,texture=grass_texture_2,visible=False)\n\nclass Inventory(Entity):\n global block_pick,block_names\n def __init__(self,**kwargs):\n super().__init__(\n parent = camera.ui,\n model = 'quad',\n scale = (.5, .7),\n origin = (-.5, .5),\n position = (-.25,.35),\n texture = inventory_texture,\n texture_scale = (5,7),\n color = color.rgb(198,198,198, a=255)\n )\n\n self.item_parent = Entity(parent=self, scale=(1/5,1/7))\n\n def find_free_spot(self): \n taken_spots = [(int(e.x), int(e.y)) for e in self.item_parent.children] \n for y in range(7): \n for x in range(5): \n if not (x,-y) in taken_spots: \n return (x,-y) \n\n def append(self,item,texture,id):\n inv = Button(\n\t\t parent = inventory.item_parent,\n\t\t model = 'quad',\n\t\t origin = (-.66,.66),\n\t\t\tscale = .75,\n\t\t color = color.white,\n\t\t\ttexture = texture,\n\t\t position = self.find_free_spot(), \n\t\t z = -.1,\n\t\t )\n block_names[id] = item\n block_ids[id] = texture\n\n def choose_block():\n global block_pick\n mouse.locked = True\n mouse.visible = False\n inventory.visible = False\n o.visible = False\n application.resume() \n block_pick = id\n if block_pick != 0:\n block_name_l.visible = True\n current_block.visible = True\n block_name_l.text = str(block_names[block_pick])\n current_block.texture = block_ids[block_pick]\n if block_pick == 0:\n 
block_name_l.visible = False\n current_block.visible = False\n\t\t\n inv.on_click = choose_block\n\nclass Voxel(Button):\n\tdef __init__(self, position = (0,0,0), texture = grass_texture):\n\t\tsuper().__init__(\n\t\t\tparent = scene,\n\t\t\tposition = position,\n\t\t\tmodel = 'assets/block',\n\t\t\torigin_y = 0.5,\n\t\t\ttexture = texture,\n\t\t\tcolor = color.color(0,0,random.uniform(0.9,1)),\n\t\t\tscale = 0.5,\n\t\t\t)\n\n\tdef input(self,key):\n\t\tif self.hovered:\n\t\t\tif key == 'right mouse down' and block_pick != 0:\n\t\t\t\tpunch_sound.play()\n\t\t\t\tif block_pick == 1: voxel = Voxel(position = self.position + mouse.normal, texture = grass_texture)\n\t\t\t\tif block_pick == 2: voxel = Voxel(position = self.position + mouse.normal, texture = stone_texture)\n\t\t\t\tif block_pick == 3: voxel = Voxel(position = self.position + mouse.normal, texture = brick_texture)\n\t\t\t\tif block_pick == 4: voxel = Voxel(position = self.position + mouse.normal, texture = dirt_texture)\n\t\t\t\tif block_pick == 5: voxel = Voxel(position = self.position + mouse.normal, texture = plank_texture)\n\t\t\t\tif block_pick == 6: voxel = Voxel(position = self.position + mouse.normal, texture = sand_texture)\n\t\t\t\tif block_pick == 7: voxel = Voxel(position = self.position + mouse.normal, texture = gravel_texture)\n\t\t\t\tif block_pick == 8: voxel = Voxel(position = self.position + mouse.normal, texture = oak_texture)\n\t\t\t\tif block_pick == 9: voxel = Voxel(position = self.position + mouse.normal, texture = oak_leaf_texture)\n\t\t\t\tif block_pick == 10: voxel = Voxel(position = self.position + mouse.normal, texture = cobblestone_texture)\n\t\t\t\tif block_pick == 11: voxel = Voxel(position = self.position + mouse.normal, texture = granite_texture)\n\n\t\t\tif key == 'left mouse down':\n\t\t\t\tif self.texture != bedrock_texture:\n\t\t\t\t\tpunch_sound.play()\n\t\t\t\t\tdestroy(self)\n\nclass Hand(Entity):\n\tdef __init__(self):\n\t\tsuper().__init__(\n\t\t\tparent = camera.ui,\n\t\t\tmodel = 'assets/arm',\n\t\t\ttexture = arm_texture,\n\t\t\tscale = 0.2,\n\t\t\trotation = Vec3(150,-10,0),\n\t\t\tposition = Vec2(0.4,-0.6))\n\n\tdef active(self):\n\t\tself.position = Vec2(0.3,-0.5)\n\n\tdef passive(self):\n\t\tself.position = Vec2(0.4,-0.6)\n\n#render terrain\nfor i in range(terrainWidth*terrainWidth):\n voxel = Voxel(texture=grass_texture)\n voxel.x = floor(i/terrainWidth)\n voxel.z = floor(i%terrainWidth)\n voxel.y = floor((noise([voxel.x/freq,voxel.z/freq]))*amp)\n if voxel.y > max_y:max_y = voxel.y\n'''\nfor b in range(1):\n\tfor i in range(terrainWidth*terrainWidth):\n\t\tvoxel = Voxel(texture=dirt_texture)\n\t\tvoxel.x = floor(i/terrainWidth)\n\t\tvoxel.z = floor(i%terrainWidth)\n\t\tvoxel.y = floor(((noise([voxel.x/freq,voxel.z/freq]))*amp)-(b+1))\n'''\nfor b in range(1):\n\tfor i in range(terrainWidth*terrainWidth):\n\t\tvoxel = Voxel(texture=bedrock_texture)\n\t\tvoxel.x = floor(i/terrainWidth)\n\t\tvoxel.z = floor(i%terrainWidth)\n\t\tvoxel.y = floor(((noise([voxel.x/freq,voxel.z/freq]))*amp)-(b+1))\n\nterrain.combine()\nterrain.collider = 'mesh'\nterrain.texture = grass_texture\n\nplayer = FirstPersonController()\nspawnpoint = (terrainWidth/2,8,terrainWidth/2)\nplayer.position = spawnpoint\nplayer.cursor.disable()\nplayer.cursor = Entity(parent=camera.ui, model='quad', color=color.white, scale=.03,rotation_z=90,texture=cursor_texture,default_shader=None) \nplayer.gravity = 0.6\n#camera.fov = 150\n\nsky = Sky(texture=sky_texture)\nhand = Hand()\ninventory = 
Inventory(default_shader=None)\ninventory.visible = False\n\nprint(f'=====\\nterrain width: {terrainWidth}\\nseed: {noise.seed}\\nplayer spawn: {spawnpoint}\\nmax y: {max_y}\\nnickname: {nickname}\\n=====')\n \ninventory.append('Grass',grass_texture_2,1)\ninventory.append('Stone',stone_texture_2,2)\ninventory.append('Brick',brick_texture_2,3) \ninventory.append('Dirt',dirt_texture_2,4) \ninventory.append('Planks',plank_texture_2,5) \ninventory.append('Sand',sand_texture_2,6) \ninventory.append('Gravel',gravel_texture_2,7) \ninventory.append('Oak',oak_texture_2,8) \ninventory.append('Leaf',oak_leaf_texture_2,9) \ninventory.append('Cobblestone',cobblestone_texture_2,10) \ninventory.append('Granite',granite_texture_2,11) \n\napp.run()\n"
] |
[
[
"numpy.floor"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
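A minimal, standalone sketch of the one API tracked for the ursina script above, numpy.floor, which that script uses to map a flat loop index onto x/z grid coordinates and to round the noise height down to whole blocks. The height function here is an assumed stand-in for PerlinNoise so the snippet runs with numpy alone, and the grid size is a made-up small value.

import numpy as np

terrain_width = 8            # assumed small grid; the script reads terrainWidth from launch.json
freq, amp = 24, 6            # same scaling constants as the script

def height(x, z):
    # stand-in for PerlinNoise([x / freq, z / freq]); any smooth 2-D function works here
    return np.sin(x / freq) * np.cos(z / freq)

for i in range(terrain_width * terrain_width):
    x = int(np.floor(i / terrain_width))    # row of the voxel grid
    z = int(np.floor(i % terrain_width))    # column of the voxel grid
    y = int(np.floor(height(x, z) * amp))   # block height, rounded down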
pymc-labs/plaster
|
[
"8535afd78d357c305aa522b30019b1039fa7caab"
] |
[
"plaster/run/nn_v2/zests/zest_nn_v2_integration.py"
] |
[
"import numpy as np\nimport pandas as pd\nfrom munch import Munch\nfrom plaster.run.nn_v2.nn_v2_params import NNV2Params\nfrom plaster.run.nn_v2.nn_v2_worker import nn_v2\nfrom plaster.run.prep import prep_fixtures\nfrom plaster.run.prep.prep_params import PrepParams\nfrom plaster.run.prep.prep_worker import prep\nfrom plaster.run.sigproc_v2.sigproc_v2_fixtures import simple_sigproc_v2_result_fixture\nfrom plaster.run.sim_v2 import sim_v2_worker\nfrom plaster.run.sim_v2.sim_v2_params import SimV2Params\nfrom plaster.run.sim_v2.sim_v2_result import SimV2Result\nfrom zest import zest\n\n\[email protected](\"TODO nn_v2 broken need to repair\")\[email protected](\"integration\")\ndef zest_nn_v2_integration():\n prep_result = prep_fixtures.result_random_fixture(2)\n\n def _run(labels=\"DE\", sigproc_result=None, _prep_result=prep_result):\n sim_v2_result = SimV2Result.from_prep_fixture(_prep_result, labels=labels)\n\n # Flip just to convince myself that it is working\n # (ie they aren't accidentally in the right order)\n sim_v2_result.test_radmat = np.flip(sim_v2_result.test_radmat, axis=0).copy()\n sim_v2_result.test_true_pep_iz = np.flip(\n sim_v2_result.test_true_pep_iz, axis=0\n ).copy()\n\n sim_v2_result.test_true_dye_iz = np.flip(\n sim_v2_result.test_true_dye_iz, axis=0\n ).copy()\n\n nn_v2_params = NNV2Params(\n source=\"zest_nn_v2_worker\",\n n_neighbors=10,\n priors=sim_v2_result.params.priors,\n )\n\n nn_v2_result = nn_v2(\n nn_v2_params,\n _prep_result,\n sim_v2_result,\n sigproc_result=sigproc_result,\n rad_filter_result=None,\n )\n\n return nn_v2_result, sim_v2_result\n\n def it_runs_single_channel():\n for tries in range(10):\n nn_v2_result, sim_v2_result = _run(labels=\"DE\")\n trues = sim_v2_result.test_true_pep_iz\n n_right = (nn_v2_result.calls().pep_i == trues).sum()\n n_total = trues.shape[0]\n if n_right >= int(0.3 * n_total):\n break\n else:\n raise AssertionError(\"never exceeded 40%\")\n\n def it_runs_multi_channel():\n prep_result = prep_fixtures.result_random_fixture(10)\n nn_v2_result, sim_v2_result = _run(labels=\"DE,ABC\", _prep_result=prep_result)\n trues = sim_v2_result.test_true_pep_iz\n n_right = (nn_v2_result.calls().pep_i == trues).sum()\n n_total = trues.shape[0]\n assert n_right >= int(0.3 * n_total)\n\n @zest.skip(reason=\"WIP\")\n def run_without_sigproc():\n nn_v2_result, sim_v2_result = _run(sigproc_result=None)\n\n a = (\n sim_v2_result.test_true_dye_iz == nn_v2_result._test_calls.dyt_i.values\n ).sum()\n\n def it_returns_calls():\n raise NotImplementedError\n\n def it_returns_all():\n raise NotImplementedError\n\n def it_filters_nul_calls():\n raise NotImplementedError\n\n def it_filters_k_range():\n raise NotImplementedError\n\n def it_filters_k_score():\n raise NotImplementedError\n\n zest()\n\n @zest.skip(reason=\"WIP\")\n def it_runs_with_sigproc():\n raise NotImplementedError\n # TODO Need to deal with sigproc v2 calibration fixtures\n\n sigproc_result = simple_sigproc_v2_result_fixture(prep_result)\n nn_v2_result, sim_v2_result = _run(labels=\"DE\", sigproc_result=sigproc_result)\n\n zest()\n\n\[email protected](\"TODO nn_v2 broken need to repair\")\ndef zest_v2_stress_like_e2e():\n # This was dying with a \"double free or corruption (!prev)\"\n # This was a bug in n_dyetracks counting now fixed, but leaving this test in for regression.\n\n prep_params = PrepParams(\n decoy_mode=None,\n n_peps_limit=None,\n n_ptms_limit=5,\n protease=None,\n proteins=[\n Munch(\n abundance=None,\n name=\"pep25\",\n ptm_locs=\"\",\n is_poi=0,\n 
sequence=\"GCAGCAGAG \",\n )\n ],\n proteins_of_interest=[],\n )\n pro_spec_df = pd.DataFrame(prep_params.proteins)\n prep_result = prep(prep_params, pro_spec_df)\n\n sim_v2_param_block = Munch(\n allow_train_test_to_be_identical=False,\n enable_ptm_labels=False,\n dyes=[Munch(dye_name=\"dye_0\", channel_name=\"ch0\")],\n labels=[Munch(aa=\"C\", dye_name=\"dye_0\", label_name=\"label_0\", ptm_only=False,)],\n priors_desc={\n \"p_non_fluorescent\": Munch(\n class_name=\"MLEPrior\", params=Munch(value=0.07),\n ),\n \"p_bleach\": Munch(class_name=\"MLEPrior\", params=Munch(value=0.05),),\n \"gain_mu\": Munch(class_name=\"MLEPrior\", params=Munch(value=7500.0),),\n \"gain_sigma\": Munch(class_name=\"MLEPrior\", params=Munch(value=0.16),),\n \"bg_mu\": Munch(class_name=\"MLEPrior\", params=Munch(value=300.0),),\n \"bg_sigma\": Munch(class_name=\"MLEPrior\", params=Munch(value=700.0),),\n \"p_detach\": Munch(class_name=\"MLEPrior\", params=Munch(value=0.05),),\n \"p_edman_failure\": Munch(class_name=\"MLEPrior\", params=Munch(value=0.06),),\n \"row_k_sigma\": Munch(class_name=\"MLEPrior\", params=Munch(value=0.15),),\n },\n is_survey=False,\n n_edmans=8,\n n_mocks=1,\n n_pres=0,\n n_samples_test=1000,\n n_samples_train=5000,\n random_seed=None,\n test_includes_dyemat=False,\n train_includes_radmat=False,\n use_lognormal_model=True,\n )\n\n sim_v2_params = SimV2Params(include_dfs=True, **sim_v2_param_block)\n\n sim_v2_result = sim_v2_worker.sim_v2(sim_v2_params, prep_result)\n sim_v2_result._generate_flu_info(prep_result)\n\n nn_v2_params = NNV2Params(\n source=\"zest_v2_stress_like_e2e\", priors_desc=sim_v2_param_block.priors_desc\n )\n nn_result = nn_v2(nn_v2_params, prep_result, sim_v2_result, None)\n df = nn_result.calls()\n assert np.all(df.pep_i == 1)\n"
] |
[
[
"numpy.all",
"numpy.flip",
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
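A short, self-contained sketch of the three APIs tracked for the plaster test file above: numpy.flip reverses the simulated arrays along axis 0 (the deliberate misalignment check in _run), pandas.DataFrame builds the protein spec table that prep() consumes, and numpy.all is the final call-correctness style assertion. The array contents and the single-protein dict below are illustrative assumptions, not fixture values.

import numpy as np
import pandas as pd

# Reverse rows on purpose, mirroring the flip of test_radmat / test_true_pep_iz above.
radmat = np.arange(12).reshape(4, 3)
flipped = np.flip(radmat, axis=0).copy()
assert np.all(flipped[0] == radmat[-1])

# Protein spec list -> DataFrame, the shape that prep(prep_params, pro_spec_df) expects.
proteins = [{"name": "pep25", "sequence": "GCAGCAGAG", "ptm_locs": "", "abundance": None}]
pro_spec_df = pd.DataFrame(proteins)
print(pro_spec_df.shape)   # (1, 4)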
saksham219/scvelo
|
[
"41fb2a90ae6a71577cf2c55b80e1ade4407891b7",
"41fb2a90ae6a71577cf2c55b80e1ade4407891b7"
] |
[
"scvelo/preprocessing/neighbors.py",
"scvelo/plotting/velocity.py"
] |
[
"from .. import settings\nfrom .. import logging as logg\n\nfrom scanpy.api import Neighbors\nfrom scanpy.api.pp import pca\nfrom scipy.sparse import issparse\nimport numpy as np\n\n\ndef neighbors(adata, n_neighbors=30, n_pcs=30, use_rep=None, knn=True, random_state=0, method='umap',\n metric='euclidean', metric_kwds={}, copy=False):\n \"\"\"\n Compute a neighborhood graph of observations [McInnes18]_.\n The neighbor search efficiency of this heavily relies on UMAP [McInnes18]_,\n which also provides a method for estimating connectivities of data points -\n the connectivity of the manifold (`method=='umap'`). If `method=='diffmap'`,\n connectivities are computed according to [Coifman05]_, in the adaption of\n [Haghverdi16]_.\n Parameters\n ----------\n adata\n Annotated data matrix.\n n_neighbors\n The size of local neighborhood (in terms of number of neighboring data\n points) used for manifold approximation. Larger values result in more\n global views of the manifold, while smaller values result in more local\n data being preserved. In general values should be in the range 2 to 100.\n If `knn` is `True`, number of nearest neighbors to be searched. If `knn`\n is `False`, a Gaussian kernel width is set to the distance of the\n `n_neighbors` neighbor.\n n_pcs : `int` or `None` (default: None)\n Use this many PCs. If n_pcs==0 use .X if use_rep is None.\n\n use_rep : `None`, `'X'` or any key for `.obsm` (default: None)\n Use the indicated representation. If `None`, the representation is chosen automatically:\n for .n_vars < 50, .X is used, otherwise ‘X_pca’ is used.\n knn\n If `True`, use a hard threshold to restrict the number of neighbors to\n `n_neighbors`, that is, consider a knn graph. Otherwise, use a Gaussian\n Kernel to assign low weights to neighbors more distant than the\n `n_neighbors` nearest neighbor.\n random_state\n A numpy random seed.\n method : {{'umap', 'gauss', `sklearn`, `None`}} (default: `'umap'`)\n Use 'umap' [McInnes18]_ or 'gauss' (Gauss kernel following [Coifman05]_\n with adaptive width [Haghverdi16]_) for computing connectivities.\n metric\n A known metric’s name or a callable that returns a distance.\n metric_kwds\n Options for the metric.\n copy\n Return a copy instead of writing to adata.\n Returns\n -------\n Depending on `copy`, updates or returns `adata` with the following:\n connectivities : sparse matrix (`.uns['neighbors']`, dtype `float32`)\n Weighted adjacency matrix of the neighborhood graph of data\n points. 
Weights should be interpreted as connectivities.\n distances : sparse matrix (`.uns['neighbors']`, dtype `float32`)\n Instead of decaying weights, this stores distances for each pair of\n neighbors.\n \"\"\"\n logg.info('computing neighbors', r=True)\n adata = adata.copy() if copy else adata\n if adata.isview: adata._init_as_actual(adata.copy())\n\n if (use_rep is None or use_rep is 'X_pca') \\\n and ('X_pca' not in adata.obsm.keys() or n_pcs > adata.obsm['X_pca'].shape[1]):\n pca(adata, n_comps=n_pcs, svd_solver='arpack')\n\n adata.uns['neighbors'] = {}\n adata.uns['neighbors']['params'] = {'n_neighbors': n_neighbors, 'method': method}\n\n if method is 'sklearn':\n from sklearn.neighbors import NearestNeighbors\n neighbors = NearestNeighbors(n_neighbors=n_neighbors)\n neighbors.fit(adata.obsm['X_pca'] if use_rep is None else adata.obsm[use_rep])\n adata.uns['neighbors']['distances'] = neighbors.kneighbors_graph(mode='distance')\n adata.uns['neighbors']['connectivities'] = neighbors.kneighbors_graph(mode='connectivity')\n\n else:\n neighbors = Neighbors(adata)\n neighbors.compute_neighbors(n_neighbors=n_neighbors, knn=knn, n_pcs=n_pcs, use_rep=use_rep, method=method,\n metric=metric, metric_kwds=metric_kwds, random_state=random_state, write_knn_indices=True)\n adata.uns['neighbors']['distances'] = neighbors.distances\n adata.uns['neighbors']['connectivities'] = neighbors.connectivities\n adata.uns['neighbors']['indices'] = neighbors.knn_indices\n\n logg.info(' finished', time=True, end=' ' if settings.verbosity > 2 else '\\n')\n logg.hint(\n 'added to `.uns[\\'neighbors\\']`\\n'\n ' \\'distances\\', weighted adjacency matrix\\n'\n ' \\'connectivities\\', weighted adjacency matrix')\n return adata if copy else None\n\n\ndef select_distances(dist, n_neighbors=None):\n D = dist.copy()\n n_counts = (D > 0).sum(1).A1 if issparse(D) else (D > 0).sum(1)\n n_neighbors = n_counts.min() if n_neighbors is None else min(n_counts.min(), n_neighbors)\n rows = np.where(n_counts > n_neighbors)[0]\n cumsum_neighs = np.insert(n_counts.cumsum(), 0, 0)\n dat = D.data\n\n for row in rows:\n n0, n1 = cumsum_neighs[row], cumsum_neighs[row + 1]\n rm_idx = n0 + dat[n0:n1].argsort()[n_neighbors:]\n dat[rm_idx] = 0\n D.eliminate_zeros()\n return D\n\n\ndef select_connectivities(connectivities, n_neighbors=None):\n C = connectivities.copy()\n n_counts = (C > 0).sum(1).A1 if issparse(C) else (C > 0).sum(1)\n n_neighbors = n_counts.min() if n_neighbors is None else min(n_counts.min(), n_neighbors)\n rows = np.where(n_counts > n_neighbors)[0]\n cumsum_neighs = np.insert(n_counts.cumsum(), 0, 0)\n dat = C.data\n\n for row in rows:\n n0, n1 = cumsum_neighs[row], cumsum_neighs[row + 1]\n rm_idx = n0 + dat[n0:n1].argsort()[::-1][n_neighbors:]\n dat[rm_idx] = 0\n C.eliminate_zeros()\n return C\n\n\ndef neighbors_to_be_recomputed(adata, n_neighbors=None):\n # check if neighbors graph is disrupted\n n_neighs = (adata.uns['neighbors']['distances'] > 0).sum(1)\n result = n_neighs.max() - n_neighs.min() >= 2\n # check if neighbors graph has sufficient number of neighbors\n if n_neighbors is not None:\n result = result or n_neighbors > adata.uns['neighbors']['params']['n_neighbors']\n return result\n\n\ndef get_connectivities(adata, mode='connectivities', n_neighbors=None, recurse_neighbors=False):\n C = adata.uns['neighbors'][mode]\n if n_neighbors is not None and n_neighbors < adata.uns['neighbors']['params']['n_neighbors']:\n C = select_connectivities(C, n_neighbors) if mode == 'connectivities' else select_distances(C, 
n_neighbors)\n connectivities = C > 0\n connectivities.setdiag(1)\n if recurse_neighbors:\n connectivities += connectivities.dot(connectivities * .5)\n connectivities.data = np.clip(connectivities.data, 0, 1)\n connectivities = connectivities.multiply(1. / connectivities.sum(1))\n return connectivities.tocsr().astype(np.float32)",
"from ..preprocessing.moments import second_order_moments\nfrom ..tools.rank_velocity_genes import rank_velocity_genes\nfrom .scatter import scatter\nfrom .utils import savefig_or_show, default_basis, default_size\n\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import rcParams\nfrom matplotlib.ticker import MaxNLocator\nimport matplotlib.pyplot as pl\nfrom scipy.sparse import issparse\n\n\ndef velocity(adata, var_names=None, basis=None, groupby=None, groups=None, mode=None, fits='all', layers='all',\n color=None, color_map='RdBu_r', colorbar=False, perc=[2,98], use_raw=False, size=None, alpha=.5,\n fontsize=None, figsize=None, dpi=None, show=True, save=None, ax=None, ncols=None, **kwargs):\n \"\"\"Phase and velocity plot for set of genes.\n\n The phase plot shows spliced against unspliced expressions with steady-state fit.\n Further the embedding is shown colored by velocity and expression.\n\n Arguments\n ---------\n adata: :class:`~anndata.AnnData`\n Annotated data matrix.\n var_names: `str` or list of `str` (default: `None`)\n Which variables to show.\n basis: `str` (default: `'umap'`)\n Key for embedding coordinates.\n mode: `'stochastic'` or `None` (default: `None`)\n Whether to show show covariability phase portrait.\n fits: `str` or list of `str` (default: `'all'`)\n Which steady-state estimates to show.\n layers: `str` or list of `str` (default: `'all'`)\n Which layers to show.\n color: `str`, list of `str` or `None` (default: `None`)\n Key for annotations of observations/cells or variables/genes\n color_map: `str` (default: `matplotlib.rcParams['image.cmap']`)\n String denoting matplotlib color map.\n perc: tuple, e.g. [2,98] (default: `None`)\n Specify percentile for continuous coloring.\n size: `float` (default: 5)\n Point size.\n alpha: `float` (default: 1)\n Set blending - 0 transparent to 1 opaque.\n fontsize: `float` (default: `None`)\n Label font size.\n figsize: tuple (default: `(7,5)`)\n Figure size.\n dpi: `int` (default: 80)\n Figure dpi.\n show: `bool`, optional (default: `None`)\n Show the plot, do not return axis.\n save: `bool` or `str`, optional (default: `None`)\n If `True` or a `str`, save the figure. A string is appended to the default filename.\n Infer the filetype if ending on {'.pdf', '.png', '.svg'}.\n ax: `matplotlib.Axes`, optional (default: `None`)\n A matplotlib axes object. 
Only works if plotting a single component.\n\n \"\"\"\n basis = default_basis(adata) if basis is None else basis\n\n if isinstance(groupby, str) and groupby in adata.obs.keys():\n if 'rank_velocity_genes' not in adata.uns.keys() or adata.uns['rank_velocity_genes']['params']['groupby'] != groupby:\n rank_velocity_genes(adata, vkey='velocity', n_genes=10, groupby=groupby)\n names = np.array(adata.uns['rank_velocity_genes']['names'].tolist())\n if groups is None:\n var_names = names[:, 0]\n else:\n groups = [groups] if isinstance(groups, str) else groups\n idx = np.array([any([g in group for g in groups]) for group in adata.obs[groupby].cat.categories])\n var_names = np.hstack(names[idx, :int(10 / idx.sum())])\n elif var_names is not None:\n var_names = [var_names] if isinstance(var_names, str) else [var for var in var_names if var in adata.var_names]\n else:\n raise ValueError('No var_names or groups specified.')\n var_names = pd.unique(var_names)\n\n (skey, ukey) = ('spliced', 'unspliced') if use_raw else ('Ms', 'Mu')\n layers = ['velocity', skey, 'variance_velocity'] if layers == 'all' else layers\n layers = [layer for layer in layers if layer in adata.layers.keys()]\n\n fits = adata.layers.keys() if fits == 'all' else fits\n fits = [fit for fit in fits if all(['velocity' in fit, fit + '_gamma' in adata.var.keys()])]\n stochastic_fits = [fit for fit in fits if 'variance_' + fit in adata.layers.keys()]\n\n nplts = (1 + len(layers) + (mode == 'stochastic') * 2)\n ncols = 1 if ncols is None else ncols\n nrows = int(np.ceil(len(var_names) / ncols))\n ncols = int(ncols * nplts)\n figsize = rcParams['figure.figsize'] if figsize is None else figsize\n ax = pl.figure(figsize=(figsize[0] * ncols / 2, figsize[1] * nrows / 2), dpi=dpi) if ax is None else ax\n gs = pl.GridSpec(nrows, ncols, wspace=0.3, hspace=0.5)\n\n size = default_size(adata) / 2 if size is None else size # since fontsize is halved in width and height\n fontsize = rcParams['font.size'] if fontsize is None else fontsize\n for v, var in enumerate(var_names):\n _adata = adata[:, var]\n s, u = _adata.layers[skey], _adata.layers[ukey]\n if issparse(s): s, u = s.A, u.A\n\n # spliced/unspliced phase portrait with steady-state estimate\n ax = pl.subplot(gs[v * nplts])\n scatter(adata, basis=var, color=color, colorbar=colorbar, frameon=True, title=var, size=size, use_raw=use_raw,\n alpha=alpha, fontsize=fontsize, xlabel='spliced', ylabel='unspliced', show=False, ax=ax, save=False,\n legend_loc=None if v < len(var_names)-1 else 'lower right', **kwargs)\n\n # velocity and expression plots\n for l, layer in enumerate(layers):\n ax = pl.subplot(gs[v * nplts + l + 1])\n title = 'expression' if layer == skey else layer\n scatter(adata, basis=basis, color=var, layer=layer, color_map=color_map, colorbar=colorbar, title=title,\n perc=perc, use_raw=use_raw, fontsize=fontsize, size=size, alpha=alpha, frameon=False, show=False, ax=ax, save=False, **kwargs)\n\n if mode == 'stochastic':\n ss, us = second_order_moments(_adata)\n ss, us = ss.flatten(), us.flatten()\n fit = stochastic_fits[0]\n\n ax = pl.subplot(gs[v * nplts + len(layers) + 1])\n offset = _adata.var[fit + '_offset'] if fit + '_offset' in adata.var.keys() else 0\n beta = _adata.var[fit + '_beta'] if fit + '_beta' in adata.var.keys() else 1\n x = 2 * (ss - s**2) - s\n y = 2 * (us - u * s) + u + 2 * s * offset / beta\n\n scatter(adata, x=x, y=y, color=color, colorbar=colorbar, title=var, fontsize=40/ncols, size=size, perc=perc,\n xlabel=r'2 $\\Sigma_s - \\langle s \\rangle$', ylabel=r'2 
$\\Sigma_{us} + \\langle u \\rangle$',\n use_raw=use_raw, frameon=True, ax=ax, save=False, show=False, **kwargs)\n\n xnew = np.linspace(x.min(), x.max() * 1.02)\n for fit in stochastic_fits:\n gamma = _adata.var[fit + '_gamma'].values if fit + '_gamma' in adata.var.keys() else 1\n beta = _adata.var[fit + '_beta'].values if fit + '_beta' in adata.var.keys() else 1\n offset2 = _adata.var[fit + '_offset2'].values if fit + '_offset2' in adata.var.keys() else 0\n\n pl.plot(xnew, gamma / beta * xnew + offset2 / beta, c='k', linestyle='--')\n if v == len(var_names) - 1: pl.legend(fits, loc='lower right', prop={'size': 34/ncols})\n\n savefig_or_show('', dpi=dpi, save=save, show=show)\n if not show: return ax\n"
] |
[
[
"sklearn.neighbors.NearestNeighbors",
"numpy.where",
"scipy.sparse.issparse",
"numpy.clip"
],
[
"matplotlib.pyplot.legend",
"scipy.sparse.issparse",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplot",
"pandas.unique",
"matplotlib.pyplot.GridSpec",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
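A minimal sketch of the sklearn branch of neighbors() above: NearestNeighbors fitted on a PCA-like matrix, with kneighbors_graph producing the sparse distance and connectivity matrices that the function stores under adata.uns['neighbors']. The random matrix is an assumed stand-in for adata.obsm['X_pca']; nothing here depends on AnnData or scanpy.

import numpy as np
from scipy.sparse import issparse
from sklearn.neighbors import NearestNeighbors

X_pca = np.random.rand(200, 30)        # assumed stand-in for adata.obsm['X_pca']

nn = NearestNeighbors(n_neighbors=30)
nn.fit(X_pca)
distances = nn.kneighbors_graph(mode='distance')            # sparse matrix of neighbor distances
connectivities = nn.kneighbors_graph(mode='connectivity')   # sparse 0/1 adjacency

assert issparse(distances) and issparse(connectivities)
print(distances.shape, connectivities.nnz)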
2005606/arcface-tf2-new
|
[
"6a34a66fb6ef8a873beb893f970a126e7bd4d526"
] |
[
"modules/utils.py"
] |
[
"import yaml\nimport numpy as np\nimport tensorflow as tf\nfrom absl import logging\n\n\ndef set_memory_growth():\n gpus = tf.config.experimental.list_physical_devices('GPU')\n if gpus:\n try:\n # Currently, memory growth needs to be the same across GPUs\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n logical_gpus = tf.config.experimental.list_logical_devices(\n 'GPU')\n logging.info(\n \"Detect {} Physical GPUs, {} Logical GPUs.\".format(\n len(gpus), len(logical_gpus)))\n except RuntimeError as e:\n # Memory growth must be set before GPUs have been initialized\n logging.info(e)\n\n\ndef load_yaml(load_path):\n \"\"\"load yaml file\"\"\"\n with open(load_path, 'r') as f:\n loaded = yaml.load(f, Loader=yaml.Loader)\n\n return loaded\n\n\ndef get_ckpt_inf(ckpt_path, steps_per_epoch):\n \"\"\"get ckpt information\"\"\"\n split_list = ckpt_path.split('e_')[-1].split('_b_')\n epochs = int(split_list[0])\n batchs = int(split_list[-1].split('.ckpt')[0])\n steps = (epochs - 1) * steps_per_epoch + batchs\n\n return epochs, steps + 1\n\n\ndef l2_norm(x, axis=1):\n \"\"\"l2 norm\"\"\"\n norm = np.linalg.norm(x, axis=axis, keepdims=True)\n output = x / norm\n\n return output\n"
] |
[
[
"tensorflow.config.experimental.list_physical_devices",
"tensorflow.config.experimental.list_logical_devices",
"numpy.linalg.norm",
"tensorflow.config.experimental.set_memory_growth"
]
] |
[
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
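The numpy half of the modules/utils.py file above in isolation: l2_norm divides each embedding row by its Euclidean norm via numpy.linalg.norm with keepdims=True. The TensorFlow memory-growth calls are left out because they only take effect on a machine with visible GPUs; the embedding batch below is a dummy assumption.

import numpy as np

def l2_norm(x, axis=1):
    # Divide each row by its Euclidean norm, as in the helper above.
    norm = np.linalg.norm(x, axis=axis, keepdims=True)
    return x / norm

embeddings = np.random.rand(4, 512)                 # dummy ArcFace-style embedding batch
unit = l2_norm(embeddings)
assert np.allclose(np.linalg.norm(unit, axis=1), 1.0)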
djalmabright/matplotlib
|
[
"35e769d6c1fa47996935cfd69024ed0c95018cce"
] |
[
"mathtext_example.py"
] |
[
"from __future__ import print_function\nimport matplotlib.pyplot as plt\nimport os\nimport sys\nimport re\nimport gc\n\n# Selection of features following \"Writing mathematical expressions\" tutorial\nmathtext_titles = {\n 0: \"Header demo\",\n 1: \"Subscripts and superscripts\",\n 2: \"Fractions, binomials and stacked numbers\",\n 3: \"Radicals\",\n 4: \"Fonts\",\n 5: \"Accents\",\n 6: \"Greek, Hebrew\",\n 7: \"Delimiters, functions and Symbols\"}\nn_lines = len(mathtext_titles)\n\n# Randomly picked examples\nmathext_demos = {\n 0: r\"$W^{3\\beta}_{\\delta_1 \\rho_1 \\sigma_2} = \"\n r\"U^{3\\beta}_{\\delta_1 \\rho_1} + \\frac{1}{8 \\pi 2} \"\n r\"\\int^{\\alpha_2}_{\\alpha_2} d \\alpha^\\prime_2 \\left[\\frac{ \"\n r\"U^{2\\beta}_{\\delta_1 \\rho_1} - \\alpha^\\prime_2U^{1\\beta}_\"\n r\"{\\rho_1 \\sigma_2} }{U^{0\\beta}_{\\rho_1 \\sigma_2}}\\right]$\",\n\n 1: r\"$\\alpha_i > \\beta_i,\\ \"\n r\"\\alpha_{i+1}^j = {\\rm sin}(2\\pi f_j t_i) e^{-5 t_i/\\tau},\\ \"\n r\"\\ldots$\",\n\n 2: r\"$\\frac{3}{4},\\ \\binom{3}{4},\\ \\stackrel{3}{4},\\ \"\n r\"\\left(\\frac{5 - \\frac{1}{x}}{4}\\right),\\ \\ldots$\",\n\n 3: r\"$\\sqrt{2},\\ \\sqrt[3]{x},\\ \\ldots$\",\n\n 4: r\"$\\mathrm{Roman}\\ , \\ \\mathit{Italic}\\ , \\ \\mathtt{Typewriter} \\ \"\n r\"\\mathrm{or}\\ \\mathcal{CALLIGRAPHY}$\",\n\n 5: r\"$\\acute a,\\ \\bar a,\\ \\breve a,\\ \\dot a,\\ \\ddot a, \\ \\grave a, \\ \"\n r\"\\hat a,\\ \\tilde a,\\ \\vec a,\\ \\widehat{xyz},\\ \\widetilde{xyz},\\ \"\n r\"\\ldots$\",\n\n 6: r\"$\\alpha,\\ \\beta,\\ \\chi,\\ \\delta,\\ \\lambda,\\ \\mu,\\ \"\n r\"\\Delta,\\ \\Gamma,\\ \\Omega,\\ \\Phi,\\ \\Pi,\\ \\Upsilon,\\ \\nabla,\\ \"\n r\"\\aleph,\\ \\beth,\\ \\daleth,\\ \\gimel,\\ \\ldots$\",\n\n 7: r\"$\\coprod,\\ \\int,\\ \\oint,\\ \\prod,\\ \\sum,\\ \"\n r\"\\log,\\ \\sin,\\ \\approx,\\ \\oplus,\\ \\star,\\ \\varpropto,\\ \"\n r\"\\infty,\\ \\partial,\\ \\Re,\\ \\leftrightsquigarrow, \\ \\ldots$\"}\n\n\ndef doall():\n # Colors used in mpl online documentation.\n mpl_blue_rvb = (191./255., 209./256., 212./255.)\n mpl_orange_rvb = (202/255., 121/256., 0./255.)\n mpl_grey_rvb = (51./255., 51./255., 51./255.)\n\n # Creating figure and axis.\n plt.figure(figsize=(6, 7))\n plt.axes([0.01, 0.01, 0.98, 0.90], facecolor=\"white\", frameon=True)\n plt.gca().set_xlim(0., 1.)\n plt.gca().set_ylim(0., 1.)\n plt.gca().set_title(\"Matplotlib's math rendering engine\",\n color=mpl_grey_rvb, fontsize=14, weight='bold')\n plt.gca().set_xticklabels(\"\", visible=False)\n plt.gca().set_yticklabels(\"\", visible=False)\n\n # Gap between lines in axes coords\n line_axesfrac = (1. / (n_lines))\n\n # Plotting header demonstration formula\n full_demo = mathext_demos[0]\n plt.annotate(full_demo,\n xy=(0.5, 1. - 0.59*line_axesfrac),\n xycoords='data', color=mpl_orange_rvb, ha='center',\n fontsize=20)\n\n # Plotting features demonstration formulae\n for i_line in range(1, n_lines):\n baseline = 1. 
- (i_line)*line_axesfrac\n baseline_next = baseline - line_axesfrac*1.\n title = mathtext_titles[i_line] + \":\"\n fill_color = ['white', mpl_blue_rvb][i_line % 2]\n plt.fill_between([0., 1.], [baseline, baseline],\n [baseline_next, baseline_next],\n color=fill_color, alpha=0.5)\n plt.annotate(title,\n xy=(0.07, baseline - 0.3*line_axesfrac),\n xycoords='data', color=mpl_grey_rvb, weight='bold')\n demo = mathext_demos[i_line]\n plt.annotate(demo,\n xy=(0.05, baseline - 0.75*line_axesfrac),\n xycoords='data', color=mpl_grey_rvb,\n fontsize=16)\n\n for i in range(n_lines):\n s = mathext_demos[i]\n print(i, s)\n plt.show()\n\ndoall()"
] |
[
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.annotate",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.fill_between",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
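A reduced sketch of the layout pattern in mathtext_example.py above: one figure, an axes with fixed [0, 1] limits, alternating fill_between bands, and annotate calls placing a title and a mathtext demo per band. Only two of the eight demo rows are kept, the RGB band colour is replaced by a named colour, and ticks are cleared with set_xticks/set_yticks rather than blank tick labels; those simplifications are assumptions of this sketch, not the script's.

import matplotlib.pyplot as plt

titles = ["Subscripts and superscripts", "Fractions"]          # trimmed from mathtext_titles
demos = [r"$\alpha_i > \beta_i$", r"$\frac{3}{4},\ \binom{3}{4}$"]

plt.figure(figsize=(6, 3))
plt.axes([0.01, 0.01, 0.98, 0.90], facecolor="white", frameon=True)
ax = plt.gca()
ax.set_xlim(0.0, 1.0)
ax.set_ylim(0.0, 1.0)
ax.set_xticks([])
ax.set_yticks([])

step = 1.0 / len(titles)
for i, (title, demo) in enumerate(zip(titles, demos)):
    top = 1.0 - i * step
    # Alternate white / coloured background bands, one per feature row.
    plt.fill_between([0.0, 1.0], [top, top], [top - step, top - step],
                     color=["white", "lightsteelblue"][i % 2], alpha=0.5)
    plt.annotate(title + ":", xy=(0.07, top - 0.3 * step), xycoords="data", weight="bold")
    plt.annotate(demo, xy=(0.05, top - 0.75 * step), xycoords="data", fontsize=14)

plt.show()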
Pawel-Kranzberg/pandas
|
[
"6f90cb3d7bd5891d15a427252fba00027ca6084d",
"6f90cb3d7bd5891d15a427252fba00027ca6084d",
"6f90cb3d7bd5891d15a427252fba00027ca6084d",
"6f90cb3d7bd5891d15a427252fba00027ca6084d",
"6f90cb3d7bd5891d15a427252fba00027ca6084d",
"6f90cb3d7bd5891d15a427252fba00027ca6084d",
"6f90cb3d7bd5891d15a427252fba00027ca6084d",
"6f90cb3d7bd5891d15a427252fba00027ca6084d"
] |
[
"pandas/tests/indexing/common.py",
"asv_bench/benchmarks/tslibs/tz_convert.py",
"pandas/tests/indexes/period/test_period.py",
"pandas/tests/frame/test_repr_info.py",
"pandas/compat/numpy/__init__.py",
"pandas/util/_validators.py",
"pandas/tests/frame/methods/test_join.py",
"pandas/core/arrays/interval.py"
] |
[
"\"\"\" common utilities \"\"\"\nimport itertools\n\nimport numpy as np\n\nfrom pandas import (\n DataFrame,\n Float64Index,\n MultiIndex,\n Series,\n UInt64Index,\n date_range,\n)\nimport pandas._testing as tm\n\n\ndef _mklbl(prefix, n):\n return [f\"{prefix}{i}\" for i in range(n)]\n\n\ndef _axify(obj, key, axis):\n # create a tuple accessor\n axes = [slice(None)] * obj.ndim\n axes[axis] = key\n return tuple(axes)\n\n\nclass Base:\n \"\"\" indexing comprehensive base class \"\"\"\n\n _kinds = {\"series\", \"frame\"}\n _typs = {\n \"ints\",\n \"uints\",\n \"labels\",\n \"mixed\",\n \"ts\",\n \"floats\",\n \"empty\",\n \"ts_rev\",\n \"multi\",\n }\n\n def setup_method(self, method):\n\n self.series_ints = Series(np.random.rand(4), index=np.arange(0, 8, 2))\n self.frame_ints = DataFrame(\n np.random.randn(4, 4), index=np.arange(0, 8, 2), columns=np.arange(0, 12, 3)\n )\n\n self.series_uints = Series(\n np.random.rand(4), index=UInt64Index(np.arange(0, 8, 2))\n )\n self.frame_uints = DataFrame(\n np.random.randn(4, 4),\n index=UInt64Index(range(0, 8, 2)),\n columns=UInt64Index(range(0, 12, 3)),\n )\n\n self.series_floats = Series(\n np.random.rand(4), index=Float64Index(range(0, 8, 2))\n )\n self.frame_floats = DataFrame(\n np.random.randn(4, 4),\n index=Float64Index(range(0, 8, 2)),\n columns=Float64Index(range(0, 12, 3)),\n )\n\n m_idces = [\n MultiIndex.from_product([[1, 2], [3, 4]]),\n MultiIndex.from_product([[5, 6], [7, 8]]),\n MultiIndex.from_product([[9, 10], [11, 12]]),\n ]\n\n self.series_multi = Series(np.random.rand(4), index=m_idces[0])\n self.frame_multi = DataFrame(\n np.random.randn(4, 4), index=m_idces[0], columns=m_idces[1]\n )\n\n self.series_labels = Series(np.random.randn(4), index=list(\"abcd\"))\n self.frame_labels = DataFrame(\n np.random.randn(4, 4), index=list(\"abcd\"), columns=list(\"ABCD\")\n )\n\n self.series_mixed = Series(np.random.randn(4), index=[2, 4, \"null\", 8])\n self.frame_mixed = DataFrame(np.random.randn(4, 4), index=[2, 4, \"null\", 8])\n\n self.series_ts = Series(\n np.random.randn(4), index=date_range(\"20130101\", periods=4)\n )\n self.frame_ts = DataFrame(\n np.random.randn(4, 4), index=date_range(\"20130101\", periods=4)\n )\n\n dates_rev = date_range(\"20130101\", periods=4).sort_values(ascending=False)\n self.series_ts_rev = Series(np.random.randn(4), index=dates_rev)\n self.frame_ts_rev = DataFrame(np.random.randn(4, 4), index=dates_rev)\n\n self.frame_empty = DataFrame()\n self.series_empty = Series(dtype=object)\n\n # form agglomerates\n for kind in self._kinds:\n d = {}\n for typ in self._typs:\n d[typ] = getattr(self, f\"{kind}_{typ}\")\n\n setattr(self, kind, d)\n\n def generate_indices(self, f, values=False):\n \"\"\"\n generate the indices\n if values is True , use the axis values\n is False, use the range\n \"\"\"\n axes = f.axes\n if values:\n axes = (list(range(len(ax))) for ax in axes)\n\n return itertools.product(*axes)\n\n def get_value(self, name, f, i, values=False):\n \"\"\" return the value for the location i \"\"\"\n # check against values\n if values:\n return f.values[i]\n\n elif name == \"iat\":\n return f.iloc[i]\n else:\n assert name == \"at\"\n return f.loc[i]\n\n def check_values(self, f, func, values=False):\n\n if f is None:\n return\n axes = f.axes\n indices = itertools.product(*axes)\n\n for i in indices:\n result = getattr(f, func)[i]\n\n # check against values\n if values:\n expected = f.values[i]\n else:\n expected = f\n for a in reversed(i):\n expected = expected.__getitem__(a)\n\n 
tm.assert_almost_equal(result, expected)\n\n def check_result(self, method, key, typs=None, axes=None, fails=None):\n def _eq(axis, obj, key):\n \"\"\" compare equal for these 2 keys \"\"\"\n axified = _axify(obj, key, axis)\n try:\n getattr(obj, method).__getitem__(axified)\n\n except (IndexError, TypeError, KeyError) as detail:\n\n # if we are in fails, the ok, otherwise raise it\n if fails is not None:\n if isinstance(detail, fails):\n return\n raise\n\n if typs is None:\n typs = self._typs\n\n if axes is None:\n axes = [0, 1]\n else:\n assert axes in [0, 1]\n axes = [axes]\n\n # check\n for kind in self._kinds:\n\n d = getattr(self, kind)\n for ax in axes:\n for typ in typs:\n assert typ in self._typs\n\n obj = d[typ]\n if ax < obj.ndim:\n _eq(axis=ax, obj=obj, key=key)\n",
"import numpy as np\nfrom pytz import UTC\n\nfrom pandas._libs.tslibs.tzconversion import tz_localize_to_utc\n\nfrom .tslib import (\n _sizes,\n _tzs,\n tzlocal_obj,\n)\n\ntry:\n old_sig = False\n from pandas._libs.tslibs.tzconversion import tz_convert_from_utc\nexcept ImportError:\n old_sig = True\n from pandas._libs.tslibs.tzconversion import tz_convert as tz_convert_from_utc\n\n\nclass TimeTZConvert:\n params = [\n _sizes,\n [x for x in _tzs if x is not None],\n ]\n param_names = [\"size\", \"tz\"]\n\n def setup(self, size, tz):\n if size == 10 ** 6 and tz is tzlocal_obj:\n # tzlocal is cumbersomely slow, so skip to keep runtime in check\n raise NotImplementedError\n\n arr = np.random.randint(0, 10, size=size, dtype=\"i8\")\n self.i8data = arr\n\n def time_tz_convert_from_utc(self, size, tz):\n # effectively:\n # dti = DatetimeIndex(self.i8data, tz=tz)\n # dti.tz_localize(None)\n if old_sig:\n tz_convert_from_utc(self.i8data, UTC, tz)\n else:\n tz_convert_from_utc(self.i8data, tz)\n\n def time_tz_localize_to_utc(self, size, tz):\n # effectively:\n # dti = DatetimeIndex(self.i8data)\n # dti.tz_localize(tz, ambiguous=\"NaT\", nonexistent=\"NaT\")\n tz_localize_to_utc(self.i8data, tz, ambiguous=\"NaT\", nonexistent=\"NaT\")\n",
"import numpy as np\nimport pytest\n\nfrom pandas._libs.tslibs.period import IncompatibleFrequency\n\nfrom pandas import (\n DatetimeIndex,\n Index,\n NaT,\n Period,\n PeriodIndex,\n Series,\n date_range,\n offsets,\n period_range,\n)\nimport pandas._testing as tm\nfrom pandas.tests.indexes.datetimelike import DatetimeLike\n\n\nclass TestPeriodIndex(DatetimeLike):\n _holder = PeriodIndex\n\n @pytest.fixture(\n params=[\n tm.makePeriodIndex(10),\n period_range(\"20130101\", periods=10, freq=\"D\")[::-1],\n ],\n ids=[\"index_inc\", \"index_dec\"],\n )\n def index(self, request):\n return request.param\n\n def create_index(self) -> PeriodIndex:\n return period_range(\"20130101\", periods=5, freq=\"D\")\n\n def test_pickle_compat_construction(self):\n pass\n\n @pytest.mark.parametrize(\"freq\", [\"D\", \"M\", \"A\"])\n def test_pickle_round_trip(self, freq):\n idx = PeriodIndex([\"2016-05-16\", \"NaT\", NaT, np.NaN], freq=freq)\n result = tm.round_trip_pickle(idx)\n tm.assert_index_equal(result, idx)\n\n def test_where(self):\n # This is handled in test_indexing\n pass\n\n def test_no_millisecond_field(self):\n msg = \"type object 'DatetimeIndex' has no attribute 'millisecond'\"\n with pytest.raises(AttributeError, match=msg):\n DatetimeIndex.millisecond\n\n msg = \"'DatetimeIndex' object has no attribute 'millisecond'\"\n with pytest.raises(AttributeError, match=msg):\n DatetimeIndex([]).millisecond\n\n def test_make_time_series(self):\n index = period_range(freq=\"A\", start=\"1/1/2001\", end=\"12/1/2009\")\n series = Series(1, index=index)\n assert isinstance(series, Series)\n\n def test_view_asi8(self):\n idx = PeriodIndex([], freq=\"M\")\n\n exp = np.array([], dtype=np.int64)\n tm.assert_numpy_array_equal(idx.view(\"i8\"), exp)\n tm.assert_numpy_array_equal(idx.asi8, exp)\n\n idx = PeriodIndex([\"2011-01\", NaT], freq=\"M\")\n\n exp = np.array([492, -9223372036854775808], dtype=np.int64)\n tm.assert_numpy_array_equal(idx.view(\"i8\"), exp)\n tm.assert_numpy_array_equal(idx.asi8, exp)\n\n exp = np.array([14975, -9223372036854775808], dtype=np.int64)\n idx = PeriodIndex([\"2011-01-01\", NaT], freq=\"D\")\n tm.assert_numpy_array_equal(idx.view(\"i8\"), exp)\n tm.assert_numpy_array_equal(idx.asi8, exp)\n\n def test_values(self):\n idx = PeriodIndex([], freq=\"M\")\n\n exp = np.array([], dtype=object)\n tm.assert_numpy_array_equal(idx.values, exp)\n tm.assert_numpy_array_equal(idx.to_numpy(), exp)\n\n exp = np.array([], dtype=np.int64)\n tm.assert_numpy_array_equal(idx.asi8, exp)\n\n idx = PeriodIndex([\"2011-01\", NaT], freq=\"M\")\n\n exp = np.array([Period(\"2011-01\", freq=\"M\"), NaT], dtype=object)\n tm.assert_numpy_array_equal(idx.values, exp)\n tm.assert_numpy_array_equal(idx.to_numpy(), exp)\n exp = np.array([492, -9223372036854775808], dtype=np.int64)\n tm.assert_numpy_array_equal(idx.asi8, exp)\n\n idx = PeriodIndex([\"2011-01-01\", NaT], freq=\"D\")\n\n exp = np.array([Period(\"2011-01-01\", freq=\"D\"), NaT], dtype=object)\n tm.assert_numpy_array_equal(idx.values, exp)\n tm.assert_numpy_array_equal(idx.to_numpy(), exp)\n exp = np.array([14975, -9223372036854775808], dtype=np.int64)\n tm.assert_numpy_array_equal(idx.asi8, exp)\n\n def test_period_index_length(self):\n pi = period_range(freq=\"A\", start=\"1/1/2001\", end=\"12/1/2009\")\n assert len(pi) == 9\n\n pi = period_range(freq=\"Q\", start=\"1/1/2001\", end=\"12/1/2009\")\n assert len(pi) == 4 * 9\n\n pi = period_range(freq=\"M\", start=\"1/1/2001\", end=\"12/1/2009\")\n assert len(pi) == 12 * 9\n\n start = 
Period(\"02-Apr-2005\", \"B\")\n i1 = period_range(start=start, periods=20)\n assert len(i1) == 20\n assert i1.freq == start.freq\n assert i1[0] == start\n\n end_intv = Period(\"2006-12-31\", \"W\")\n i1 = period_range(end=end_intv, periods=10)\n assert len(i1) == 10\n assert i1.freq == end_intv.freq\n assert i1[-1] == end_intv\n\n end_intv = Period(\"2006-12-31\", \"1w\")\n i2 = period_range(end=end_intv, periods=10)\n assert len(i1) == len(i2)\n assert (i1 == i2).all()\n assert i1.freq == i2.freq\n\n msg = \"start and end must have same freq\"\n with pytest.raises(ValueError, match=msg):\n period_range(start=start, end=end_intv)\n\n end_intv = Period(\"2005-05-01\", \"B\")\n i1 = period_range(start=start, end=end_intv)\n\n msg = (\n \"Of the three parameters: start, end, and periods, exactly two \"\n \"must be specified\"\n )\n with pytest.raises(ValueError, match=msg):\n period_range(start=start)\n\n # infer freq from first element\n i2 = PeriodIndex([end_intv, Period(\"2005-05-05\", \"B\")])\n assert len(i2) == 2\n assert i2[0] == end_intv\n\n i2 = PeriodIndex(np.array([end_intv, Period(\"2005-05-05\", \"B\")]))\n assert len(i2) == 2\n assert i2[0] == end_intv\n\n # Mixed freq should fail\n vals = [end_intv, Period(\"2006-12-31\", \"w\")]\n msg = r\"Input has different freq=W-SUN from PeriodIndex\\(freq=B\\)\"\n with pytest.raises(IncompatibleFrequency, match=msg):\n PeriodIndex(vals)\n vals = np.array(vals)\n with pytest.raises(ValueError, match=msg):\n PeriodIndex(vals)\n\n def test_fields(self):\n # year, month, day, hour, minute\n # second, weekofyear, week, dayofweek, weekday, dayofyear, quarter\n # qyear\n pi = period_range(freq=\"A\", start=\"1/1/2001\", end=\"12/1/2005\")\n self._check_all_fields(pi)\n\n pi = period_range(freq=\"Q\", start=\"1/1/2001\", end=\"12/1/2002\")\n self._check_all_fields(pi)\n\n pi = period_range(freq=\"M\", start=\"1/1/2001\", end=\"1/1/2002\")\n self._check_all_fields(pi)\n\n pi = period_range(freq=\"D\", start=\"12/1/2001\", end=\"6/1/2001\")\n self._check_all_fields(pi)\n\n pi = period_range(freq=\"B\", start=\"12/1/2001\", end=\"6/1/2001\")\n self._check_all_fields(pi)\n\n pi = period_range(freq=\"H\", start=\"12/31/2001\", end=\"1/1/2002 23:00\")\n self._check_all_fields(pi)\n\n pi = period_range(freq=\"Min\", start=\"12/31/2001\", end=\"1/1/2002 00:20\")\n self._check_all_fields(pi)\n\n pi = period_range(\n freq=\"S\", start=\"12/31/2001 00:00:00\", end=\"12/31/2001 00:05:00\"\n )\n self._check_all_fields(pi)\n\n end_intv = Period(\"2006-12-31\", \"W\")\n i1 = period_range(end=end_intv, periods=10)\n self._check_all_fields(i1)\n\n def _check_all_fields(self, periodindex):\n fields = [\n \"year\",\n \"month\",\n \"day\",\n \"hour\",\n \"minute\",\n \"second\",\n \"weekofyear\",\n \"week\",\n \"dayofweek\",\n \"day_of_week\",\n \"dayofyear\",\n \"day_of_year\",\n \"quarter\",\n \"qyear\",\n \"days_in_month\",\n ]\n\n periods = list(periodindex)\n s = Series(periodindex)\n\n for field in fields:\n field_idx = getattr(periodindex, field)\n assert len(periodindex) == len(field_idx)\n for x, val in zip(periods, field_idx):\n assert getattr(x, field) == val\n\n if len(s) == 0:\n continue\n\n field_s = getattr(s.dt, field)\n assert len(periodindex) == len(field_s)\n for x, val in zip(periods, field_s):\n assert getattr(x, field) == val\n\n def test_is_(self):\n create_index = lambda: period_range(freq=\"A\", start=\"1/1/2001\", end=\"12/1/2009\")\n index = create_index()\n assert index.is_(index)\n assert not index.is_(create_index())\n assert 
index.is_(index.view())\n assert index.is_(index.view().view().view().view().view())\n assert index.view().is_(index)\n ind2 = index.view()\n index.name = \"Apple\"\n assert ind2.is_(index)\n assert not index.is_(index[:])\n assert not index.is_(index.asfreq(\"M\"))\n assert not index.is_(index.asfreq(\"A\"))\n\n assert not index.is_(index - 2)\n assert not index.is_(index - 0)\n\n def test_index_duplicate_periods(self):\n # monotonic\n idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq=\"A-JUN\")\n ts = Series(np.random.randn(len(idx)), index=idx)\n\n result = ts[\"2007\"]\n expected = ts[1:3]\n tm.assert_series_equal(result, expected)\n result[:] = 1\n assert (ts[1:3] == 1).all()\n\n # not monotonic\n idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq=\"A-JUN\")\n ts = Series(np.random.randn(len(idx)), index=idx)\n\n result = ts[\"2007\"]\n expected = ts[idx == \"2007\"]\n tm.assert_series_equal(result, expected)\n\n def test_index_unique(self):\n idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq=\"A-JUN\")\n expected = PeriodIndex([2000, 2007, 2009], freq=\"A-JUN\")\n tm.assert_index_equal(idx.unique(), expected)\n assert idx.nunique() == 3\n\n def test_shift(self):\n # This is tested in test_arithmetic\n pass\n\n def test_negative_ordinals(self):\n Period(ordinal=-1000, freq=\"A\")\n Period(ordinal=0, freq=\"A\")\n\n idx1 = PeriodIndex(ordinal=[-1, 0, 1], freq=\"A\")\n idx2 = PeriodIndex(ordinal=np.array([-1, 0, 1]), freq=\"A\")\n tm.assert_index_equal(idx1, idx2)\n\n def test_pindex_fieldaccessor_nat(self):\n idx = PeriodIndex(\n [\"2011-01\", \"2011-02\", \"NaT\", \"2012-03\", \"2012-04\"], freq=\"D\", name=\"name\"\n )\n\n exp = Index([2011, 2011, -1, 2012, 2012], dtype=np.int64, name=\"name\")\n tm.assert_index_equal(idx.year, exp)\n exp = Index([1, 2, -1, 3, 4], dtype=np.int64, name=\"name\")\n tm.assert_index_equal(idx.month, exp)\n\n def test_pindex_qaccess(self):\n pi = PeriodIndex([\"2Q05\", \"3Q05\", \"4Q05\", \"1Q06\", \"2Q06\"], freq=\"Q\")\n s = Series(np.random.rand(len(pi)), index=pi).cumsum()\n # Todo: fix these accessors!\n assert s[\"05Q4\"] == s[2]\n\n def test_pindex_multiples(self):\n expected = PeriodIndex(\n [\"2011-01\", \"2011-03\", \"2011-05\", \"2011-07\", \"2011-09\", \"2011-11\"],\n freq=\"2M\",\n )\n\n pi = period_range(start=\"1/1/11\", end=\"12/31/11\", freq=\"2M\")\n tm.assert_index_equal(pi, expected)\n assert pi.freq == offsets.MonthEnd(2)\n assert pi.freqstr == \"2M\"\n\n pi = period_range(start=\"1/1/11\", periods=6, freq=\"2M\")\n tm.assert_index_equal(pi, expected)\n assert pi.freq == offsets.MonthEnd(2)\n assert pi.freqstr == \"2M\"\n\n def test_iteration(self):\n index = period_range(start=\"1/1/10\", periods=4, freq=\"B\")\n\n result = list(index)\n assert isinstance(result[0], Period)\n assert result[0].freq == index.freq\n\n def test_with_multi_index(self):\n # #1705\n index = date_range(\"1/1/2012\", periods=4, freq=\"12H\")\n index_as_arrays = [index.to_period(freq=\"D\"), index.hour]\n\n s = Series([0, 1, 2, 3], index_as_arrays)\n\n assert isinstance(s.index.levels[0], PeriodIndex)\n\n assert isinstance(s.index.values[0][0], Period)\n\n def test_pickle_freq(self):\n # GH2891\n prng = period_range(\"1/1/2011\", \"1/1/2012\", freq=\"M\")\n new_prng = tm.round_trip_pickle(prng)\n assert new_prng.freq == offsets.MonthEnd()\n assert new_prng.freqstr == \"M\"\n\n def test_map(self):\n # test_map_dictlike generally tests\n\n index = PeriodIndex([2005, 2007, 2009], freq=\"A\")\n result = index.map(lambda x: x.ordinal)\n exp = 
Index([x.ordinal for x in index])\n tm.assert_index_equal(result, exp)\n\n def test_format_empty(self):\n # GH35712\n empty_idx = self._holder([], freq=\"A\")\n assert empty_idx.format() == []\n assert empty_idx.format(name=True) == [\"\"]\n\n\ndef test_maybe_convert_timedelta():\n pi = PeriodIndex([\"2000\", \"2001\"], freq=\"D\")\n offset = offsets.Day(2)\n assert pi._maybe_convert_timedelta(offset) == 2\n assert pi._maybe_convert_timedelta(2) == 2\n\n offset = offsets.BusinessDay()\n msg = r\"Input has different freq=B from PeriodIndex\\(freq=D\\)\"\n with pytest.raises(ValueError, match=msg):\n pi._maybe_convert_timedelta(offset)\n\n\ndef test_is_monotonic_with_nat():\n # GH#31437\n # PeriodIndex.is_monotonic should behave analogously to DatetimeIndex,\n # in particular never be monotonic when we have NaT\n dti = date_range(\"2016-01-01\", periods=3)\n pi = dti.to_period(\"D\")\n tdi = Index(dti.view(\"timedelta64[ns]\"))\n\n for obj in [pi, pi._engine, dti, dti._engine, tdi, tdi._engine]:\n if isinstance(obj, Index):\n # i.e. not Engines\n assert obj.is_monotonic\n assert obj.is_monotonic_increasing\n assert not obj.is_monotonic_decreasing\n assert obj.is_unique\n\n dti1 = dti.insert(0, NaT)\n pi1 = dti1.to_period(\"D\")\n tdi1 = Index(dti1.view(\"timedelta64[ns]\"))\n\n for obj in [pi1, pi1._engine, dti1, dti1._engine, tdi1, tdi1._engine]:\n if isinstance(obj, Index):\n # i.e. not Engines\n assert not obj.is_monotonic\n assert not obj.is_monotonic_increasing\n assert not obj.is_monotonic_decreasing\n assert obj.is_unique\n\n dti2 = dti.insert(3, NaT)\n pi2 = dti2.to_period(\"H\")\n tdi2 = Index(dti2.view(\"timedelta64[ns]\"))\n\n for obj in [pi2, pi2._engine, dti2, dti2._engine, tdi2, tdi2._engine]:\n if isinstance(obj, Index):\n # i.e. not Engines\n assert not obj.is_monotonic\n assert not obj.is_monotonic_increasing\n assert not obj.is_monotonic_decreasing\n assert obj.is_unique\n\n\[email protected](\"array\", [True, False])\ndef test_dunder_array(array):\n obj = PeriodIndex([\"2000-01-01\", \"2001-01-01\"], freq=\"D\")\n if array:\n obj = obj._data\n\n expected = np.array([obj[0], obj[1]], dtype=object)\n result = np.array(obj)\n tm.assert_numpy_array_equal(result, expected)\n\n result = np.asarray(obj)\n tm.assert_numpy_array_equal(result, expected)\n\n expected = obj.asi8\n for dtype in [\"i8\", \"int64\", np.int64]:\n result = np.array(obj, dtype=dtype)\n tm.assert_numpy_array_equal(result, expected)\n\n result = np.asarray(obj, dtype=dtype)\n tm.assert_numpy_array_equal(result, expected)\n\n for dtype in [\"float64\", \"int32\", \"uint64\"]:\n msg = \"argument must be\"\n with pytest.raises(TypeError, match=msg):\n np.array(obj, dtype=dtype)\n with pytest.raises(TypeError, match=msg):\n np.array(obj, dtype=getattr(np, dtype))\n",
"from datetime import (\n datetime,\n timedelta,\n)\nfrom io import StringIO\nimport warnings\n\nimport numpy as np\nimport pytest\n\nfrom pandas import (\n Categorical,\n DataFrame,\n MultiIndex,\n NaT,\n PeriodIndex,\n Series,\n Timestamp,\n date_range,\n option_context,\n period_range,\n)\nimport pandas._testing as tm\n\nimport pandas.io.formats.format as fmt\n\n\nclass TestDataFrameReprInfoEtc:\n def test_repr_bytes_61_lines(self, using_array_manager):\n # GH#12857\n lets = list(\"ACDEFGHIJKLMNOP\")\n slen = 50\n nseqs = 1000\n words = [[np.random.choice(lets) for x in range(slen)] for _ in range(nseqs)]\n df = DataFrame(words).astype(\"U1\")\n # TODO(Arraymanager) astype(\"U1\") actually gives this dtype instead of object\n if not using_array_manager:\n assert (df.dtypes == object).all()\n\n # smoke tests; at one point this raised with 61 but not 60\n repr(df)\n repr(df.iloc[:60, :])\n repr(df.iloc[:61, :])\n\n def test_repr_unicode_level_names(self, frame_or_series):\n index = MultiIndex.from_tuples([(0, 0), (1, 1)], names=[\"\\u0394\", \"i1\"])\n\n obj = DataFrame(np.random.randn(2, 4), index=index)\n if frame_or_series is Series:\n obj = obj[0]\n repr(obj)\n\n def test_assign_index_sequences(self):\n # GH#2200\n df = DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6], \"c\": [7, 8, 9]}).set_index(\n [\"a\", \"b\"]\n )\n index = list(df.index)\n index[0] = (\"faz\", \"boo\")\n df.index = index\n repr(df)\n\n # this travels an improper code path\n index[0] = [\"faz\", \"boo\"]\n df.index = index\n repr(df)\n\n def test_repr_with_mi_nat(self, float_string_frame):\n df = DataFrame({\"X\": [1, 2]}, index=[[NaT, Timestamp(\"20130101\")], [\"a\", \"b\"]])\n result = repr(df)\n expected = \" X\\nNaT a 1\\n2013-01-01 b 2\"\n assert result == expected\n\n def test_multiindex_na_repr(self):\n # only an issue with long columns\n df3 = DataFrame(\n {\n \"A\" * 30: {(\"A\", \"A0006000\", \"nuit\"): \"A0006000\"},\n \"B\" * 30: {(\"A\", \"A0006000\", \"nuit\"): np.nan},\n \"C\" * 30: {(\"A\", \"A0006000\", \"nuit\"): np.nan},\n \"D\" * 30: {(\"A\", \"A0006000\", \"nuit\"): np.nan},\n \"E\" * 30: {(\"A\", \"A0006000\", \"nuit\"): \"A\"},\n \"F\" * 30: {(\"A\", \"A0006000\", \"nuit\"): np.nan},\n }\n )\n\n idf = df3.set_index([\"A\" * 30, \"C\" * 30])\n repr(idf)\n\n def test_repr_name_coincide(self):\n index = MultiIndex.from_tuples(\n [(\"a\", 0, \"foo\"), (\"b\", 1, \"bar\")], names=[\"a\", \"b\", \"c\"]\n )\n\n df = DataFrame({\"value\": [0, 1]}, index=index)\n\n lines = repr(df).split(\"\\n\")\n assert lines[2].startswith(\"a 0 foo\")\n\n def test_repr_to_string(\n self,\n multiindex_year_month_day_dataframe_random_data,\n multiindex_dataframe_random_data,\n ):\n ymd = multiindex_year_month_day_dataframe_random_data\n frame = multiindex_dataframe_random_data\n\n repr(frame)\n repr(ymd)\n repr(frame.T)\n repr(ymd.T)\n\n buf = StringIO()\n frame.to_string(buf=buf)\n ymd.to_string(buf=buf)\n frame.T.to_string(buf=buf)\n ymd.T.to_string(buf=buf)\n\n def test_repr_empty(self):\n # empty\n repr(DataFrame())\n\n # empty with index\n frame = DataFrame(index=np.arange(1000))\n repr(frame)\n\n def test_repr_mixed(self, float_string_frame):\n buf = StringIO()\n\n # mixed\n repr(float_string_frame)\n float_string_frame.info(verbose=False, buf=buf)\n\n @pytest.mark.slow\n def test_repr_mixed_big(self):\n # big mixed\n biggie = DataFrame(\n {\"A\": np.random.randn(200), \"B\": tm.makeStringIndex(200)}, index=range(200)\n )\n biggie.loc[:20, \"A\"] = np.nan\n biggie.loc[:20, \"B\"] = np.nan\n\n repr(biggie)\n\n 
def test_repr(self, float_frame):\n buf = StringIO()\n\n # small one\n repr(float_frame)\n float_frame.info(verbose=False, buf=buf)\n\n # even smaller\n float_frame.reindex(columns=[\"A\"]).info(verbose=False, buf=buf)\n float_frame.reindex(columns=[\"A\", \"B\"]).info(verbose=False, buf=buf)\n\n # exhausting cases in DataFrame.info\n\n # columns but no index\n no_index = DataFrame(columns=[0, 1, 3])\n repr(no_index)\n\n # no columns or index\n DataFrame().info(buf=buf)\n\n df = DataFrame([\"a\\n\\r\\tb\"], columns=[\"a\\n\\r\\td\"], index=[\"a\\n\\r\\tf\"])\n assert \"\\t\" not in repr(df)\n assert \"\\r\" not in repr(df)\n assert \"a\\n\" not in repr(df)\n\n def test_repr_dimensions(self):\n df = DataFrame([[1, 2], [3, 4]])\n with option_context(\"display.show_dimensions\", True):\n assert \"2 rows x 2 columns\" in repr(df)\n\n with option_context(\"display.show_dimensions\", False):\n assert \"2 rows x 2 columns\" not in repr(df)\n\n with option_context(\"display.show_dimensions\", \"truncate\"):\n assert \"2 rows x 2 columns\" not in repr(df)\n\n @pytest.mark.slow\n def test_repr_big(self):\n # big one\n biggie = DataFrame(np.zeros((200, 4)), columns=range(4), index=range(200))\n repr(biggie)\n\n def test_repr_unsortable(self, float_frame):\n # columns are not sortable\n\n warn_filters = warnings.filters\n warnings.filterwarnings(\"ignore\", category=FutureWarning, module=\".*format\")\n\n unsortable = DataFrame(\n {\n \"foo\": [1] * 50,\n datetime.today(): [1] * 50,\n \"bar\": [\"bar\"] * 50,\n datetime.today() + timedelta(1): [\"bar\"] * 50,\n },\n index=np.arange(50),\n )\n repr(unsortable)\n\n fmt.set_option(\"display.precision\", 3, \"display.column_space\", 10)\n repr(float_frame)\n\n fmt.set_option(\"display.max_rows\", 10, \"display.max_columns\", 2)\n repr(float_frame)\n\n fmt.set_option(\"display.max_rows\", 1000, \"display.max_columns\", 1000)\n repr(float_frame)\n\n tm.reset_display_options()\n\n warnings.filters = warn_filters\n\n def test_repr_unicode(self):\n uval = \"\\u03c3\\u03c3\\u03c3\\u03c3\"\n\n df = DataFrame({\"A\": [uval, uval]})\n\n result = repr(df)\n ex_top = \" A\"\n assert result.split(\"\\n\")[0].rstrip() == ex_top\n\n df = DataFrame({\"A\": [uval, uval]})\n result = repr(df)\n assert result.split(\"\\n\")[0].rstrip() == ex_top\n\n def test_unicode_string_with_unicode(self):\n df = DataFrame({\"A\": [\"\\u05d0\"]})\n str(df)\n\n def test_repr_unicode_columns(self):\n df = DataFrame({\"\\u05d0\": [1, 2, 3], \"\\u05d1\": [4, 5, 6], \"c\": [7, 8, 9]})\n repr(df.columns) # should not raise UnicodeDecodeError\n\n def test_str_to_bytes_raises(self):\n # GH 26447\n df = DataFrame({\"A\": [\"abc\"]})\n msg = \"^'str' object cannot be interpreted as an integer$\"\n with pytest.raises(TypeError, match=msg):\n bytes(df)\n\n def test_very_wide_info_repr(self):\n df = DataFrame(np.random.randn(10, 20), columns=tm.rands_array(10, 20))\n repr(df)\n\n def test_repr_column_name_unicode_truncation_bug(self):\n # #1906\n df = DataFrame(\n {\n \"Id\": [7117434],\n \"StringCol\": (\n \"Is it possible to modify drop plot code\"\n \"so that the output graph is displayed \"\n \"in iphone simulator, Is it possible to \"\n \"modify drop plot code so that the \"\n \"output graph is \\xe2\\x80\\xa8displayed \"\n \"in iphone simulator.Now we are adding \"\n \"the CSV file externally. 
I want to Call \"\n \"the File through the code..\"\n ),\n }\n )\n\n with option_context(\"display.max_columns\", 20):\n assert \"StringCol\" in repr(df)\n\n def test_latex_repr(self):\n result = r\"\"\"\\begin{tabular}{llll}\n\\toprule\n{} & 0 & 1 & 2 \\\\\n\\midrule\n0 & $\\alpha$ & b & c \\\\\n1 & 1 & 2 & 3 \\\\\n\\bottomrule\n\\end{tabular}\n\"\"\"\n with option_context(\"display.latex.escape\", False, \"display.latex.repr\", True):\n df = DataFrame([[r\"$\\alpha$\", \"b\", \"c\"], [1, 2, 3]])\n assert result == df._repr_latex_()\n\n # GH 12182\n assert df._repr_latex_() is None\n\n def test_repr_categorical_dates_periods(self):\n # normal DataFrame\n dt = date_range(\"2011-01-01 09:00\", freq=\"H\", periods=5, tz=\"US/Eastern\")\n p = period_range(\"2011-01\", freq=\"M\", periods=5)\n df = DataFrame({\"dt\": dt, \"p\": p})\n exp = \"\"\" dt p\n0 2011-01-01 09:00:00-05:00 2011-01\n1 2011-01-01 10:00:00-05:00 2011-02\n2 2011-01-01 11:00:00-05:00 2011-03\n3 2011-01-01 12:00:00-05:00 2011-04\n4 2011-01-01 13:00:00-05:00 2011-05\"\"\"\n\n assert repr(df) == exp\n\n df2 = DataFrame({\"dt\": Categorical(dt), \"p\": Categorical(p)})\n assert repr(df2) == exp\n\n @pytest.mark.parametrize(\"arg\", [np.datetime64, np.timedelta64])\n @pytest.mark.parametrize(\n \"box, expected\",\n [[Series, \"0 NaT\\ndtype: object\"], [DataFrame, \" 0\\n0 NaT\"]],\n )\n def test_repr_np_nat_with_object(self, arg, box, expected):\n # GH 25445\n result = repr(box([arg(\"NaT\")], dtype=object))\n assert result == expected\n\n def test_frame_datetime64_pre1900_repr(self):\n df = DataFrame({\"year\": date_range(\"1/1/1700\", periods=50, freq=\"A-DEC\")})\n # it works!\n repr(df)\n\n def test_frame_to_string_with_periodindex(self):\n index = PeriodIndex([\"2011-1\", \"2011-2\", \"2011-3\"], freq=\"M\")\n frame = DataFrame(np.random.randn(3, 4), index=index)\n\n # it works!\n frame.to_string()\n",
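A brief usage sketch of the display options exercised by the repr tests stored in the row above (test_repr_dimensions in particular); only pandas itself is assumed, and the expected footer string comes straight from the row's assertions.

import pandas as pd

df = pd.DataFrame([[1, 2], [3, 4]])

# Forcing the dimensions footer on always appends it to the repr.
with pd.option_context("display.show_dimensions", True):
    assert "2 rows x 2 columns" in repr(df)

# With "truncate" the footer only appears when the frame is actually
# truncated, so this tiny 2x2 frame shows no footer at all.
with pd.option_context("display.show_dimensions", "truncate"):
    assert "2 rows x 2 columns" not in repr(df)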
"\"\"\" support numpy compatibility across versions \"\"\"\n\nfrom distutils.version import LooseVersion\nimport re\n\nimport numpy as np\n\n# numpy versioning\n_np_version = np.__version__\n_nlv = LooseVersion(_np_version)\nnp_version_under1p17 = _nlv < LooseVersion(\"1.17\")\nnp_version_under1p18 = _nlv < LooseVersion(\"1.18\")\nnp_version_under1p19 = _nlv < LooseVersion(\"1.19\")\nnp_version_under1p20 = _nlv < LooseVersion(\"1.20\")\nis_numpy_dev = \".dev\" in str(_nlv)\n_min_numpy_ver = \"1.16.5\"\n\n\nif _nlv < _min_numpy_ver:\n raise ImportError(\n f\"this version of pandas is incompatible with numpy < {_min_numpy_ver}\\n\"\n f\"your numpy version is {_np_version}.\\n\"\n f\"Please upgrade numpy to >= {_min_numpy_ver} to use this pandas version\"\n )\n\n\n_tz_regex = re.compile(\"[+-]0000$\")\n\n\ndef _tz_replacer(tstring):\n if isinstance(tstring, str):\n if tstring.endswith(\"Z\"):\n tstring = tstring[:-1]\n elif _tz_regex.search(tstring):\n tstring = tstring[:-5]\n return tstring\n\n\ndef np_datetime64_compat(tstring: str, unit: str = \"ns\"):\n \"\"\"\n provide compat for construction of strings to numpy datetime64's with\n tz-changes in 1.11 that make '2015-01-01 09:00:00Z' show a deprecation\n warning, when need to pass '2015-01-01 09:00:00'\n \"\"\"\n tstring = _tz_replacer(tstring)\n return np.datetime64(tstring, unit)\n\n\ndef np_array_datetime64_compat(arr, dtype=\"M8[ns]\"):\n \"\"\"\n provide compat for construction of an array of strings to a\n np.array(..., dtype=np.datetime64(..))\n tz-changes in 1.11 that make '2015-01-01 09:00:00Z' show a deprecation\n warning, when need to pass '2015-01-01 09:00:00'\n \"\"\"\n # is_list_like; can't import as it would be circular\n if hasattr(arr, \"__iter__\") and not isinstance(arr, (str, bytes)):\n arr = [_tz_replacer(s) for s in arr]\n else:\n arr = _tz_replacer(arr)\n\n return np.array(arr, dtype=dtype)\n\n\n__all__ = [\n \"np\",\n \"_np_version\",\n \"np_version_under1p17\",\n \"is_numpy_dev\",\n]\n",
"\"\"\"\nModule that contains many useful utilities\nfor validating data or function arguments\n\"\"\"\nfrom typing import (\n Iterable,\n Sequence,\n Union,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas.core.dtypes.common import is_bool\n\n\ndef _check_arg_length(fname, args, max_fname_arg_count, compat_args):\n \"\"\"\n Checks whether 'args' has length of at most 'compat_args'. Raises\n a TypeError if that is not the case, similar to in Python when a\n function is called with too many arguments.\n \"\"\"\n if max_fname_arg_count < 0:\n raise ValueError(\"'max_fname_arg_count' must be non-negative\")\n\n if len(args) > len(compat_args):\n max_arg_count = len(compat_args) + max_fname_arg_count\n actual_arg_count = len(args) + max_fname_arg_count\n argument = \"argument\" if max_arg_count == 1 else \"arguments\"\n\n raise TypeError(\n f\"{fname}() takes at most {max_arg_count} {argument} \"\n f\"({actual_arg_count} given)\"\n )\n\n\ndef _check_for_default_values(fname, arg_val_dict, compat_args):\n \"\"\"\n Check that the keys in `arg_val_dict` are mapped to their\n default values as specified in `compat_args`.\n\n Note that this function is to be called only when it has been\n checked that arg_val_dict.keys() is a subset of compat_args\n \"\"\"\n for key in arg_val_dict:\n # try checking equality directly with '=' operator,\n # as comparison may have been overridden for the left\n # hand object\n try:\n v1 = arg_val_dict[key]\n v2 = compat_args[key]\n\n # check for None-ness otherwise we could end up\n # comparing a numpy array vs None\n if (v1 is not None and v2 is None) or (v1 is None and v2 is not None):\n match = False\n else:\n match = v1 == v2\n\n if not is_bool(match):\n raise ValueError(\"'match' is not a boolean\")\n\n # could not compare them directly, so try comparison\n # using the 'is' operator\n except ValueError:\n match = arg_val_dict[key] is compat_args[key]\n\n if not match:\n raise ValueError(\n f\"the '{key}' parameter is not supported in \"\n f\"the pandas implementation of {fname}()\"\n )\n\n\ndef validate_args(fname, args, max_fname_arg_count, compat_args):\n \"\"\"\n Checks whether the length of the `*args` argument passed into a function\n has at most `len(compat_args)` arguments and whether or not all of these\n elements in `args` are set to their default values.\n\n Parameters\n ----------\n fname : str\n The name of the function being passed the `*args` parameter\n args : tuple\n The `*args` parameter passed into a function\n max_fname_arg_count : int\n The maximum number of arguments that the function `fname`\n can accept, excluding those in `args`. Used for displaying\n appropriate error messages. 
Must be non-negative.\n compat_args : dict\n A dictionary of keys and their associated default values.\n In order to accommodate buggy behaviour in some versions of `numpy`,\n where a signature displayed keyword arguments but then passed those\n arguments **positionally** internally when calling downstream\n implementations, a dict ensures that the original\n order of the keyword arguments is enforced.\n\n Raises\n ------\n TypeError\n If `args` contains more values than there are `compat_args`\n ValueError\n If `args` contains values that do not correspond to those\n of the default values specified in `compat_args`\n \"\"\"\n _check_arg_length(fname, args, max_fname_arg_count, compat_args)\n\n # We do this so that we can provide a more informative\n # error message about the parameters that we are not\n # supporting in the pandas implementation of 'fname'\n kwargs = dict(zip(compat_args, args))\n _check_for_default_values(fname, kwargs, compat_args)\n\n\ndef _check_for_invalid_keys(fname, kwargs, compat_args):\n \"\"\"\n Checks whether 'kwargs' contains any keys that are not\n in 'compat_args' and raises a TypeError if there is one.\n \"\"\"\n # set(dict) --> set of the dictionary's keys\n diff = set(kwargs) - set(compat_args)\n\n if diff:\n bad_arg = list(diff)[0]\n raise TypeError(f\"{fname}() got an unexpected keyword argument '{bad_arg}'\")\n\n\ndef validate_kwargs(fname, kwargs, compat_args):\n \"\"\"\n Checks whether parameters passed to the **kwargs argument in a\n function `fname` are valid parameters as specified in `*compat_args`\n and whether or not they are set to their default values.\n\n Parameters\n ----------\n fname : str\n The name of the function being passed the `**kwargs` parameter\n kwargs : dict\n The `**kwargs` parameter passed into `fname`\n compat_args: dict\n A dictionary of keys that `kwargs` is allowed to have and their\n associated default values\n\n Raises\n ------\n TypeError if `kwargs` contains keys not in `compat_args`\n ValueError if `kwargs` contains keys in `compat_args` that do not\n map to the default values specified in `compat_args`\n \"\"\"\n kwds = kwargs.copy()\n _check_for_invalid_keys(fname, kwargs, compat_args)\n _check_for_default_values(fname, kwds, compat_args)\n\n\ndef validate_args_and_kwargs(fname, args, kwargs, max_fname_arg_count, compat_args):\n \"\"\"\n Checks whether parameters passed to the *args and **kwargs argument in a\n function `fname` are valid parameters as specified in `*compat_args`\n and whether or not they are set to their default values.\n\n Parameters\n ----------\n fname: str\n The name of the function being passed the `**kwargs` parameter\n args: tuple\n The `*args` parameter passed into a function\n kwargs: dict\n The `**kwargs` parameter passed into `fname`\n max_fname_arg_count: int\n The minimum number of arguments that the function `fname`\n requires, excluding those in `args`. Used for displaying\n appropriate error messages. 
Must be non-negative.\n compat_args: dict\n A dictionary of keys that `kwargs` is allowed to\n have and their associated default values.\n\n Raises\n ------\n TypeError if `args` contains more values than there are\n `compat_args` OR `kwargs` contains keys not in `compat_args`\n ValueError if `args` contains values not at the default value (`None`)\n `kwargs` contains keys in `compat_args` that do not map to the default\n value as specified in `compat_args`\n\n See Also\n --------\n validate_args : Purely args validation.\n validate_kwargs : Purely kwargs validation.\n\n \"\"\"\n # Check that the total number of arguments passed in (i.e.\n # args and kwargs) does not exceed the length of compat_args\n _check_arg_length(\n fname, args + tuple(kwargs.values()), max_fname_arg_count, compat_args\n )\n\n # Check there is no overlap with the positional and keyword\n # arguments, similar to what is done in actual Python functions\n args_dict = dict(zip(compat_args, args))\n\n for key in args_dict:\n if key in kwargs:\n raise TypeError(\n f\"{fname}() got multiple values for keyword argument '{key}'\"\n )\n\n kwargs.update(args_dict)\n validate_kwargs(fname, kwargs, compat_args)\n\n\ndef validate_bool_kwarg(value, arg_name, none_allowed=True, int_allowed=False):\n \"\"\"\n Ensure that argument passed in arg_name can be interpreted as boolean.\n\n Parameters\n ----------\n value : bool\n Value to be validated.\n arg_name : str\n Name of the argument. To be reflected in the error message.\n none_allowed : bool, default True\n Whether to consider None to be a valid boolean.\n int_allowed : bool, default False\n Whether to consider integer value to be a valid boolean.\n\n Returns\n -------\n value\n The same value as input.\n\n Raises\n ------\n ValueError\n If the value is not a valid boolean.\n \"\"\"\n good_value = is_bool(value)\n if none_allowed:\n good_value = good_value or value is None\n\n if int_allowed:\n good_value = good_value or isinstance(value, int)\n\n if not good_value:\n raise ValueError(\n f'For argument \"{arg_name}\" expected type bool, received '\n f\"type {type(value).__name__}.\"\n )\n return value\n\n\ndef validate_axis_style_args(data, args, kwargs, arg_name, method_name):\n \"\"\"\n Argument handler for mixed index, columns / axis functions\n\n In an attempt to handle both `.method(index, columns)`, and\n `.method(arg, axis=.)`, we have to do some bad things to argument\n parsing. This translates all arguments to `{index=., columns=.}` style.\n\n Parameters\n ----------\n data : DataFrame\n args : tuple\n All positional arguments from the user\n kwargs : dict\n All keyword arguments from the user\n arg_name, method_name : str\n Used for better error messages\n\n Returns\n -------\n kwargs : dict\n A dictionary of keyword arguments. Doesn't modify ``kwargs``\n inplace, so update them with the return value here.\n\n Examples\n --------\n >>> df._validate_axis_style_args((str.upper,), {'columns': id},\n ... 'mapper', 'rename')\n {'columns': <function id>, 'index': <method 'upper' of 'str' objects>}\n\n This emits a warning\n >>> df._validate_axis_style_args((str.upper, id), {},\n ... 
'mapper', 'rename')\n {'columns': <function id>, 'index': <method 'upper' of 'str' objects>}\n \"\"\"\n # TODO: Change to keyword-only args and remove all this\n\n out = {}\n # Goal: fill 'out' with index/columns-style arguments\n # like out = {'index': foo, 'columns': bar}\n\n # Start by validating for consistency\n if \"axis\" in kwargs and any(x in kwargs for x in data._AXIS_TO_AXIS_NUMBER):\n msg = \"Cannot specify both 'axis' and any of 'index' or 'columns'.\"\n raise TypeError(msg)\n\n # First fill with explicit values provided by the user...\n if arg_name in kwargs:\n if args:\n msg = f\"{method_name} got multiple values for argument '{arg_name}'\"\n raise TypeError(msg)\n\n axis = data._get_axis_name(kwargs.get(\"axis\", 0))\n out[axis] = kwargs[arg_name]\n\n # More user-provided arguments, now from kwargs\n for k, v in kwargs.items():\n try:\n ax = data._get_axis_name(k)\n except ValueError:\n pass\n else:\n out[ax] = v\n\n # All user-provided kwargs have been handled now.\n # Now we supplement with positional arguments, emitting warnings\n # when there's ambiguity and raising when there's conflicts\n\n if len(args) == 0:\n pass # It's up to the function to decide if this is valid\n elif len(args) == 1:\n axis = data._get_axis_name(kwargs.get(\"axis\", 0))\n out[axis] = args[0]\n elif len(args) == 2:\n if \"axis\" in kwargs:\n # Unambiguously wrong\n msg = \"Cannot specify both 'axis' and any of 'index' or 'columns'\"\n raise TypeError(msg)\n\n msg = (\n f\"Interpreting call\\n\\t'.{method_name}(a, b)' as \"\n f\"\\n\\t'.{method_name}(index=a, columns=b)'.\\nUse named \"\n \"arguments to remove any ambiguity. In the future, using \"\n \"positional arguments for 'index' or 'columns' will raise \"\n \"a 'TypeError'.\"\n )\n warnings.warn(msg, FutureWarning, stacklevel=4)\n out[data._get_axis_name(0)] = args[0]\n out[data._get_axis_name(1)] = args[1]\n else:\n msg = f\"Cannot specify all of '{arg_name}', 'index', 'columns'.\"\n raise TypeError(msg)\n return out\n\n\ndef validate_fillna_kwargs(value, method, validate_scalar_dict_value=True):\n \"\"\"\n Validate the keyword arguments to 'fillna'.\n\n This checks that exactly one of 'value' and 'method' is specified.\n If 'method' is specified, this validates that it's a valid method.\n\n Parameters\n ----------\n value, method : object\n The 'value' and 'method' keyword arguments for 'fillna'.\n validate_scalar_dict_value : bool, default True\n Whether to validate that 'value' is a scalar or dict. 
Specifically,\n validate that it is not a list or tuple.\n\n Returns\n -------\n value, method : object\n \"\"\"\n from pandas.core.missing import clean_fill_method\n\n if value is None and method is None:\n raise ValueError(\"Must specify a fill 'value' or 'method'.\")\n elif value is None and method is not None:\n method = clean_fill_method(method)\n\n elif value is not None and method is None:\n if validate_scalar_dict_value and isinstance(value, (list, tuple)):\n raise TypeError(\n '\"value\" parameter must be a scalar or dict, but '\n f'you passed a \"{type(value).__name__}\"'\n )\n\n elif value is not None and method is not None:\n raise ValueError(\"Cannot specify both 'value' and 'method'.\")\n\n return value, method\n\n\ndef validate_percentile(q: Union[float, Iterable[float]]) -> np.ndarray:\n \"\"\"\n Validate percentiles (used by describe and quantile).\n\n This function checks if the given float or iterable of floats is a valid percentile\n otherwise raises a ValueError.\n\n Parameters\n ----------\n q: float or iterable of floats\n A single percentile or an iterable of percentiles.\n\n Returns\n -------\n ndarray\n An ndarray of the percentiles if valid.\n\n Raises\n ------\n ValueError if percentiles are not in given interval([0, 1]).\n \"\"\"\n q_arr = np.asarray(q)\n # Don't change this to an f-string. The string formatting\n # is too expensive for cases where we don't need it.\n msg = \"percentiles should all be in the interval [0, 1]. Try {} instead.\"\n if q_arr.ndim == 0:\n if not 0 <= q_arr <= 1:\n raise ValueError(msg.format(q_arr / 100.0))\n else:\n if not all(0 <= qs <= 1 for qs in q_arr):\n raise ValueError(msg.format(q_arr / 100.0))\n return q_arr\n\n\ndef validate_ascending(\n ascending: Union[Union[bool, int], Sequence[Union[bool, int]]] = True,\n):\n \"\"\"Validate ``ascending`` kwargs for ``sort_index`` method.\"\"\"\n kwargs = {\"none_allowed\": False, \"int_allowed\": True}\n if not isinstance(ascending, (list, tuple)):\n return validate_bool_kwarg(ascending, \"ascending\", **kwargs)\n\n return [validate_bool_kwarg(item, \"ascending\", **kwargs) for item in ascending]\n",
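A short sketch of how the validators in the row above behave at the call site; the import path pandas.util._validators is an assumption (only the file body is stored in this row), and the messages in the comments are the ones built inside the functions shown.

from pandas.util._validators import (  # assumed module path
    validate_bool_kwarg,
    validate_percentile,
)

# A passing value is returned unchanged; None is allowed by default.
assert validate_bool_kwarg(True, "inplace") is True
assert validate_bool_kwarg(None, "inplace") is None

# A non-boolean raises the ValueError constructed in the function body.
try:
    validate_bool_kwarg("yes", "inplace")
except ValueError as err:
    print(err)  # For argument "inplace" expected type bool, received type str.

# validate_percentile coerces a scalar or iterable in [0, 1] to an ndarray.
print(validate_percentile([0.25, 0.5, 0.75]))  # -> ndarray of the three percentiles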
"from datetime import datetime\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n Index,\n MultiIndex,\n date_range,\n period_range,\n)\nimport pandas._testing as tm\n\n\[email protected]\ndef frame_with_period_index():\n return DataFrame(\n data=np.arange(20).reshape(4, 5),\n columns=list(\"abcde\"),\n index=period_range(start=\"2000\", freq=\"A\", periods=4),\n )\n\n\[email protected]\ndef left():\n return DataFrame({\"a\": [20, 10, 0]}, index=[2, 1, 0])\n\n\[email protected]\ndef right():\n return DataFrame({\"b\": [300, 100, 200]}, index=[3, 1, 2])\n\n\[email protected](\n \"how, sort, expected\",\n [\n (\"inner\", False, DataFrame({\"a\": [20, 10], \"b\": [200, 100]}, index=[2, 1])),\n (\"inner\", True, DataFrame({\"a\": [10, 20], \"b\": [100, 200]}, index=[1, 2])),\n (\n \"left\",\n False,\n DataFrame({\"a\": [20, 10, 0], \"b\": [200, 100, np.nan]}, index=[2, 1, 0]),\n ),\n (\n \"left\",\n True,\n DataFrame({\"a\": [0, 10, 20], \"b\": [np.nan, 100, 200]}, index=[0, 1, 2]),\n ),\n (\n \"right\",\n False,\n DataFrame({\"a\": [np.nan, 10, 20], \"b\": [300, 100, 200]}, index=[3, 1, 2]),\n ),\n (\n \"right\",\n True,\n DataFrame({\"a\": [10, 20, np.nan], \"b\": [100, 200, 300]}, index=[1, 2, 3]),\n ),\n (\n \"outer\",\n False,\n DataFrame(\n {\"a\": [0, 10, 20, np.nan], \"b\": [np.nan, 100, 200, 300]},\n index=[0, 1, 2, 3],\n ),\n ),\n (\n \"outer\",\n True,\n DataFrame(\n {\"a\": [0, 10, 20, np.nan], \"b\": [np.nan, 100, 200, 300]},\n index=[0, 1, 2, 3],\n ),\n ),\n ],\n)\ndef test_join(left, right, how, sort, expected):\n\n result = left.join(right, how=how, sort=sort)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_join_index(float_frame):\n # left / right\n\n f = float_frame.loc[float_frame.index[:10], [\"A\", \"B\"]]\n f2 = float_frame.loc[float_frame.index[5:], [\"C\", \"D\"]].iloc[::-1]\n\n joined = f.join(f2)\n tm.assert_index_equal(f.index, joined.index)\n expected_columns = Index([\"A\", \"B\", \"C\", \"D\"])\n tm.assert_index_equal(joined.columns, expected_columns)\n\n joined = f.join(f2, how=\"left\")\n tm.assert_index_equal(joined.index, f.index)\n tm.assert_index_equal(joined.columns, expected_columns)\n\n joined = f.join(f2, how=\"right\")\n tm.assert_index_equal(joined.index, f2.index)\n tm.assert_index_equal(joined.columns, expected_columns)\n\n # inner\n\n joined = f.join(f2, how=\"inner\")\n tm.assert_index_equal(joined.index, f.index[5:10])\n tm.assert_index_equal(joined.columns, expected_columns)\n\n # outer\n\n joined = f.join(f2, how=\"outer\")\n tm.assert_index_equal(joined.index, float_frame.index.sort_values())\n tm.assert_index_equal(joined.columns, expected_columns)\n\n with pytest.raises(ValueError, match=\"join method\"):\n f.join(f2, how=\"foo\")\n\n # corner case - overlapping columns\n msg = \"columns overlap but no suffix\"\n for how in (\"outer\", \"left\", \"inner\"):\n with pytest.raises(ValueError, match=msg):\n float_frame.join(float_frame, how=how)\n\n\ndef test_join_index_more(float_frame):\n af = float_frame.loc[:, [\"A\", \"B\"]]\n bf = float_frame.loc[::2, [\"C\", \"D\"]]\n\n expected = af.copy()\n expected[\"C\"] = float_frame[\"C\"][::2]\n expected[\"D\"] = float_frame[\"D\"][::2]\n\n result = af.join(bf)\n tm.assert_frame_equal(result, expected)\n\n result = af.join(bf, how=\"right\")\n tm.assert_frame_equal(result, expected[::2])\n\n result = bf.join(af, how=\"right\")\n tm.assert_frame_equal(result, expected.loc[:, result.columns])\n\n\ndef test_join_index_series(float_frame):\n df = 
float_frame.copy()\n s = df.pop(float_frame.columns[-1])\n joined = df.join(s)\n\n # TODO should this check_names ?\n tm.assert_frame_equal(joined, float_frame, check_names=False)\n\n s.name = None\n with pytest.raises(ValueError, match=\"must have a name\"):\n df.join(s)\n\n\ndef test_join_overlap(float_frame):\n df1 = float_frame.loc[:, [\"A\", \"B\", \"C\"]]\n df2 = float_frame.loc[:, [\"B\", \"C\", \"D\"]]\n\n joined = df1.join(df2, lsuffix=\"_df1\", rsuffix=\"_df2\")\n df1_suf = df1.loc[:, [\"B\", \"C\"]].add_suffix(\"_df1\")\n df2_suf = df2.loc[:, [\"B\", \"C\"]].add_suffix(\"_df2\")\n\n no_overlap = float_frame.loc[:, [\"A\", \"D\"]]\n expected = df1_suf.join(df2_suf).join(no_overlap)\n\n # column order not necessarily sorted\n tm.assert_frame_equal(joined, expected.loc[:, joined.columns])\n\n\ndef test_join_period_index(frame_with_period_index):\n other = frame_with_period_index.rename(columns=lambda key: f\"{key}{key}\")\n\n joined_values = np.concatenate([frame_with_period_index.values] * 2, axis=1)\n\n joined_cols = frame_with_period_index.columns.append(other.columns)\n\n joined = frame_with_period_index.join(other)\n expected = DataFrame(\n data=joined_values, columns=joined_cols, index=frame_with_period_index.index\n )\n\n tm.assert_frame_equal(joined, expected)\n\n\ndef test_join_left_sequence_non_unique_index():\n # https://github.com/pandas-dev/pandas/issues/19607\n df1 = DataFrame({\"a\": [0, 10, 20]}, index=[1, 2, 3])\n df2 = DataFrame({\"b\": [100, 200, 300]}, index=[4, 3, 2])\n df3 = DataFrame({\"c\": [400, 500, 600]}, index=[2, 2, 4])\n\n joined = df1.join([df2, df3], how=\"left\")\n\n expected = DataFrame(\n {\n \"a\": [0, 10, 10, 20],\n \"b\": [np.nan, 300, 300, 200],\n \"c\": [np.nan, 400, 500, np.nan],\n },\n index=[1, 2, 2, 3],\n )\n\n tm.assert_frame_equal(joined, expected)\n\n\[email protected](\"sort_kw\", [True, False])\ndef test_suppress_future_warning_with_sort_kw(sort_kw):\n a = DataFrame({\"col1\": [1, 2]}, index=[\"c\", \"a\"])\n\n b = DataFrame({\"col2\": [4, 5]}, index=[\"b\", \"a\"])\n\n c = DataFrame({\"col3\": [7, 8]}, index=[\"a\", \"b\"])\n\n expected = DataFrame(\n {\n \"col1\": {\"a\": 2.0, \"b\": float(\"nan\"), \"c\": 1.0},\n \"col2\": {\"a\": 5.0, \"b\": 4.0, \"c\": float(\"nan\")},\n \"col3\": {\"a\": 7.0, \"b\": 8.0, \"c\": float(\"nan\")},\n }\n )\n if sort_kw is False:\n expected = expected.reindex(index=[\"c\", \"a\", \"b\"])\n\n with tm.assert_produces_warning(None):\n result = a.join([b, c], how=\"outer\", sort=sort_kw)\n tm.assert_frame_equal(result, expected)\n\n\nclass TestDataFrameJoin:\n def test_join(self, multiindex_dataframe_random_data):\n frame = multiindex_dataframe_random_data\n\n a = frame.loc[frame.index[:5], [\"A\"]]\n b = frame.loc[frame.index[2:], [\"B\", \"C\"]]\n\n joined = a.join(b, how=\"outer\").reindex(frame.index)\n expected = frame.copy().values\n expected[np.isnan(joined.values)] = np.nan\n expected = DataFrame(expected, index=frame.index, columns=frame.columns)\n\n assert not np.isnan(joined.values).all()\n\n # TODO what should join do with names ?\n tm.assert_frame_equal(joined, expected, check_names=False)\n\n def test_join_segfault(self):\n # GH#1532\n df1 = DataFrame({\"a\": [1, 1], \"b\": [1, 2], \"x\": [1, 2]})\n df2 = DataFrame({\"a\": [2, 2], \"b\": [1, 2], \"y\": [1, 2]})\n df1 = df1.set_index([\"a\", \"b\"])\n df2 = df2.set_index([\"a\", \"b\"])\n # it works!\n for how in [\"left\", \"right\", \"outer\"]:\n df1.join(df2, how=how)\n\n def test_join_str_datetime(self):\n str_dates = [\"20120209\", 
\"20120222\"]\n dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]\n\n A = DataFrame(str_dates, index=range(2), columns=[\"aa\"])\n C = DataFrame([[1, 2], [3, 4]], index=str_dates, columns=dt_dates)\n\n tst = A.join(C, on=\"aa\")\n\n assert len(tst.columns) == 3\n\n def test_join_multiindex_leftright(self):\n # GH 10741\n df1 = DataFrame(\n [\n [\"a\", \"x\", 0.471780],\n [\"a\", \"y\", 0.774908],\n [\"a\", \"z\", 0.563634],\n [\"b\", \"x\", -0.353756],\n [\"b\", \"y\", 0.368062],\n [\"b\", \"z\", -1.721840],\n [\"c\", \"x\", 1],\n [\"c\", \"y\", 2],\n [\"c\", \"z\", 3],\n ],\n columns=[\"first\", \"second\", \"value1\"],\n ).set_index([\"first\", \"second\"])\n\n df2 = DataFrame([[\"a\", 10], [\"b\", 20]], columns=[\"first\", \"value2\"]).set_index(\n [\"first\"]\n )\n\n exp = DataFrame(\n [\n [0.471780, 10],\n [0.774908, 10],\n [0.563634, 10],\n [-0.353756, 20],\n [0.368062, 20],\n [-1.721840, 20],\n [1.000000, np.nan],\n [2.000000, np.nan],\n [3.000000, np.nan],\n ],\n index=df1.index,\n columns=[\"value1\", \"value2\"],\n )\n\n # these must be the same results (but columns are flipped)\n tm.assert_frame_equal(df1.join(df2, how=\"left\"), exp)\n tm.assert_frame_equal(df2.join(df1, how=\"right\"), exp[[\"value2\", \"value1\"]])\n\n exp_idx = MultiIndex.from_product(\n [[\"a\", \"b\"], [\"x\", \"y\", \"z\"]], names=[\"first\", \"second\"]\n )\n exp = DataFrame(\n [\n [0.471780, 10],\n [0.774908, 10],\n [0.563634, 10],\n [-0.353756, 20],\n [0.368062, 20],\n [-1.721840, 20],\n ],\n index=exp_idx,\n columns=[\"value1\", \"value2\"],\n )\n\n tm.assert_frame_equal(df1.join(df2, how=\"right\"), exp)\n tm.assert_frame_equal(df2.join(df1, how=\"left\"), exp[[\"value2\", \"value1\"]])\n\n def test_merge_join_different_levels(self):\n # GH#9455\n\n # first dataframe\n df1 = DataFrame(columns=[\"a\", \"b\"], data=[[1, 11], [0, 22]])\n\n # second dataframe\n columns = MultiIndex.from_tuples([(\"a\", \"\"), (\"c\", \"c1\")])\n df2 = DataFrame(columns=columns, data=[[1, 33], [0, 44]])\n\n # merge\n columns = [\"a\", \"b\", (\"c\", \"c1\")]\n expected = DataFrame(columns=columns, data=[[1, 11, 33], [0, 22, 44]])\n with tm.assert_produces_warning(UserWarning):\n result = pd.merge(df1, df2, on=\"a\")\n tm.assert_frame_equal(result, expected)\n\n # join, see discussion in GH#12219\n columns = [\"a\", \"b\", (\"a\", \"\"), (\"c\", \"c1\")]\n expected = DataFrame(columns=columns, data=[[1, 11, 0, 44], [0, 22, 1, 33]])\n with tm.assert_produces_warning(UserWarning):\n result = df1.join(df2, on=\"a\")\n tm.assert_frame_equal(result, expected)\n\n def test_frame_join_tzaware(self):\n test1 = DataFrame(\n np.zeros((6, 3)),\n index=date_range(\n \"2012-11-15 00:00:00\", periods=6, freq=\"100L\", tz=\"US/Central\"\n ),\n )\n test2 = DataFrame(\n np.zeros((3, 3)),\n index=date_range(\n \"2012-11-15 00:00:00\", periods=3, freq=\"250L\", tz=\"US/Central\"\n ),\n columns=range(3, 6),\n )\n\n result = test1.join(test2, how=\"outer\")\n expected = test1.index.union(test2.index)\n\n tm.assert_index_equal(result.index, expected)\n assert result.index.tz.zone == \"US/Central\"\n",
"from __future__ import annotations\n\nimport operator\nfrom operator import (\n le,\n lt,\n)\nimport textwrap\nfrom typing import (\n Optional,\n Sequence,\n Type,\n TypeVar,\n cast,\n)\n\nimport numpy as np\n\nfrom pandas._config import get_option\n\nfrom pandas._libs import NaT\nfrom pandas._libs.interval import (\n VALID_CLOSED,\n Interval,\n IntervalMixin,\n intervals_to_interval_bounds,\n)\nfrom pandas._libs.missing import NA\nfrom pandas._typing import (\n ArrayLike,\n Dtype,\n NpDtype,\n)\nfrom pandas.compat.numpy import function as nv\nfrom pandas.util._decorators import Appender\n\nfrom pandas.core.dtypes.cast import maybe_convert_platform\nfrom pandas.core.dtypes.common import (\n is_categorical_dtype,\n is_datetime64_dtype,\n is_datetime64tz_dtype,\n is_dtype_equal,\n is_float_dtype,\n is_integer_dtype,\n is_interval_dtype,\n is_list_like,\n is_object_dtype,\n is_scalar,\n is_string_dtype,\n is_timedelta64_dtype,\n needs_i8_conversion,\n pandas_dtype,\n)\nfrom pandas.core.dtypes.dtypes import IntervalDtype\nfrom pandas.core.dtypes.generic import (\n ABCDataFrame,\n ABCDatetimeIndex,\n ABCIntervalIndex,\n ABCPeriodIndex,\n)\nfrom pandas.core.dtypes.missing import (\n is_valid_na_for_dtype,\n isna,\n notna,\n)\n\nfrom pandas.core.algorithms import (\n isin,\n take,\n value_counts,\n)\nfrom pandas.core.arrays.base import (\n ExtensionArray,\n _extension_array_shared_docs,\n)\nfrom pandas.core.arrays.categorical import Categorical\nimport pandas.core.common as com\nfrom pandas.core.construction import (\n array as pd_array,\n ensure_wrapped_if_datetimelike,\n extract_array,\n)\nfrom pandas.core.indexers import check_array_indexer\nfrom pandas.core.indexes.base import ensure_index\nfrom pandas.core.ops import (\n invalid_comparison,\n unpack_zerodim_and_defer,\n)\n\nIntervalArrayT = TypeVar(\"IntervalArrayT\", bound=\"IntervalArray\")\n\n_interval_shared_docs = {}\n\n_shared_docs_kwargs = {\n \"klass\": \"IntervalArray\",\n \"qualname\": \"arrays.IntervalArray\",\n \"name\": \"\",\n}\n\n\n_interval_shared_docs[\n \"class\"\n] = \"\"\"\n%(summary)s\n\n.. 
versionadded:: %(versionadded)s\n\nParameters\n----------\ndata : array-like (1-dimensional)\n Array-like containing Interval objects from which to build the\n %(klass)s.\nclosed : {'left', 'right', 'both', 'neither'}, default 'right'\n Whether the intervals are closed on the left-side, right-side, both or\n neither.\ndtype : dtype or None, default None\n If None, dtype will be inferred.\ncopy : bool, default False\n Copy the input data.\n%(name)s\\\nverify_integrity : bool, default True\n Verify that the %(klass)s is valid.\n\nAttributes\n----------\nleft\nright\nclosed\nmid\nlength\nis_empty\nis_non_overlapping_monotonic\n%(extra_attributes)s\\\n\nMethods\n-------\nfrom_arrays\nfrom_tuples\nfrom_breaks\ncontains\noverlaps\nset_closed\nto_tuples\n%(extra_methods)s\\\n\nSee Also\n--------\nIndex : The base pandas Index type.\nInterval : A bounded slice-like interval; the elements of an %(klass)s.\ninterval_range : Function to create a fixed frequency IntervalIndex.\ncut : Bin values into discrete Intervals.\nqcut : Bin values into equal-sized Intervals based on rank or sample quantiles.\n\nNotes\n-----\nSee the `user guide\n<https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html#intervalindex>`__\nfor more.\n\n%(examples)s\\\n\"\"\"\n\n\n@Appender(\n _interval_shared_docs[\"class\"]\n % {\n \"klass\": \"IntervalArray\",\n \"summary\": \"Pandas array for interval data that are closed on the same side.\",\n \"versionadded\": \"0.24.0\",\n \"name\": \"\",\n \"extra_attributes\": \"\",\n \"extra_methods\": \"\",\n \"examples\": textwrap.dedent(\n \"\"\"\\\n Examples\n --------\n A new ``IntervalArray`` can be constructed directly from an array-like of\n ``Interval`` objects:\n\n >>> pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])\n <IntervalArray>\n [(0, 1], (1, 5]]\n Length: 2, dtype: interval[int64, right]\n\n It may also be constructed using one of the constructor\n methods: :meth:`IntervalArray.from_arrays`,\n :meth:`IntervalArray.from_breaks`, and :meth:`IntervalArray.from_tuples`.\n \"\"\"\n ),\n }\n)\nclass IntervalArray(IntervalMixin, ExtensionArray):\n ndim = 1\n can_hold_na = True\n _na_value = _fill_value = np.nan\n\n # ---------------------------------------------------------------------\n # Constructors\n\n def __new__(\n cls: Type[IntervalArrayT],\n data,\n closed=None,\n dtype: Optional[Dtype] = None,\n copy: bool = False,\n verify_integrity: bool = True,\n ):\n\n data = extract_array(data, extract_numpy=True)\n\n if isinstance(data, cls):\n left = data._left\n right = data._right\n closed = closed or data.closed\n else:\n\n # don't allow scalars\n if is_scalar(data):\n msg = (\n f\"{cls.__name__}(...) 
must be called with a collection \"\n f\"of some kind, {data} was passed\"\n )\n raise TypeError(msg)\n\n # might need to convert empty or purely na data\n data = _maybe_convert_platform_interval(data)\n left, right, infer_closed = intervals_to_interval_bounds(\n data, validate_closed=closed is None\n )\n closed = closed or infer_closed\n\n return cls._simple_new(\n left,\n right,\n closed,\n copy=copy,\n dtype=dtype,\n verify_integrity=verify_integrity,\n )\n\n @classmethod\n def _simple_new(\n cls: Type[IntervalArrayT],\n left,\n right,\n closed=None,\n copy: bool = False,\n dtype: Optional[Dtype] = None,\n verify_integrity: bool = True,\n ) -> IntervalArrayT:\n result = IntervalMixin.__new__(cls)\n\n if closed is None and isinstance(dtype, IntervalDtype):\n closed = dtype.closed\n\n closed = closed or \"right\"\n left = ensure_index(left, copy=copy)\n right = ensure_index(right, copy=copy)\n\n if dtype is not None:\n # GH 19262: dtype must be an IntervalDtype to override inferred\n dtype = pandas_dtype(dtype)\n if is_interval_dtype(dtype):\n dtype = cast(IntervalDtype, dtype)\n if dtype.subtype is not None:\n left = left.astype(dtype.subtype)\n right = right.astype(dtype.subtype)\n else:\n msg = f\"dtype must be an IntervalDtype, got {dtype}\"\n raise TypeError(msg)\n\n if dtype.closed is None:\n # possibly loading an old pickle\n dtype = IntervalDtype(dtype.subtype, closed)\n elif closed != dtype.closed:\n raise ValueError(\"closed keyword does not match dtype.closed\")\n\n # coerce dtypes to match if needed\n if is_float_dtype(left) and is_integer_dtype(right):\n right = right.astype(left.dtype)\n elif is_float_dtype(right) and is_integer_dtype(left):\n left = left.astype(right.dtype)\n\n if type(left) != type(right):\n msg = (\n f\"must not have differing left [{type(left).__name__}] and \"\n f\"right [{type(right).__name__}] types\"\n )\n raise ValueError(msg)\n elif is_categorical_dtype(left.dtype) or is_string_dtype(left.dtype):\n # GH 19016\n msg = (\n \"category, object, and string subtypes are not supported \"\n \"for IntervalArray\"\n )\n raise TypeError(msg)\n elif isinstance(left, ABCPeriodIndex):\n msg = \"Period dtypes are not supported, use a PeriodIndex instead\"\n raise ValueError(msg)\n elif isinstance(left, ABCDatetimeIndex) and str(left.tz) != str(right.tz):\n msg = (\n \"left and right must have the same time zone, got \"\n f\"'{left.tz}' and '{right.tz}'\"\n )\n raise ValueError(msg)\n\n # For dt64/td64 we want DatetimeArray/TimedeltaArray instead of ndarray\n left = ensure_wrapped_if_datetimelike(left)\n left = extract_array(left, extract_numpy=True)\n right = ensure_wrapped_if_datetimelike(right)\n right = extract_array(right, extract_numpy=True)\n\n lbase = getattr(left, \"_ndarray\", left).base\n rbase = getattr(right, \"_ndarray\", right).base\n if lbase is not None and lbase is rbase:\n # If these share data, then setitem could corrupt our IA\n right = right.copy()\n\n dtype = IntervalDtype(left.dtype, closed=closed)\n result._dtype = dtype\n\n result._left = left\n result._right = right\n if verify_integrity:\n result._validate()\n return result\n\n @classmethod\n def _from_sequence(\n cls: Type[IntervalArrayT],\n scalars,\n *,\n dtype: Optional[Dtype] = None,\n copy: bool = False,\n ) -> IntervalArrayT:\n return cls(scalars, dtype=dtype, copy=copy)\n\n @classmethod\n def _from_factorized(\n cls: Type[IntervalArrayT], values: np.ndarray, original: IntervalArrayT\n ) -> IntervalArrayT:\n if len(values) == 0:\n # An empty array returns object-dtype here. 
We can't create\n # a new IA from an (empty) object-dtype array, so turn it into the\n # correct dtype.\n values = values.astype(original.dtype.subtype)\n return cls(values, closed=original.closed)\n\n _interval_shared_docs[\"from_breaks\"] = textwrap.dedent(\n \"\"\"\n Construct an %(klass)s from an array of splits.\n\n Parameters\n ----------\n breaks : array-like (1-dimensional)\n Left and right bounds for each interval.\n closed : {'left', 'right', 'both', 'neither'}, default 'right'\n Whether the intervals are closed on the left-side, right-side, both\n or neither.\n copy : bool, default False\n Copy the data.\n dtype : dtype or None, default None\n If None, dtype will be inferred.\n\n Returns\n -------\n %(klass)s\n\n See Also\n --------\n interval_range : Function to create a fixed frequency IntervalIndex.\n %(klass)s.from_arrays : Construct from a left and right array.\n %(klass)s.from_tuples : Construct from a sequence of tuples.\n\n %(examples)s\\\n \"\"\"\n )\n\n @classmethod\n @Appender(\n _interval_shared_docs[\"from_breaks\"]\n % {\n \"klass\": \"IntervalArray\",\n \"examples\": textwrap.dedent(\n \"\"\"\\\n Examples\n --------\n >>> pd.arrays.IntervalArray.from_breaks([0, 1, 2, 3])\n <IntervalArray>\n [(0, 1], (1, 2], (2, 3]]\n Length: 3, dtype: interval[int64, right]\n \"\"\"\n ),\n }\n )\n def from_breaks(\n cls: Type[IntervalArrayT],\n breaks,\n closed=\"right\",\n copy: bool = False,\n dtype: Optional[Dtype] = None,\n ) -> IntervalArrayT:\n breaks = _maybe_convert_platform_interval(breaks)\n\n return cls.from_arrays(breaks[:-1], breaks[1:], closed, copy=copy, dtype=dtype)\n\n _interval_shared_docs[\"from_arrays\"] = textwrap.dedent(\n \"\"\"\n Construct from two arrays defining the left and right bounds.\n\n Parameters\n ----------\n left : array-like (1-dimensional)\n Left bounds for each interval.\n right : array-like (1-dimensional)\n Right bounds for each interval.\n closed : {'left', 'right', 'both', 'neither'}, default 'right'\n Whether the intervals are closed on the left-side, right-side, both\n or neither.\n copy : bool, default False\n Copy the data.\n dtype : dtype, optional\n If None, dtype will be inferred.\n\n Returns\n -------\n %(klass)s\n\n Raises\n ------\n ValueError\n When a value is missing in only one of `left` or `right`.\n When a value in `left` is greater than the corresponding value\n in `right`.\n\n See Also\n --------\n interval_range : Function to create a fixed frequency IntervalIndex.\n %(klass)s.from_breaks : Construct an %(klass)s from an array of\n splits.\n %(klass)s.from_tuples : Construct an %(klass)s from an\n array-like of tuples.\n\n Notes\n -----\n Each element of `left` must be less than or equal to the `right`\n element at the same position. If an element is missing, it must be\n missing in both `left` and `right`. A TypeError is raised when\n using an unsupported type for `left` or `right`. 
At the moment,\n 'category', 'object', and 'string' subtypes are not supported.\n\n %(examples)s\\\n \"\"\"\n )\n\n @classmethod\n @Appender(\n _interval_shared_docs[\"from_arrays\"]\n % {\n \"klass\": \"IntervalArray\",\n \"examples\": textwrap.dedent(\n \"\"\"\\\n >>> pd.arrays.IntervalArray.from_arrays([0, 1, 2], [1, 2, 3])\n <IntervalArray>\n [(0, 1], (1, 2], (2, 3]]\n Length: 3, dtype: interval[int64, right]\n \"\"\"\n ),\n }\n )\n def from_arrays(\n cls: Type[IntervalArrayT],\n left,\n right,\n closed=\"right\",\n copy: bool = False,\n dtype: Optional[Dtype] = None,\n ) -> IntervalArrayT:\n left = _maybe_convert_platform_interval(left)\n right = _maybe_convert_platform_interval(right)\n\n return cls._simple_new(\n left, right, closed, copy=copy, dtype=dtype, verify_integrity=True\n )\n\n _interval_shared_docs[\"from_tuples\"] = textwrap.dedent(\n \"\"\"\n Construct an %(klass)s from an array-like of tuples.\n\n Parameters\n ----------\n data : array-like (1-dimensional)\n Array of tuples.\n closed : {'left', 'right', 'both', 'neither'}, default 'right'\n Whether the intervals are closed on the left-side, right-side, both\n or neither.\n copy : bool, default False\n By-default copy the data, this is compat only and ignored.\n dtype : dtype or None, default None\n If None, dtype will be inferred.\n\n Returns\n -------\n %(klass)s\n\n See Also\n --------\n interval_range : Function to create a fixed frequency IntervalIndex.\n %(klass)s.from_arrays : Construct an %(klass)s from a left and\n right array.\n %(klass)s.from_breaks : Construct an %(klass)s from an array of\n splits.\n\n %(examples)s\\\n \"\"\"\n )\n\n @classmethod\n @Appender(\n _interval_shared_docs[\"from_tuples\"]\n % {\n \"klass\": \"IntervalArray\",\n \"examples\": textwrap.dedent(\n \"\"\"\\\n Examples\n --------\n >>> pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 2)])\n <IntervalArray>\n [(0, 1], (1, 2]]\n Length: 2, dtype: interval[int64, right]\n \"\"\"\n ),\n }\n )\n def from_tuples(\n cls: Type[IntervalArrayT],\n data,\n closed=\"right\",\n copy: bool = False,\n dtype: Optional[Dtype] = None,\n ) -> IntervalArrayT:\n if len(data):\n left, right = [], []\n else:\n # ensure that empty data keeps input dtype\n left = right = data\n\n for d in data:\n if isna(d):\n lhs = rhs = np.nan\n else:\n name = cls.__name__\n try:\n # need list of length 2 tuples, e.g. 
[(0, 1), (1, 2), ...]\n lhs, rhs = d\n except ValueError as err:\n msg = f\"{name}.from_tuples requires tuples of length 2, got {d}\"\n raise ValueError(msg) from err\n except TypeError as err:\n msg = f\"{name}.from_tuples received an invalid item, {d}\"\n raise TypeError(msg) from err\n left.append(lhs)\n right.append(rhs)\n\n return cls.from_arrays(left, right, closed, copy=False, dtype=dtype)\n\n def _validate(self):\n \"\"\"\n Verify that the IntervalArray is valid.\n\n Checks that\n\n * closed is valid\n * left and right match lengths\n * left and right have the same missing values\n * left is always below right\n \"\"\"\n if self.closed not in VALID_CLOSED:\n msg = f\"invalid option for 'closed': {self.closed}\"\n raise ValueError(msg)\n if len(self._left) != len(self._right):\n msg = \"left and right must have the same length\"\n raise ValueError(msg)\n left_mask = notna(self._left)\n right_mask = notna(self._right)\n if not (left_mask == right_mask).all():\n msg = (\n \"missing values must be missing in the same \"\n \"location both left and right sides\"\n )\n raise ValueError(msg)\n if not (self._left[left_mask] <= self._right[left_mask]).all():\n msg = \"left side of interval must be <= right side\"\n raise ValueError(msg)\n\n def _shallow_copy(self: IntervalArrayT, left, right) -> IntervalArrayT:\n \"\"\"\n Return a new IntervalArray with the replacement attributes\n\n Parameters\n ----------\n left : Index\n Values to be used for the left-side of the intervals.\n right : Index\n Values to be used for the right-side of the intervals.\n \"\"\"\n return self._simple_new(left, right, closed=self.closed, verify_integrity=False)\n\n # ---------------------------------------------------------------------\n # Descriptive\n\n @property\n def dtype(self) -> IntervalDtype:\n return self._dtype\n\n @property\n def nbytes(self) -> int:\n return self.left.nbytes + self.right.nbytes\n\n @property\n def size(self) -> int:\n # Avoid materializing self.values\n return self.left.size\n\n # ---------------------------------------------------------------------\n # EA Interface\n\n def __iter__(self):\n return iter(np.asarray(self))\n\n def __len__(self) -> int:\n return len(self._left)\n\n def __getitem__(self, key):\n key = check_array_indexer(self, key)\n left = self._left[key]\n right = self._right[key]\n\n if not isinstance(left, (np.ndarray, ExtensionArray)):\n # scalar\n if is_scalar(left) and isna(left):\n return self._fill_value\n return Interval(left, right, self.closed)\n # error: Argument 1 to \"ndim\" has incompatible type \"Union[ndarray,\n # ExtensionArray]\"; expected \"Union[Union[int, float, complex, str, bytes,\n # generic], Sequence[Union[int, float, complex, str, bytes, generic]],\n # Sequence[Sequence[Any]], _SupportsArray]\"\n if np.ndim(left) > 1: # type: ignore[arg-type]\n # GH#30588 multi-dimensional indexer disallowed\n raise ValueError(\"multi-dimensional indexing not allowed\")\n return self._shallow_copy(left, right)\n\n def __setitem__(self, key, value):\n value_left, value_right = self._validate_setitem_value(value)\n key = check_array_indexer(self, key)\n\n self._left[key] = value_left\n self._right[key] = value_right\n\n def _cmp_method(self, other, op):\n # ensure pandas array for list-like and eliminate non-interval scalars\n if is_list_like(other):\n if len(self) != len(other):\n raise ValueError(\"Lengths must match to compare\")\n other = pd_array(other)\n elif not isinstance(other, Interval):\n # non-interval scalar -> no matches\n return 
invalid_comparison(self, other, op)\n\n # determine the dtype of the elements we want to compare\n if isinstance(other, Interval):\n other_dtype = pandas_dtype(\"interval\")\n elif not is_categorical_dtype(other.dtype):\n other_dtype = other.dtype\n else:\n # for categorical defer to categories for dtype\n other_dtype = other.categories.dtype\n\n # extract intervals if we have interval categories with matching closed\n if is_interval_dtype(other_dtype):\n if self.closed != other.categories.closed:\n return invalid_comparison(self, other, op)\n\n other = other.categories.take(\n other.codes, allow_fill=True, fill_value=other.categories._na_value\n )\n\n # interval-like -> need same closed and matching endpoints\n if is_interval_dtype(other_dtype):\n if self.closed != other.closed:\n return invalid_comparison(self, other, op)\n elif not isinstance(other, Interval):\n other = type(self)(other)\n\n if op is operator.eq:\n return (self._left == other.left) & (self._right == other.right)\n elif op is operator.ne:\n return (self._left != other.left) | (self._right != other.right)\n elif op is operator.gt:\n return (self._left > other.left) | (\n (self._left == other.left) & (self._right > other.right)\n )\n elif op is operator.ge:\n return (self == other) | (self > other)\n elif op is operator.lt:\n return (self._left < other.left) | (\n (self._left == other.left) & (self._right < other.right)\n )\n else:\n # operator.lt\n return (self == other) | (self < other)\n\n # non-interval/non-object dtype -> no matches\n if not is_object_dtype(other_dtype):\n return invalid_comparison(self, other, op)\n\n # object dtype -> iteratively check for intervals\n result = np.zeros(len(self), dtype=bool)\n for i, obj in enumerate(other):\n try:\n result[i] = op(self[i], obj)\n except TypeError:\n if obj is NA:\n # comparison with np.nan returns NA\n # github.com/pandas-dev/pandas/pull/37124#discussion_r509095092\n result[i] = op is operator.ne\n else:\n raise\n return result\n\n @unpack_zerodim_and_defer(\"__eq__\")\n def __eq__(self, other):\n return self._cmp_method(other, operator.eq)\n\n @unpack_zerodim_and_defer(\"__ne__\")\n def __ne__(self, other):\n return self._cmp_method(other, operator.ne)\n\n @unpack_zerodim_and_defer(\"__gt__\")\n def __gt__(self, other):\n return self._cmp_method(other, operator.gt)\n\n @unpack_zerodim_and_defer(\"__ge__\")\n def __ge__(self, other):\n return self._cmp_method(other, operator.ge)\n\n @unpack_zerodim_and_defer(\"__lt__\")\n def __lt__(self, other):\n return self._cmp_method(other, operator.lt)\n\n @unpack_zerodim_and_defer(\"__le__\")\n def __le__(self, other):\n return self._cmp_method(other, operator.le)\n\n def argsort(\n self,\n ascending: bool = True,\n kind: str = \"quicksort\",\n na_position: str = \"last\",\n *args,\n **kwargs,\n ) -> np.ndarray:\n ascending = nv.validate_argsort_with_ascending(ascending, args, kwargs)\n\n if ascending and kind == \"quicksort\" and na_position == \"last\":\n return np.lexsort((self.right, self.left))\n\n # TODO: other cases we can use lexsort for? 
much more performant.\n return super().argsort(\n ascending=ascending, kind=kind, na_position=na_position, **kwargs\n )\n\n def fillna(\n self: IntervalArrayT, value=None, method=None, limit=None\n ) -> IntervalArrayT:\n \"\"\"\n Fill NA/NaN values using the specified method.\n\n Parameters\n ----------\n value : scalar, dict, Series\n If a scalar value is passed it is used to fill all missing values.\n Alternatively, a Series or dict can be used to fill in different\n values for each index. The value should not be a list. The\n value(s) passed should be either Interval objects or NA/NaN.\n method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None\n (Not implemented yet for IntervalArray)\n Method to use for filling holes in reindexed Series\n limit : int, default None\n (Not implemented yet for IntervalArray)\n If method is specified, this is the maximum number of consecutive\n NaN values to forward/backward fill. In other words, if there is\n a gap with more than this number of consecutive NaNs, it will only\n be partially filled. If method is not specified, this is the\n maximum number of entries along the entire axis where NaNs will be\n filled.\n\n Returns\n -------\n filled : IntervalArray with NA/NaN filled\n \"\"\"\n if method is not None:\n raise TypeError(\"Filling by method is not supported for IntervalArray.\")\n if limit is not None:\n raise TypeError(\"limit is not supported for IntervalArray.\")\n\n value_left, value_right = self._validate_fill_value(value)\n\n left = self.left.fillna(value=value_left)\n right = self.right.fillna(value=value_right)\n return self._shallow_copy(left, right)\n\n def astype(self, dtype, copy: bool = True):\n \"\"\"\n Cast to an ExtensionArray or NumPy array with dtype 'dtype'.\n\n Parameters\n ----------\n dtype : str or dtype\n Typecode or data-type to which the array is cast.\n\n copy : bool, default True\n Whether to copy the data, even if not necessary. 
If False,\n a copy is made only if the old dtype does not match the\n new dtype.\n\n Returns\n -------\n array : ExtensionArray or ndarray\n ExtensionArray or NumPy ndarray with 'dtype' for its dtype.\n \"\"\"\n from pandas import Index\n from pandas.core.arrays.string_ import StringDtype\n\n if dtype is not None:\n dtype = pandas_dtype(dtype)\n\n if is_interval_dtype(dtype):\n if dtype == self.dtype:\n return self.copy() if copy else self\n\n # need to cast to different subtype\n try:\n # We need to use Index rules for astype to prevent casting\n # np.nan entries to int subtypes\n new_left = Index(self._left, copy=False).astype(dtype.subtype)\n new_right = Index(self._right, copy=False).astype(dtype.subtype)\n except TypeError as err:\n msg = (\n f\"Cannot convert {self.dtype} to {dtype}; subtypes are incompatible\"\n )\n raise TypeError(msg) from err\n return self._shallow_copy(new_left, new_right)\n elif is_categorical_dtype(dtype):\n return Categorical(np.asarray(self), dtype=dtype)\n elif isinstance(dtype, StringDtype):\n return dtype.construct_array_type()._from_sequence(self, copy=False)\n\n # TODO: This try/except will be repeated.\n try:\n return np.asarray(self).astype(dtype, copy=copy)\n except (TypeError, ValueError) as err:\n msg = f\"Cannot cast {type(self).__name__} to dtype {dtype}\"\n raise TypeError(msg) from err\n\n def equals(self, other) -> bool:\n if type(self) != type(other):\n return False\n\n return bool(\n self.closed == other.closed\n and self.left.equals(other.left)\n and self.right.equals(other.right)\n )\n\n @classmethod\n def _concat_same_type(\n cls: Type[IntervalArrayT], to_concat: Sequence[IntervalArrayT]\n ) -> IntervalArrayT:\n \"\"\"\n Concatenate multiple IntervalArray\n\n Parameters\n ----------\n to_concat : sequence of IntervalArray\n\n Returns\n -------\n IntervalArray\n \"\"\"\n closed = {interval.closed for interval in to_concat}\n if len(closed) != 1:\n raise ValueError(\"Intervals must all be closed on the same side.\")\n closed = closed.pop()\n\n left = np.concatenate([interval.left for interval in to_concat])\n right = np.concatenate([interval.right for interval in to_concat])\n return cls._simple_new(left, right, closed=closed, copy=False)\n\n def copy(self: IntervalArrayT) -> IntervalArrayT:\n \"\"\"\n Return a copy of the array.\n\n Returns\n -------\n IntervalArray\n \"\"\"\n left = self._left.copy()\n right = self._right.copy()\n closed = self.closed\n # TODO: Could skip verify_integrity here.\n return type(self).from_arrays(left, right, closed=closed)\n\n def isna(self) -> np.ndarray:\n return isna(self._left)\n\n def shift(\n self: IntervalArrayT, periods: int = 1, fill_value: object = None\n ) -> IntervalArray:\n if not len(self) or periods == 0:\n return self.copy()\n\n if isna(fill_value):\n fill_value = self.dtype.na_value\n\n # ExtensionArray.shift doesn't work for two reasons\n # 1. IntervalArray.dtype.na_value may not be correct for the dtype.\n # 2. 
IntervalArray._from_sequence only accepts NaN for missing values,\n # not other values like NaT\n\n empty_len = min(abs(periods), len(self))\n if isna(fill_value):\n from pandas import Index\n\n fill_value = Index(self._left, copy=False)._na_value\n empty = IntervalArray.from_breaks([fill_value] * (empty_len + 1))\n else:\n empty = self._from_sequence([fill_value] * empty_len)\n\n if periods > 0:\n a = empty\n b = self[:-periods]\n else:\n a = self[abs(periods) :]\n b = empty\n return self._concat_same_type([a, b])\n\n def take(\n self: IntervalArrayT,\n indices,\n *,\n allow_fill: bool = False,\n fill_value=None,\n axis=None,\n **kwargs,\n ) -> IntervalArrayT:\n \"\"\"\n Take elements from the IntervalArray.\n\n Parameters\n ----------\n indices : sequence of integers\n Indices to be taken.\n\n allow_fill : bool, default False\n How to handle negative values in `indices`.\n\n * False: negative values in `indices` indicate positional indices\n from the right (the default). This is similar to\n :func:`numpy.take`.\n\n * True: negative values in `indices` indicate\n missing values. These values are set to `fill_value`. Any other\n other negative values raise a ``ValueError``.\n\n fill_value : Interval or NA, optional\n Fill value to use for NA-indices when `allow_fill` is True.\n This may be ``None``, in which case the default NA value for\n the type, ``self.dtype.na_value``, is used.\n\n For many ExtensionArrays, there will be two representations of\n `fill_value`: a user-facing \"boxed\" scalar, and a low-level\n physical NA value. `fill_value` should be the user-facing version,\n and the implementation should handle translating that to the\n physical version for processing the take if necessary.\n\n axis : any, default None\n Present for compat with IntervalIndex; does nothing.\n\n Returns\n -------\n IntervalArray\n\n Raises\n ------\n IndexError\n When the indices are out of bounds for the array.\n ValueError\n When `indices` contains negative values other than ``-1``\n and `allow_fill` is True.\n \"\"\"\n nv.validate_take((), kwargs)\n\n fill_left = fill_right = fill_value\n if allow_fill:\n fill_left, fill_right = self._validate_fill_value(fill_value)\n\n left_take = take(\n self._left, indices, allow_fill=allow_fill, fill_value=fill_left\n )\n right_take = take(\n self._right, indices, allow_fill=allow_fill, fill_value=fill_right\n )\n\n return self._shallow_copy(left_take, right_take)\n\n def _validate_listlike(self, value):\n # list-like of intervals\n try:\n array = IntervalArray(value)\n self._check_closed_matches(array, name=\"value\")\n value_left, value_right = array.left, array.right\n except TypeError as err:\n # wrong type: not interval or NA\n msg = f\"'value' should be an interval type, got {type(value)} instead.\"\n raise TypeError(msg) from err\n\n try:\n self.left._validate_fill_value(value_left)\n except (ValueError, TypeError) as err:\n msg = (\n \"'value' should be a compatible interval type, \"\n f\"got {type(value)} instead.\"\n )\n raise TypeError(msg) from err\n\n return value_left, value_right\n\n def _validate_scalar(self, value):\n if isinstance(value, Interval):\n self._check_closed_matches(value, name=\"value\")\n left, right = value.left, value.right\n elif is_valid_na_for_dtype(value, self.left.dtype):\n # GH#18295\n left = right = value\n else:\n raise TypeError(\n \"can only insert Interval objects and NA into an IntervalArray\"\n )\n return left, right\n\n def _validate_fill_value(self, value):\n return self._validate_scalar(value)\n\n def 
_validate_setitem_value(self, value):\n needs_float_conversion = False\n\n if is_valid_na_for_dtype(value, self.left.dtype):\n # na value: need special casing to set directly on numpy arrays\n if is_integer_dtype(self.dtype.subtype):\n # can't set NaN on a numpy integer array\n needs_float_conversion = True\n elif is_datetime64_dtype(self.dtype.subtype):\n # need proper NaT to set directly on the numpy array\n value = np.datetime64(\"NaT\")\n elif is_datetime64tz_dtype(self.dtype.subtype):\n # need proper NaT to set directly on the DatetimeArray array\n value = NaT\n elif is_timedelta64_dtype(self.dtype.subtype):\n # need proper NaT to set directly on the numpy array\n value = np.timedelta64(\"NaT\")\n value_left, value_right = value, value\n\n elif isinstance(value, Interval):\n # scalar interval\n self._check_closed_matches(value, name=\"value\")\n value_left, value_right = value.left, value.right\n self.left._validate_fill_value(value_left)\n self.left._validate_fill_value(value_right)\n\n else:\n return self._validate_listlike(value)\n\n if needs_float_conversion:\n raise ValueError(\"Cannot set float NaN to integer-backed IntervalArray\")\n return value_left, value_right\n\n def value_counts(self, dropna: bool = True):\n \"\"\"\n Returns a Series containing counts of each interval.\n\n Parameters\n ----------\n dropna : bool, default True\n Don't include counts of NaN.\n\n Returns\n -------\n counts : Series\n\n See Also\n --------\n Series.value_counts\n \"\"\"\n # TODO: implement this is a non-naive way!\n return value_counts(np.asarray(self), dropna=dropna)\n\n # ---------------------------------------------------------------------\n # Rendering Methods\n\n def _format_data(self) -> str:\n\n # TODO: integrate with categorical and make generic\n # name argument is unused here; just for compat with base / categorical\n n = len(self)\n max_seq_items = min((get_option(\"display.max_seq_items\") or n) // 10, 10)\n\n formatter = str\n\n if n == 0:\n summary = \"[]\"\n elif n == 1:\n first = formatter(self[0])\n summary = f\"[{first}]\"\n elif n == 2:\n first = formatter(self[0])\n last = formatter(self[-1])\n summary = f\"[{first}, {last}]\"\n else:\n\n if n > max_seq_items:\n n = min(max_seq_items // 2, 10)\n head = [formatter(x) for x in self[:n]]\n tail = [formatter(x) for x in self[-n:]]\n head_str = \", \".join(head)\n tail_str = \", \".join(tail)\n summary = f\"[{head_str} ... {tail_str}]\"\n else:\n tail = [formatter(x) for x in self]\n tail_str = \", \".join(tail)\n summary = f\"[{tail_str}]\"\n\n return summary\n\n def __repr__(self) -> str:\n # the short repr has no trailing newline, while the truncated\n # repr does. 
So we include a newline in our template, and strip\n # any trailing newlines from format_object_summary\n data = self._format_data()\n class_name = f\"<{type(self).__name__}>\\n\"\n\n template = f\"{class_name}{data}\\nLength: {len(self)}, dtype: {self.dtype}\"\n return template\n\n def _format_space(self) -> str:\n space = \" \" * (len(type(self).__name__) + 1)\n return f\"\\n{space}\"\n\n # ---------------------------------------------------------------------\n # Vectorized Interval Properties/Attributes\n\n @property\n def left(self):\n \"\"\"\n Return the left endpoints of each Interval in the IntervalArray as\n an Index.\n \"\"\"\n from pandas import Index\n\n return Index(self._left, copy=False)\n\n @property\n def right(self):\n \"\"\"\n Return the right endpoints of each Interval in the IntervalArray as\n an Index.\n \"\"\"\n from pandas import Index\n\n return Index(self._right, copy=False)\n\n @property\n def length(self):\n \"\"\"\n Return an Index with entries denoting the length of each Interval in\n the IntervalArray.\n \"\"\"\n try:\n return self.right - self.left\n except TypeError as err:\n # length not defined for some types, e.g. string\n msg = (\n \"IntervalArray contains Intervals without defined length, \"\n \"e.g. Intervals with string endpoints\"\n )\n raise TypeError(msg) from err\n\n @property\n def mid(self):\n \"\"\"\n Return the midpoint of each Interval in the IntervalArray as an Index.\n \"\"\"\n try:\n return 0.5 * (self.left + self.right)\n except TypeError:\n # datetime safe version\n return self.left + 0.5 * self.length\n\n _interval_shared_docs[\"overlaps\"] = textwrap.dedent(\n \"\"\"\n Check elementwise if an Interval overlaps the values in the %(klass)s.\n\n Two intervals overlap if they share a common point, including closed\n endpoints. Intervals that only have an open endpoint in common do not\n overlap.\n\n .. 
versionadded:: 0.24.0\n\n Parameters\n ----------\n other : %(klass)s\n Interval to check against for an overlap.\n\n Returns\n -------\n ndarray\n Boolean array positionally indicating where an overlap occurs.\n\n See Also\n --------\n Interval.overlaps : Check whether two Interval objects overlap.\n\n Examples\n --------\n %(examples)s\n >>> intervals.overlaps(pd.Interval(0.5, 1.5))\n array([ True, True, False])\n\n Intervals that share closed endpoints overlap:\n\n >>> intervals.overlaps(pd.Interval(1, 3, closed='left'))\n array([ True, True, True])\n\n Intervals that only have an open endpoint in common do not overlap:\n\n >>> intervals.overlaps(pd.Interval(1, 2, closed='right'))\n array([False, True, False])\n \"\"\"\n )\n\n @Appender(\n _interval_shared_docs[\"overlaps\"]\n % {\n \"klass\": \"IntervalArray\",\n \"examples\": textwrap.dedent(\n \"\"\"\\\n >>> data = [(0, 1), (1, 3), (2, 4)]\n >>> intervals = pd.arrays.IntervalArray.from_tuples(data)\n >>> intervals\n <IntervalArray>\n [(0, 1], (1, 3], (2, 4]]\n Length: 3, dtype: interval[int64, right]\n \"\"\"\n ),\n }\n )\n def overlaps(self, other):\n if isinstance(other, (IntervalArray, ABCIntervalIndex)):\n raise NotImplementedError\n elif not isinstance(other, Interval):\n msg = f\"`other` must be Interval-like, got {type(other).__name__}\"\n raise TypeError(msg)\n\n # equality is okay if both endpoints are closed (overlap at a point)\n op1 = le if (self.closed_left and other.closed_right) else lt\n op2 = le if (other.closed_left and self.closed_right) else lt\n\n # overlaps is equivalent negation of two interval being disjoint:\n # disjoint = (A.left > B.right) or (B.left > A.right)\n # (simplifying the negation allows this to be done in less operations)\n return op1(self.left, other.right) & op2(other.left, self.right)\n\n # ---------------------------------------------------------------------\n\n @property\n def closed(self):\n \"\"\"\n Whether the intervals are closed on the left-side, right-side, both or\n neither.\n \"\"\"\n return self.dtype.closed\n\n _interval_shared_docs[\"set_closed\"] = textwrap.dedent(\n \"\"\"\n Return an %(klass)s identical to the current one, but closed on the\n specified side.\n\n .. 
versionadded:: 0.24.0\n\n Parameters\n ----------\n closed : {'left', 'right', 'both', 'neither'}\n Whether the intervals are closed on the left-side, right-side, both\n or neither.\n\n Returns\n -------\n new_index : %(klass)s\n\n %(examples)s\\\n \"\"\"\n )\n\n @Appender(\n _interval_shared_docs[\"set_closed\"]\n % {\n \"klass\": \"IntervalArray\",\n \"examples\": textwrap.dedent(\n \"\"\"\\\n Examples\n --------\n >>> index = pd.arrays.IntervalArray.from_breaks(range(4))\n >>> index\n <IntervalArray>\n [(0, 1], (1, 2], (2, 3]]\n Length: 3, dtype: interval[int64, right]\n >>> index.set_closed('both')\n <IntervalArray>\n [[0, 1], [1, 2], [2, 3]]\n Length: 3, dtype: interval[int64, both]\n \"\"\"\n ),\n }\n )\n def set_closed(self: IntervalArrayT, closed) -> IntervalArrayT:\n if closed not in VALID_CLOSED:\n msg = f\"invalid option for 'closed': {closed}\"\n raise ValueError(msg)\n\n return type(self)._simple_new(\n left=self._left, right=self._right, closed=closed, verify_integrity=False\n )\n\n _interval_shared_docs[\n \"is_non_overlapping_monotonic\"\n ] = \"\"\"\n Return True if the %(klass)s is non-overlapping (no Intervals share\n points) and is either monotonic increasing or monotonic decreasing,\n else False.\n \"\"\"\n\n # https://github.com/python/mypy/issues/1362\n # Mypy does not support decorated properties\n @property # type: ignore[misc]\n @Appender(\n _interval_shared_docs[\"is_non_overlapping_monotonic\"] % _shared_docs_kwargs\n )\n def is_non_overlapping_monotonic(self) -> bool:\n # must be increasing (e.g., [0, 1), [1, 2), [2, 3), ... )\n # or decreasing (e.g., [-1, 0), [-2, -1), [-3, -2), ...)\n # we already require left <= right\n\n # strict inequality for closed == 'both'; equality implies overlapping\n # at a point when both sides of intervals are included\n if self.closed == \"both\":\n return bool(\n (self._right[:-1] < self._left[1:]).all()\n or (self._left[:-1] > self._right[1:]).all()\n )\n\n # non-strict inequality when closed != 'both'; at least one side is\n # not included in the intervals, so equality does not imply overlapping\n return bool(\n (self._right[:-1] <= self._left[1:]).all()\n or (self._left[:-1] >= self._right[1:]).all()\n )\n\n # ---------------------------------------------------------------------\n # Conversion\n\n def __array__(self, dtype: Optional[NpDtype] = None) -> np.ndarray:\n \"\"\"\n Return the IntervalArray's data as a numpy array of Interval\n objects (with dtype='object')\n \"\"\"\n left = self._left\n right = self._right\n mask = self.isna()\n closed = self.closed\n\n result = np.empty(len(left), dtype=object)\n for i in range(len(left)):\n if mask[i]:\n result[i] = np.nan\n else:\n result[i] = Interval(left[i], right[i], closed)\n return result\n\n def __arrow_array__(self, type=None):\n \"\"\"\n Convert myself into a pyarrow Array.\n \"\"\"\n import pyarrow\n\n from pandas.core.arrays._arrow_utils import ArrowIntervalType\n\n try:\n subtype = pyarrow.from_numpy_dtype(self.dtype.subtype)\n except TypeError as err:\n raise TypeError(\n f\"Conversion to arrow with subtype '{self.dtype.subtype}' \"\n \"is not supported\"\n ) from err\n interval_type = ArrowIntervalType(subtype, self.closed)\n storage_array = pyarrow.StructArray.from_arrays(\n [\n pyarrow.array(self._left, type=subtype, from_pandas=True),\n pyarrow.array(self._right, type=subtype, from_pandas=True),\n ],\n names=[\"left\", \"right\"],\n )\n mask = self.isna()\n if mask.any():\n # if there are missing values, set validity bitmap also on the array level\n null_bitmap 
= pyarrow.array(~mask).buffers()[1]\n storage_array = pyarrow.StructArray.from_buffers(\n storage_array.type,\n len(storage_array),\n [null_bitmap],\n children=[storage_array.field(0), storage_array.field(1)],\n )\n\n if type is not None:\n if type.equals(interval_type.storage_type):\n return storage_array\n elif isinstance(type, ArrowIntervalType):\n # ensure we have the same subtype and closed attributes\n if not type.equals(interval_type):\n raise TypeError(\n \"Not supported to convert IntervalArray to type with \"\n f\"different 'subtype' ({self.dtype.subtype} vs {type.subtype}) \"\n f\"and 'closed' ({self.closed} vs {type.closed}) attributes\"\n )\n else:\n raise TypeError(\n f\"Not supported to convert IntervalArray to '{type}' type\"\n )\n\n return pyarrow.ExtensionArray.from_storage(interval_type, storage_array)\n\n _interval_shared_docs[\n \"to_tuples\"\n ] = \"\"\"\n Return an %(return_type)s of tuples of the form (left, right).\n\n Parameters\n ----------\n na_tuple : bool, default True\n Returns NA as a tuple if True, ``(nan, nan)``, or just as the NA\n value itself if False, ``nan``.\n\n Returns\n -------\n tuples: %(return_type)s\n %(examples)s\\\n \"\"\"\n\n @Appender(\n _interval_shared_docs[\"to_tuples\"] % {\"return_type\": \"ndarray\", \"examples\": \"\"}\n )\n def to_tuples(self, na_tuple=True) -> np.ndarray:\n tuples = com.asarray_tuplesafe(zip(self._left, self._right))\n if not na_tuple:\n # GH 18756\n tuples = np.where(~self.isna(), tuples, np.nan)\n return tuples\n\n # ---------------------------------------------------------------------\n\n def putmask(self, mask: np.ndarray, value) -> None:\n value_left, value_right = self._validate_setitem_value(value)\n\n if isinstance(self._left, np.ndarray):\n np.putmask(self._left, mask, value_left)\n np.putmask(self._right, mask, value_right)\n else:\n self._left.putmask(mask, value_left)\n self._right.putmask(mask, value_right)\n\n def insert(self: IntervalArrayT, loc: int, item: Interval) -> IntervalArrayT:\n \"\"\"\n Return a new IntervalArray inserting new item at location. Follows\n Python list.append semantics for negative values. Only Interval\n objects and NA can be inserted into an IntervalIndex\n\n Parameters\n ----------\n loc : int\n item : Interval\n\n Returns\n -------\n IntervalArray\n \"\"\"\n left_insert, right_insert = self._validate_scalar(item)\n\n new_left = self.left.insert(loc, left_insert)\n new_right = self.right.insert(loc, right_insert)\n\n return self._shallow_copy(new_left, new_right)\n\n def delete(self: IntervalArrayT, loc) -> IntervalArrayT:\n if isinstance(self._left, np.ndarray):\n new_left = np.delete(self._left, loc)\n new_right = np.delete(self._right, loc)\n else:\n new_left = self._left.delete(loc)\n new_right = self._right.delete(loc)\n return self._shallow_copy(left=new_left, right=new_right)\n\n @Appender(_extension_array_shared_docs[\"repeat\"] % _shared_docs_kwargs)\n def repeat(self: IntervalArrayT, repeats: int, axis=None) -> IntervalArrayT:\n nv.validate_repeat((), {\"axis\": axis})\n left_repeat = self.left.repeat(repeats)\n right_repeat = self.right.repeat(repeats)\n return self._shallow_copy(left=left_repeat, right=right_repeat)\n\n _interval_shared_docs[\"contains\"] = textwrap.dedent(\n \"\"\"\n Check elementwise if the Intervals contain the value.\n\n Return a boolean mask whether the value is contained in the Intervals\n of the %(klass)s.\n\n .. 
versionadded:: 0.25.0\n\n Parameters\n ----------\n other : scalar\n The value to check whether it is contained in the Intervals.\n\n Returns\n -------\n boolean array\n\n See Also\n --------\n Interval.contains : Check whether Interval object contains value.\n %(klass)s.overlaps : Check if an Interval overlaps the values in the\n %(klass)s.\n\n Examples\n --------\n %(examples)s\n >>> intervals.contains(0.5)\n array([ True, False, False])\n \"\"\"\n )\n\n @Appender(\n _interval_shared_docs[\"contains\"]\n % {\n \"klass\": \"IntervalArray\",\n \"examples\": textwrap.dedent(\n \"\"\"\\\n >>> intervals = pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 3), (2, 4)])\n >>> intervals\n <IntervalArray>\n [(0, 1], (1, 3], (2, 4]]\n Length: 3, dtype: interval[int64, right]\n \"\"\"\n ),\n }\n )\n def contains(self, other):\n if isinstance(other, Interval):\n raise NotImplementedError(\"contains not implemented for two intervals\")\n\n return (self._left < other if self.open_left else self._left <= other) & (\n other < self._right if self.open_right else other <= self._right\n )\n\n def isin(self, values) -> np.ndarray:\n if not hasattr(values, \"dtype\"):\n values = np.array(values)\n values = extract_array(values, extract_numpy=True)\n\n if is_interval_dtype(values.dtype):\n if self.closed != values.closed:\n # not comparable -> no overlap\n return np.zeros(self.shape, dtype=bool)\n\n if is_dtype_equal(self.dtype, values.dtype):\n # GH#38353 instead of casting to object, operating on a\n # complex128 ndarray is much more performant.\n left = self._combined.view(\"complex128\")\n right = values._combined.view(\"complex128\")\n return np.in1d(left, right)\n\n elif needs_i8_conversion(self.left.dtype) ^ needs_i8_conversion(\n values.left.dtype\n ):\n # not comparable -> no overlap\n return np.zeros(self.shape, dtype=bool)\n\n return isin(self.astype(object), values.astype(object))\n\n @property\n def _combined(self) -> ArrayLike:\n left = self.left._values.reshape(-1, 1)\n right = self.right._values.reshape(-1, 1)\n if needs_i8_conversion(left.dtype):\n comb = left._concat_same_type([left, right], axis=1)\n else:\n comb = np.concatenate([left, right], axis=1)\n return comb\n\n\ndef _maybe_convert_platform_interval(values) -> ArrayLike:\n \"\"\"\n Try to do platform conversion, with special casing for IntervalArray.\n Wrapper around maybe_convert_platform that alters the default return\n dtype in certain cases to be compatible with IntervalArray. For example,\n empty lists return with integer dtype instead of object dtype, which is\n prohibited for IntervalArray.\n\n Parameters\n ----------\n values : array-like\n\n Returns\n -------\n array\n \"\"\"\n if isinstance(values, (list, tuple)) and len(values) == 0:\n # GH 19016\n # empty lists/tuples get object dtype by default, but this is\n # prohibited for IntervalArray, so coerce to integer instead\n return np.array([], dtype=np.int64)\n elif not is_list_like(values) or isinstance(values, ABCDataFrame):\n # This will raise later, but we avoid passing to maybe_convert_platform\n return values\n elif is_categorical_dtype(values):\n values = np.asarray(values)\n elif not hasattr(values, \"dtype\") and not isinstance(values, (list, tuple, range)):\n # TODO: should we just cast these to list?\n return values\n else:\n values = extract_array(values, extract_numpy=True)\n\n return maybe_convert_platform(values)\n"
] |
[
[
"pandas._testing.assert_almost_equal",
"pandas.Series",
"numpy.arange",
"pandas.DataFrame",
"numpy.random.randn",
"numpy.random.rand",
"pandas.MultiIndex.from_product",
"pandas.date_range"
],
[
"pandas._libs.tslibs.tzconversion.tz_convert",
"pandas._libs.tslibs.tzconversion.tz_localize_to_utc",
"numpy.random.randint"
],
[
"pandas._testing.assert_numpy_array_equal",
"pandas.PeriodIndex",
"pandas.Series",
"pandas.offsets.Day",
"numpy.asarray",
"pandas.period_range",
"pandas.offsets.BusinessDay",
"pandas.Index",
"pandas.DatetimeIndex",
"pandas._testing.round_trip_pickle",
"pandas.Period",
"pandas.date_range",
"pandas.offsets.MonthEnd",
"pandas._testing.assert_series_equal",
"numpy.array",
"pandas._testing.makePeriodIndex",
"pandas._testing.assert_index_equal"
],
[
"pandas.PeriodIndex",
"pandas.period_range",
"numpy.random.choice",
"numpy.arange",
"pandas._testing.makeStringIndex",
"pandas.Categorical",
"pandas.Timestamp",
"pandas.MultiIndex.from_tuples",
"pandas.DataFrame",
"pandas.option_context",
"pandas._testing.rands_array",
"numpy.random.randn",
"pandas.date_range",
"pandas._testing.reset_display_options",
"numpy.zeros",
"pandas.io.formats.format.set_option"
],
[
"numpy.array",
"numpy.datetime64"
],
[
"numpy.asarray",
"pandas.core.missing.clean_fill_method",
"pandas.core.dtypes.common.is_bool"
],
[
"pandas._testing.assert_produces_warning",
"pandas.merge",
"pandas.period_range",
"numpy.isnan",
"numpy.arange",
"pandas.Index",
"pandas.DataFrame",
"pandas.MultiIndex.from_tuples",
"numpy.concatenate",
"pandas.MultiIndex.from_product",
"pandas.date_range",
"pandas._testing.assert_frame_equal",
"numpy.zeros",
"pandas._testing.assert_index_equal"
],
[
"pandas.core.ops.unpack_zerodim_and_defer",
"numpy.asarray",
"pandas.core.dtypes.common.is_dtype_equal",
"pandas._libs.interval.intervals_to_interval_bounds",
"numpy.in1d",
"pandas.core.dtypes.dtypes.IntervalDtype",
"pandas.core.dtypes.missing.notna",
"numpy.concatenate",
"pandas._libs.interval.Interval",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"pandas._config.get_option",
"pandas.core.arrays._arrow_utils.ArrowIntervalType",
"pandas.core.dtypes.common.is_datetime64_dtype",
"pandas.core.indexers.check_array_indexer",
"pandas.compat.numpy.function.validate_take",
"pandas.core.dtypes.common.is_interval_dtype",
"pandas.core.construction.array",
"pandas.core.dtypes.cast.maybe_convert_platform",
"pandas.Index",
"numpy.lexsort",
"pandas.core.dtypes.common.is_float_dtype",
"pandas.core.dtypes.common.is_string_dtype",
"numpy.zeros",
"pandas.core.dtypes.common.is_categorical_dtype",
"pandas.compat.numpy.function.validate_repeat",
"pandas.core.dtypes.common.is_list_like",
"pandas.core.dtypes.common.is_integer_dtype",
"pandas.util._decorators.Appender",
"numpy.putmask",
"pandas.core.dtypes.common.pandas_dtype",
"pandas.core.dtypes.common.is_timedelta64_dtype",
"pandas.core.indexes.base.ensure_index",
"pandas.compat.numpy.function.validate_argsort_with_ascending",
"numpy.ndim",
"numpy.timedelta64",
"numpy.delete",
"pandas.core.construction.ensure_wrapped_if_datetimelike",
"numpy.array",
"pandas.core.dtypes.common.needs_i8_conversion",
"pandas.core.ops.invalid_comparison",
"pandas.core.dtypes.missing.is_valid_na_for_dtype",
"pandas.core.dtypes.common.is_scalar",
"numpy.datetime64",
"pandas.core.dtypes.common.is_object_dtype",
"pandas.core.dtypes.missing.isna",
"pandas._libs.interval.IntervalMixin.__new__",
"pandas.core.algorithms.take",
"pandas.core.construction.extract_array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.0",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
district10/snippet-manager
|
[
"bebe45a601368947168e3ee6e6ab8c1fc2ee2055"
] |
[
"snippets/point-direction.py"
] |
[
"import matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import axes3d, Axes3D\n\ndef draw_points(points):\n fig = plt.figure()\n ax = Axes3D(fig)\n\n # 绘制虚拟的三维 bounding box,保证三轴成比例\n minpt = np.min(points[:, :3], axis=0)\n maxpt = np.max(points[:, :3], axis=0)\n max_range = np.array([maxpt[0] - minpt[0], maxpt[1] - minpt[1], maxpt[2] - minpt[2]]).max()\n Xb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][0].flatten() + 0.5 * (maxpt[0] + minpt[0])\n Yb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][1].flatten() + 0.5 * (maxpt[1] + minpt[1])\n Zb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][2].flatten() + 0.5 * (maxpt[2] + minpt[2])\n for xb, yb, zb in zip(Xb, Yb, Zb):\n ax.plot([xb], [yb], [zb], 'w')\n ax.scatter(points[:, 0], points[:, 1], points[:, 2], c='y')\n ax.quiver(points[:, 0], points[:, 1], points[:, 2],\n points[:, 3], points[:, 4], points[:, 5], length=0.05, normalize=True)\n plt.show()\n"
] |
[
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
techieashish/PlasmaPy
|
[
"b1e4ea269e59011dcafd5bf3f658b43e683af645"
] |
[
"plasmapy/formulary/braginskii.py"
] |
[
"\"\"\"Functions to calculate classical transport coefficients.\n\n.. topic:: Examples:\n\n * :ref:`sphx_glr_auto_examples_plot_braginskii.py`\n\nIntroduction\n============\n\nClassical transport theory is derived by using kinetic theory to close the\nplasma two-fluid (electron and ion fluid) equations in the collisional limit.\nThe first complete model in this form was done by S. I. Braginskii [1]_.\n\nThis module uses fitting functions from literature ([1]_, [2]_, [3]_, [4]_,\n[5]_ and the next section) to calculate the transport coefficients, which are\nthe resistivity, thermoelectric conductivity, thermal conductivity, and\nviscosity.\n\nKeep in mind the following assumptions under which the transport equations\nare derived:\n\n1. The plasma is fully ionized, only consisting of ions and electrons.\n Neutral atoms are neglected.\n2. Turbulent transport does not dominate\n3. The velocity distribution is close to Maxwellian. This implies:\n i. Collisional mean free path << gradient scale length along field\n ii. Gyroradius << gradient scale length perpendicular to field\n4. The plasma is highly collisional: collisional frequency >> gyrofrequency\n\nWhen classical transport is not valid, e.g. due to the presence of strong\ngradients or turbulent transport, the transport is significantly increased\nby these other effects. Thus classical transport often serves as a lower\nbound on the losses / transport encountered in a plasma.\n\nTransport Variables\n===================\nFor documentation on the individual transport variables, please take\nthe following links to documentation of methods of `ClassicalTransport`.\n\n* `Resistivity <ClassicalTransport.resistivity>`_\n* `Thermoelectric conductivity <ClassicalTransport.thermoelectric_conductivity>`_\n* `Ion thermal conductivity <ClassicalTransport.ion_thermal_conductivity>`_\n* `Electron thermal conductivity <ClassicalTransport.electron_thermal_conductivity>`_\n* `Ion viscosity <ClassicalTransport.ion_viscosity>`_\n* `Electron viscosity <ClassicalTransport.electron_viscosity>`_\n\n\nUsing the module\n================\nGiven that many of the transport variables share a lot of the same computation\nand many are often needed to be calculated simultaneously, this module provides\na `ClassicalTransport` class that can be initialized once with all of the\nvariables necessary for calculation. It then provides all of the functionality\nas methods (please refer to its documentation).\n\nIf you only wish to calculate a single transport variable (or if just don't\nlike object oriented interfaces), we have also provided wrapper functions in\nthe main module namespace that use `ClassicalTransport` under the hood (see below,\nin the Functions section).\n\n.. warning::\n\n Please note that as PlasmaPy is a very new package, this API should not be\n considered stable yet.\n\nClassical transport models\n==========================\nIn this section, we present a broad overview of classical transport models\nimplemented within this module.\n\nBraginskii [1]_\n---------------\n\nThe original Braginskii treatment as presented in the highly cited review\npaper from 1965. Coefficients are found from expansion of the kinetic\nequation in Laguerre polynomials, truncated at the second term in their\nseries expansion (k = 2). 
This theory allows for arbitrary Hall parameter\nand include results for Z = 1, 2, 3, 4, and infinity (the case of Lorentz\ngas completely stripped of electrons, and the stationary ion approximation).\n\nSpitzer-Harm [2]_ [3]_\n----------------------\n\nThese coefficients were obtained from a numerical solution of the Fokker-\nPlanck equation. They give one of the earliest and most accurate (in the\nFokker-Planck sense) results for electron transport in simple plasma. They\nprincipally apply in the unmagnetized / parallel field case, although for\nresistivity Spitzer also calculated a famous result for a strong\nperpendicular magnetic field. Results are for Z = 1, 2, 4, 16,\nand infinity (Lorentz gas / stationary ion approximation).\n\nEpperlein-Haines [4]_\n---------------------\n\nNot yet implemented.\n\nJi-Held [5]_\n------------\n\nThis is a modern treatment of the classical transport problem that has been\ncarried out with laudable care. It allows for arbitrary hall parameter and\narbitrary Z for all coefficients. Similar to the Epperlein-Haines model,\nit corrects some known inaccuracies in the original Braginskii results,\nnotably the asymptotic behavior of alpha-cross and beta_perp as Hall ->\n+infinity. It also studies effects of electron collisions in the ion\nterms, which all other treatments have not. To neglect electron-electron\ncollisions, leave mu = 0. To consider them, specify mu and theta.\n\nReferences\n==========\n.. [1] Braginskii, S. I. \"Transport processes in a plasma.\" Reviews of\n plasma physics 1 (1965): 205. (1965)\n.. [2] Spitzer Jr, Lyman, and Richard Härm. \"Transport phenomena in a\n completely ionized gas.\" Physical Review 89.5 (1953): 977. (1953)\n.. [3] Physics of Fully Ionized Gases, L. Spitzer (1962)\n.. [4] Epperlein, E. M., and M. G. Haines. \"Plasma transport coefficients\n in a magnetic field by direct numerical solution of the\n Fokker–Planck equation.\" The Physics of fluids 29.4 (1986):\n 1029-1041.\n.. [5] Ji, Jeong-Young, and Eric D. Held. \"Closure and transport theory for\n high-collisionality electron-ion plasmas.\" Physics of Plasmas 20.4\n (2013): 042114.\n\"\"\"\n__all__ = [\n \"ClassicalTransport\",\n \"resistivity\",\n \"thermoelectric_conductivity\",\n \"ion_thermal_conductivity\",\n \"electron_thermal_conductivity\",\n \"ion_viscosity\",\n \"electron_viscosity\",\n]\n\nimport numpy as np\nimport warnings\n\nfrom astropy import units as u\nfrom plasmapy import (atomic, utils)\nfrom plasmapy.atomic.atomic import _is_electron\nfrom astropy.constants.si import (e, m_e, k_B)\nfrom plasmapy.formulary.parameters import (Hall_parameter, _grab_charge)\nfrom plasmapy.formulary.collisions import (fundamental_electron_collision_freq,\n fundamental_ion_collision_freq,\n Coulomb_logarithm)\nfrom plasmapy.utils import PhysicsError\n\n\nclass ClassicalTransport:\n r\"\"\"\n Classical transport coefficients (e.g. Braginskii, 1965).\n\n Notes\n -----\n Given that many of the transport variables share a lot of the same\n computation and many are often needed to be calculated simultaneously, this\n class can be initialized once with all of the variables necessary for\n calculation. 
It then provides all of the functionality as methods (please\n refer to their documentation).\n\n Parameters\n ----------\n T_e : ~astropy.units.Quantity\n Electron temperature in units of temperature or energy per particle\n\n n_e : ~astropy.units.Quantity\n The electron number density in units convertible to per cubic meter.\n\n T_i : ~astropy.units.Quantity\n Ion temperature in units of temperature or energy per particle\n\n n_i : ~astropy.units.Quantity\n The ion number density in units convertible to per cubic meter.\n\n ion_particle : string\n Representation of the ion species (e.g., 'p' for protons,\n 'e' for electrons, 'D+' for deuterium, or 'He-4 +1' for singly\n ionized helium-4). If no charge state information is provided,\n then the particles are assumed to be singly charged.\n\n Z : `int` or `np.inf`, optional\n The ion charge state. Overrides particle charge state if included.\n Different theories support different values of `Z`. For the original\n Braginskii model, `Z` can be any of [1, 2, 3, 4, infinity]. The Ji-Held\n model supports arbitrary `Z`. Average ionization states `Z_mean` can be\n input using this input and the Ji-Held model, although doing so may\n neglect effects caused by multiple ion populations.\n\n B : ~astropy.units.Quantity, optional\n The magnetic field strength in units convertible to tesla. Defaults\n to zero.\n\n model: string\n Indication of whose formulation from literature to use. Allowed values are:\n\n * 'Braginskii',\n * 'Spitzer-Harm',\n * 'Epperlein-Haines' (not yet implemented),\n * 'Ji-Held'.\n\n See refs [1]_, [2]_, [3]_, [4]_ and [5]_.\n\n field_orientation : string, defaults to `parallel`\n Either of 'parallel', 'par', 'perpendicular', 'perp', 'cross', or\n 'all', indicating the cardinal orientation of the magnetic field with\n respect to the transport direction of interest. Note that 'perp' refers\n to transport perpendicular to the field direction (in the direction of\n the temperature gradient), while 'cross' refers to the direction\n perpendicular to B and the gradient of temperature\n (:math:`B \\times \\nabla(T)`). The option 'all' will return a Numpy array\n of all three, `np.array((par, perp, cross))`. Does not apply to viscosities.\n\n coulomb_log_ei: float or dimensionless `~astropy.units.Quantity`, optional\n Force a particular value to be used for the electron-ion Coulomb\n logarithm (test electrons on field ions). If `None`,\n `Coulomb_logarithm` will be used. Useful for comparing calculations.\n\n V_ei: ~astropy.units.Quantity, optional\n The relative velocity between particles. Supplied to `Coulomb_logarithm`\n function, not otherwise used. If not provided, thermal velocity is\n assumed: :math:`\\mu V^2 \\sim 2 k_B T` where `mu` is the reduced mass.\n\n coulomb_log_ii: float or dimensionless `~astropy.units.Quantity`, optional\n Force a particular value to be used for the ion-ion Coulomb logarithm\n (test ions on field ions). If `None`, the PlasmaPy function\n `Coulomb_logarithm` will be used. Useful for comparing calculations.\n\n V_ii: ~astropy.units.Quantity, optional\n\n The relative velocity between particles. Supplied to\n `Coulomb_logarithm` function, not otherwise used. If not provided,\n thermal velocity is assumed: :math:`\\mu V^2 \\sim 2 k_B T`\n where `mu` is the reduced mass.\n\n hall_e: float or dimensionless `~astropy.units.Quantity`, optional\n Force a particular value to be used for the electron Hall parameter. If\n `None`, `Hall_parameter` will be used. 
Useful for comparing calculations.\n\n hall_i: float or dimensionless `~astropy.units.Quantity`, optional\n Force a particular value to be used for the ion Hall parameter. If\n `None`, `Hall_parameter` will be used. Useful for comparing\n calculations.\n\n mu: optional, float or dimensionless `astropy.units.Quantity`\n Ji-Held model only, may be used to include ion-electron effects\n on the ion transport coefficients. Defaults to zero, thus\n disabling these effects.\n\n theta: optional, float or dimensionless `~astropy.units.Quantity`\n Ji-Held model only, may be used to include ion-electron effects\n on the ion transport coefficients. Defaults to T_e / T_i. Only\n has effect if mu is non-zero.\n\n Raises\n ------\n ValueError\n On incorrect or unknown values of arguments.\n plasmapy.utils.PhysicsError\n If input or calculated values for Coulomb logarithms are nonphysical.\n\n Examples\n --------\n >>> from astropy import units as u\n >>> t = ClassicalTransport(1*u.eV, 1e20/u.m**3,\n ... 1*u.eV, 1e20/u.m**3, 'p')\n >>> t.resistivity()\n <Quantity 0.00036701 m Ohm>\n >>> t.thermoelectric_conductivity()\n <Quantity 0.711084>\n >>> t.ion_thermal_conductivity()\n <Quantity 0.01552066 W / (K m)>\n >>> t.electron_thermal_conductivity()\n <Quantity 0.38064293 W / (K m)>\n >>> t.ion_viscosity()\n <Quantity [4.62129725e-07, 4.60724824e-07, 4.60724824e-07, 0.00000000e+00,\n 0.00000000e+00] Pa s>\n >>> t.electron_viscosity()\n <Quantity [5.82273805e-09, 5.82082061e-09, 5.82082061e-09, 0.00000000e+00,\n 0.00000000e+00] Pa s>\n\n References\n ----------\n .. [1] Braginskii, S. I. \"Transport processes in a plasma.\" Reviews of\n plasma physics 1 (1965): 205. (1965)\n .. [2] Spitzer Jr, Lyman, and Richard Härm. \"Transport phenomena in a\n completely ionized gas.\" Physical Review 89.5 (1953): 977. (1953)\n .. [3] Physics of Fully Ionized Gases, L. Spitzer (1962)\n .. [4] Epperlein, E. M., and M. G. Haines. \"Plasma transport coefficients\n in a magnetic field by direct numerical solution of the\n Fokker–Planck equation.\" The Physics of fluids 29.4 (1986):\n 1029-1041.\n .. [5] Ji, Jeong-Young, and Eric D. Held. 
\"Closure and transport theory for\n high-collisionality electron-ion plasmas.\" Physics of Plasmas 20.4\n (2013): 042114.\n\n \"\"\"\n\n @utils.check_quantity(T_e={\"units\": u.K, \"can_be_negative\": False},\n n_e={\"units\": u.m ** -3},\n T_i={\"units\": u.K, \"can_be_negative\": False},\n n_i={\"units\": u.m ** -3},\n )\n def __init__(self,\n T_e: u.K,\n n_e: u.m**-3,\n T_i: u.K,\n n_i: u.m**-3,\n ion_particle,\n m_i=None,\n Z=None,\n B=0.0 * u.T,\n model='Braginskii',\n field_orientation='parallel',\n coulomb_log_ei=None,\n V_ei=None,\n coulomb_log_ii=None,\n V_ii=None,\n hall_e=None,\n hall_i=None,\n mu=None,\n theta=None,\n coulomb_log_method=\"classical\"):\n # check the model\n self.model = model.lower() # string inputs should be case insensitive\n valid_models = ['braginskii',\n 'spitzer',\n 'spitzer-harm',\n 'ji-held',\n ]\n if self.model not in valid_models:\n raise ValueError(f\"Unknown transport model '{self.model}'\")\n\n # check the field orientation\n self.field_orientation = field_orientation.lower()\n valid_fields = ['parallel',\n 'par',\n 'perpendicular',\n 'perp',\n 'cross',\n 'all',\n ]\n is_valid_field = self.field_orientation in valid_fields\n if not is_valid_field:\n raise ValueError(f\"Unknown field orientation \"\n f\"'{self.field_orientation}'\")\n\n # density and temperature units have already been checked by decorator\n # so just convert\n self.T_e = T_e.to(u.K, equivalencies=u.temperature_energy())\n self.T_i = T_i.to(u.K, equivalencies=u.temperature_energy())\n self.n_e = n_e.to(u.m ** -3)\n self.n_i = n_i.to(u.m ** -3)\n\n # get ion mass and charge state\n if m_i is None:\n try:\n self.m_i = atomic.particle_mass(ion_particle)\n except Exception:\n raise ValueError(f\"Unable to find mass of particle: \"\n f\"{ion_particle} in ClassicalTransport\")\n else:\n self.m_i = m_i.to(u.kg)\n self.Z = _grab_charge(ion_particle, Z)\n if self.Z < 0:\n raise ValueError(\"Z is not allowed to be negative!\") # TODO remove?\n\n # decide on the particle string for the electrons\n self.e_particle = 'e'\n self.ion_particle = ion_particle\n\n # save other arguments\n self.B = B\n self.V_ei = V_ei\n self.V_ii = V_ii\n\n # calculate Coulomb logs if not forced in input\n if coulomb_log_ei is not None:\n self.coulomb_log_ei = coulomb_log_ei\n else:\n self.coulomb_log_ei = Coulomb_logarithm(T_e,\n n_e,\n (self.e_particle,\n self.ion_particle),\n V_ei,\n method=coulomb_log_method)\n\n if self.coulomb_log_ei < 1:\n # TODO discuss whether this is not too strict\n raise PhysicsError(f\"Coulomb logarithm is {coulomb_log_ei} (below 1),\"\n \"this is probably not physical!\")\n elif self.coulomb_log_ei < 4:\n warnings.warn(f\"Coulomb logarithm is {coulomb_log_ei},\"\n f\" you might have strong coupling effects\",\n utils.CouplingWarning)\n\n if coulomb_log_ii is not None:\n self.coulomb_log_ii = coulomb_log_ii\n else:\n self.coulomb_log_ii = Coulomb_logarithm(T_i,\n n_e, # this is not a typo!\n (self.ion_particle,\n self.ion_particle),\n V_ii,\n method=coulomb_log_method)\n\n if self.coulomb_log_ii < 1:\n # TODO discuss whether this is not too strict\n raise PhysicsError(f\"Coulomb logarithm is {coulomb_log_ii} (below 1),\"\n \"this is probably not physical!\")\n elif self.coulomb_log_ii < 4:\n warnings.warn(f\"Coulomb logarithm is {coulomb_log_ii},\"\n f\" you might have strong coupling effects\",\n utils.CouplingWarning)\n\n # calculate Hall parameters if not forced in input\n if hall_e is not None:\n self.hall_e = hall_e\n else:\n self.hall_e = Hall_parameter(n_e,\n T_e,\n B,\n 
self.ion_particle,\n self.e_particle,\n coulomb_log_ei,\n V_ei,\n coulomb_log_method=coulomb_log_method)\n if hall_i is not None:\n self.hall_i = hall_i\n else:\n self.hall_i = Hall_parameter(n_i,\n T_i,\n B,\n self.ion_particle,\n self.ion_particle,\n coulomb_log_ii,\n V_ii,\n coulomb_log_method=coulomb_log_method)\n # set up the ion non-dimensional coefficients for the Ji-Held model\n self.mu = 0 if mu is None else mu # disable the JH special features by default\n # self.mu = m_e / self.m_i # enable the JH special features\n self.theta = self.T_e / self.T_i if theta is None else theta\n\n def resistivity(self) -> u.Ohm * u.m:\n \"\"\"\n Calculate the resistivity.\n\n Notes\n -----\n\n The resistivity here is defined similarly to solid conductors, and thus\n represents the classical plasmas' property to resist the flow of\n electrical current. The result is in units of ohm * m, so if you\n assume where the current is flowing in the plasma (length and\n cross-sectional area), you could calculate a DC resistance of the\n plasma in ohms as resistivity * length / cross-sectional area.\n\n Experimentalists with plasma discharges may observe different V = IR\n Ohm's law behavior than suggested by the resistance calculated here,\n for reasons such as the occurrence of plasma sheath layers at the\n electrodes or the plasma not satisfying the classical assumptions.\n\n Returns\n -------\n astropy.units.quantity.Quantity\n\n \"\"\"\n alpha_hat = _nondim_resistivity(self.hall_e,\n self.Z,\n self.e_particle,\n self.model,\n self.field_orientation)\n tau_e = 1 / fundamental_electron_collision_freq(self.T_e,\n self.n_e,\n self.ion_particle,\n self.coulomb_log_ei,\n self.V_ei)\n\n alpha = alpha_hat / (self.n_e * e ** 2 * tau_e / m_e)\n return alpha.to(u.ohm * u.m)\n\n def thermoelectric_conductivity(self):\n \"\"\"\n Calculate the thermoelectric conductivity.\n\n Notes\n -----\n To be improved.\n\n Returns\n -------\n astropy.units.quantity.Quantity\n\n \"\"\"\n beta_hat = _nondim_te_conductivity(self.hall_e,\n self.Z,\n self.e_particle,\n self.model,\n self.field_orientation)\n return u.Quantity(beta_hat)\n\n def ion_thermal_conductivity(self) -> u.W / u.m / u.K:\n \"\"\"\n Calculate the thermal conductivity for ions.\n\n Notes\n -----\n This is the classical plasma ions' ability to conduct energy and heat,\n defined similarly to other materials. The result is a conductivity in\n units of W / m / K, so if you assume you know where the heat is flowing\n (temperature gradient, cross-sectional area) you can calculate the\n energy transport in Watts as conductivity * cross-sectional area *\n temperature gradient. 
In lab plasmas, typically the energy is flowing\n out of your high-temperature plasma to something else, like the walls\n of your device, and you are sad about this.\n\n Returns\n -------\n astropy.units.quantity.Quantity\n\n See also\n --------\n ion_thermal_conductivity\n\n \"\"\"\n kappa_hat = _nondim_thermal_conductivity(self.hall_i,\n self.Z,\n self.ion_particle,\n self.model,\n self.field_orientation,\n self.mu,\n self.theta)\n tau_i = 1 / fundamental_ion_collision_freq(self.T_i,\n self.n_i,\n self.ion_particle,\n self.coulomb_log_ii,\n self.V_ii)\n kappa = kappa_hat * (self.n_i * k_B ** 2 * self.T_i * tau_i / self.m_i)\n return kappa.to(u.W / u.m / u.K)\n\n def electron_thermal_conductivity(self) -> u.W / u.m / u.K:\n \"\"\"\n Calculate the thermal conductivity for electrons.\n\n Notes\n -----\n This is quite similar to the ion thermal conductivity, except that it's\n for the plasma electrons. In a typical unmagnetized plasma, the\n electron thermal conductivity is much higher than the ions and will\n dominate, due to the electrons' low mass and fast speeds.\n\n In a strongly magnetized plasma, following the classical transport\n analysis, you calculate that the perpendicular-field thermal\n conductivity becomes greatly reduced for the ions and electrons, with\n the electrons actually being restrained even more than the ions due to\n their low mass and small gyroradius. In reality, the electrons and ions\n are pulling on each other strongly due to their opposing charges, so\n you have the situation of ambipolar diffusion.\n\n This situation has been likened to an energetic little child (the\n electrons) not wanting to be pulled away from the playground (the\n magnetic field) by the parents (the ions).\n\n The ultimate rate must typically be in between the individual rates for\n electrons and ions, so at least you can get some bounds from this type\n of analysis.\n\n Returns\n -------\n astropy.units.quantity.Quantity\n\n See also\n --------\n ion_thermal_conductivity\n\n \"\"\"\n kappa_hat = _nondim_thermal_conductivity(self.hall_e,\n self.Z,\n self.e_particle,\n self.model,\n self.field_orientation,\n self.mu,\n self.theta)\n tau_e = 1 / fundamental_electron_collision_freq(self.T_e,\n self.n_e,\n self.ion_particle,\n self.coulomb_log_ei,\n self.V_ei)\n kappa = kappa_hat * (self.n_e * k_B ** 2 * self.T_e * tau_e / m_e)\n return kappa.to(u.W / u.m / u.K)\n\n def ion_viscosity(self) -> u.Pa * u.s:\n \"\"\"\n Calculate the ion viscosity.\n\n Notes\n -----\n This is the dynamic viscosity that you find for ions in the classical\n plasma, similar to the viscosity of air or water or honey. The big\n effect is the T^5/2 dependence, so as classical plasmas get hotter they\n become dramatically more viscous. 
The ion viscosity typically dominates\n over the electron viscosity.\n\n Returns\n -------\n astropy.units.quantity.Quantity\n\n See also\n --------\n electron_viscosity\n\n \"\"\"\n eta_hat = _nondim_viscosity(self.hall_i,\n self.Z,\n self.ion_particle,\n self.model,\n self.field_orientation,\n self.mu,\n self.theta)\n tau_i = 1 / fundamental_ion_collision_freq(self.T_i,\n self.n_i,\n self.ion_particle,\n self.coulomb_log_ii,\n self.V_ii)\n common_factor = self.n_i * k_B * self.T_i * tau_i\n eta1 = np.array(eta_hat) * common_factor\n if not np.isclose(self.hall_i, 0, rtol=1e-8):\n eta1[1:3] /= self.hall_i ** 2\n eta1[3:] /= self.hall_i\n if eta1[0].unit == eta1[2].unit == eta1[4].unit:\n unit_val = eta1[0].unit\n eta = (eta1.value * unit_val).to(u.Pa * u.s)\n return eta\n\n def electron_viscosity(self) -> u.Pa * u.s:\n \"\"\"\n Calculate the electron viscosity.\n\n Notes\n -----\n This is the dynamic viscosity that you find for electrons in the\n classical plasma, similar to the viscosity of air or water or honey.\n The big effect is the T^5/2 dependence, so as classical plasmas get\n hotter they become dramatically more viscous. The ion viscosity\n typically dominates over the electron viscosity.\n\n Returns\n -------\n astropy.units.quantity.Quantity\n\n See also\n --------\n ion_viscosity\n\n \"\"\"\n eta_hat = _nondim_viscosity(self.hall_e,\n self.Z,\n self.e_particle,\n self.model,\n self.field_orientation,\n self.mu,\n self.theta)\n tau_e = 1 / fundamental_electron_collision_freq(self.T_e,\n self.n_e,\n self.ion_particle,\n self.coulomb_log_ei,\n self.V_ei)\n common_factor = (self.n_e * k_B * self.T_e * tau_e)\n if np.isclose(self.hall_e, 0, rtol=1e-8):\n eta1 = (eta_hat[0] * common_factor,\n eta_hat[1] * common_factor,\n eta_hat[2] * common_factor,\n eta_hat[3] * common_factor,\n eta_hat[4] * common_factor)\n else:\n eta1 = (eta_hat[0] * common_factor,\n eta_hat[1] * common_factor / self.hall_e ** 2,\n eta_hat[2] * common_factor / self.hall_e ** 2,\n eta_hat[3] * common_factor / self.hall_e,\n eta_hat[4] * common_factor / self.hall_e)\n if eta1[0].unit == eta1[2].unit and eta1[2].unit == eta1[4].unit:\n unit_val = eta1[0].unit\n eta = (np.array((eta1[0].value,\n eta1[1].value,\n eta1[2].value,\n eta1[3].value,\n eta1[4].value)) * unit_val).to(u.Pa * u.s)\n return eta\n\n def all_variables(self) -> dict:\n \"\"\"\n Return all transport variables as a dictionary.\n\n Returns\n -------\n dict\n\n \"\"\"\n d = {}\n d['resistivity'] = self.resistivity()\n d['thermoelectric conductivity'] = self.thermoelectric_conductivity()\n d['electron thermal conductivity'] = self.electron_thermal_conductivity()\n d['electron viscosity'] = self.electron_viscosity()\n if self.model != \"spitzer\":\n d['ion thermal conductivity'] = self.ion_thermal_conductivity()\n d['ion viscosity'] = self.ion_viscosity()\n return d\n\n\ndef resistivity(T_e,\n n_e,\n T_i,\n n_i,\n ion_particle,\n m_i=None,\n Z=None,\n B=0.0 * u.T,\n model='Braginskii',\n field_orientation='parallel',\n mu=None,\n theta=None,\n coulomb_log_method=\"classical\") -> u.Ohm * u.m:\n \"\"\"\n Calculate the resistivity.\n\n Notes\n -----\n\n The resistivity here is defined similarly to solid conductors, and thus\n represents the classical plasmas' property to resist the flow of\n electrical current. 
The result is in units of ohm * m, so if you\n assume where the current is flowing in the plasma (length and\n cross-sectional area), you could calculate a DC resistance of the\n plasma in ohms as resistivity * length / cross-sectional area.\n\n Experimentalists with plasma discharges may observe different V = IR\n Ohm's law behavior than suggested by the resistance calculated here,\n for reasons such as the occurrence of plasma sheath layers at the\n electrodes or the plasma not satisfying the classical assumptions.\n\n Returns\n -------\n astropy.units.quantity.Quantity\n\n \"\"\"\n ct = ClassicalTransport(T_e, n_e, T_i, n_i, ion_particle, m_i,\n Z=Z, B=B, model=model,\n field_orientation=field_orientation,\n mu=mu, theta=theta,\n coulomb_log_method=coulomb_log_method)\n return ct.resistivity()\n\n\ndef thermoelectric_conductivity(T_e,\n n_e,\n T_i,\n n_i,\n ion_particle,\n m_i=None,\n Z=None,\n B=0.0 * u.T,\n model='Braginskii',\n field_orientation='parallel',\n mu=None,\n theta=None,\n coulomb_log_method=\"classical\"):\n \"\"\"Calculate the thermoelectric conductivity.\"\"\"\n ct = ClassicalTransport(T_e,\n n_e,\n T_i,\n n_i,\n ion_particle,\n m_i,\n Z=Z,\n B=B,\n model=model,\n field_orientation=field_orientation,\n mu=mu,\n theta=theta,\n coulomb_log_method=coulomb_log_method)\n return ct.thermoelectric_conductivity()\n\ndef ion_thermal_conductivity(T_e,\n n_e,\n T_i,\n n_i,\n ion_particle,\n m_i=None,\n Z=None,\n B=0.0 * u.T,\n model='Braginskii',\n field_orientation='parallel',\n mu=None,\n theta=None,\n coulomb_log_method=\"classical\") -> u.W / u.m / u.K:\n \"\"\"\n Calculate the thermal conductivity for ions.\n\n Notes\n -----\n This is the classical plasma ions' ability to conduct energy and heat,\n defined similarly to other materials. The result is a conductivity in units\n of W / m / K, so if you assume you know where the heat is flowing\n (temperature gradient, cross-sectional area) you can calculate the energy\n transport in Watts as conductivity * cross-sectional area * temperature\n gradient. In lab plasmas, typically the energy is flowing out of your\n high-temperature plasma to something else, like the walls of your device,\n and you are sad about this.\n\n Returns\n -------\n astropy.units.quantity.Quantity\n\n See also\n --------\n ion_thermal_conductivity\n\n \"\"\"\n ct = ClassicalTransport(T_e,\n n_e,\n T_i,\n n_i,\n ion_particle,\n m_i,\n Z=Z,\n B=B,\n model=model,\n field_orientation=field_orientation,\n mu=mu,\n theta=theta,\n coulomb_log_method=coulomb_log_method)\n return ct.ion_thermal_conductivity()\n\n\ndef electron_thermal_conductivity(T_e,\n n_e,\n T_i,\n n_i,\n ion_particle,\n m_i=None,\n Z=None,\n B=0.0 * u.T,\n model='Braginskii',\n field_orientation='parallel',\n mu=None,\n theta=None,\n coulomb_log_method=\"classical\") -> u.W / u.m / u.K:\n \"\"\"\n Calculate the thermal conductivity for electrons.\n\n Notes\n -----\n This is quite similar to the ion thermal conductivity, except that it's for\n the plasma electrons. In a typical unmagnetized plasma, the electron\n thermal conductivity is much higher than the ions and will dominate, due to\n the electrons' low mass and fast speeds.\n\n In a strongly magnetized plasma, following the classical transport\n analysis, you calculate that the perpendicular-field thermal conductivity\n becomes greatly reduced for the ions and electrons, with the electrons\n actually being restrained even more than the ions due to their low mass and\n small gyroradius. 
In reality, the electrons and ions are pulling on each\n other strongly due to their opposing charges, so you have the situation of\n ambipolar diffusion.\n\n This situation has been likened to an energetic little child (the\n electrons) not wanting to be pulled away from the playground (the magnetic\n field) by the parents (the ions).\n\n The ultimate rate must typically be in between the individual rates for\n electrons and ions, so at least you can get some bounds from this type of\n analysis.\n\n Returns\n -------\n astropy.units.quantity.Quantity\n\n See also\n --------\n ion_thermal_conductivity\n\n \"\"\"\n ct = ClassicalTransport(T_e,\n n_e,\n T_i,\n n_i,\n ion_particle,\n m_i,\n Z=Z,\n B=B,\n model=model,\n field_orientation=field_orientation,\n mu=mu,\n theta=theta,\n coulomb_log_method=coulomb_log_method)\n return ct.electron_thermal_conductivity()\n\n\ndef ion_viscosity(T_e,\n n_e,\n T_i,\n n_i,\n ion_particle,\n m_i=None,\n Z=None,\n B=0.0 * u.T,\n model='Braginskii',\n field_orientation='parallel',\n mu=None,\n theta=None,\n coulomb_log_method=\"classical\") -> u.Pa * u.s:\n \"\"\"\n Calculate the ion viscosity.\n\n Notes\n -----\n This is the dynamic viscosity that you find for ions in the classical\n plasma, similar to the viscosity of air or water or honey. The big\n effect is the T^5/2 dependence, so as classical plasmas get hotter they\n become dramatically more viscous. The ion viscosity typically dominates\n over the electron viscosity.\n\n Returns\n -------\n astropy.units.quantity.Quantity\n\n See also\n --------\n electron_viscosity\n\n \"\"\"\n ct = ClassicalTransport(T_e,\n n_e,\n T_i,\n n_i,\n ion_particle,\n m_i,\n Z=Z,\n B=B,\n model=model,\n field_orientation=field_orientation,\n mu=mu,\n theta=theta,\n coulomb_log_method=coulomb_log_method)\n return ct.ion_viscosity()\n\n\ndef electron_viscosity(T_e,\n n_e,\n T_i,\n n_i,\n ion_particle,\n m_i=None,\n Z=None,\n B=0.0 * u.T,\n model='Braginskii',\n field_orientation='parallel',\n mu=None,\n theta=None,\n coulomb_log_method=\"classical\") -> u.Pa * u.s:\n \"\"\"\n Calculate the electron viscosity.\n\n Notes\n -----\n This is the dynamic viscosity that you find for electrons in the\n classical plasma, similar to the viscosity of air or water or honey.\n The big effect is the T^5/2 dependence, so as classical plasmas get\n hotter they become dramatically more viscous. The ion viscosity\n typically dominates over the electron viscosity.\n\n Returns\n -------\n astropy.units.quantity.Quantity\n\n See also\n --------\n ion_viscosity\n\n \"\"\"\n ct = ClassicalTransport(T_e,\n n_e,\n T_i,\n n_i,\n ion_particle,\n m_i,\n Z=Z,\n B=B,\n model=model,\n field_orientation=field_orientation,\n mu=mu,\n theta=theta,\n coulomb_log_method=coulomb_log_method)\n return ct.electron_viscosity()\n\n\ndef _nondim_thermal_conductivity(hall, Z,\n particle,\n model,\n field_orientation,\n mu=None,\n theta=None):\n \"\"\"Calculate dimensionless classical thermal conductivity coefficients.\n\n This function is a switchboard / wrapper that calls the appropriate\n model-specific functions depending on which model is specified and which\n type of particle (electron or ion) is input. 
Non-electrons are assumed to\n be ions.\n \"\"\"\n if _is_electron(particle):\n if model == 'spitzer-harm' or model == 'spitzer':\n kappa_hat = _nondim_tc_e_spitzer(Z)\n elif model == 'braginskii':\n kappa_hat = _nondim_tc_e_braginskii(hall, Z, field_orientation)\n elif model == 'ji-held':\n kappa_hat = _nondim_tc_e_ji_held(hall, Z, field_orientation)\n else:\n raise ValueError(f\"Unrecognized model '{model}' in \"\n \"_nondim_thermal_conductivity\")\n else:\n if model == 'braginskii':\n kappa_hat = _nondim_tc_i_braginskii(hall, field_orientation)\n elif model == 'ji-held':\n kappa_hat = _nondim_tc_i_ji_held(hall, Z, mu, theta, field_orientation)\n elif model == 'spitzer-harm' or model == 'spitzer':\n raise NotImplementedError(\"Ion thermal conductivity is not \"\n \"implemented in the Spitzer model.\")\n else:\n raise ValueError(f\"Unrecognized model '{model}' in \"\n \"_nondim_thermal_conductivity\")\n return kappa_hat\n\n\ndef _nondim_viscosity(hall,\n Z,\n particle,\n model,\n field_orientation,\n mu=None,\n theta=None):\n \"\"\"Calculate dimensionless classical viscosity coefficients.\n\n This function is a switchboard / wrapper that calls the appropriate\n model-specific functions depending on which model is specified and which\n type of particle (electron or ion) is input. Non-electrons are assumed to\n be ions.\n \"\"\"\n if _is_electron(particle):\n if model == 'braginskii':\n eta_hat = _nondim_visc_e_braginskii(hall, Z)\n elif model == 'ji-held':\n eta_hat = _nondim_visc_e_ji_held(hall, Z)\n else:\n raise ValueError(f\"Unrecognized model '{model}' in \"\n \"_nondim_viscosity\")\n else:\n if model == 'braginskii':\n eta_hat = _nondim_visc_i_braginskii(hall)\n elif model == 'ji-held':\n eta_hat = _nondim_visc_i_ji_held(hall, Z, mu, theta)\n elif model == 'spitzer-harm' or model == 'spitzer':\n raise NotImplementedError(\"Ion viscosity is not \"\n \"implemented in the Spitzer model.\")\n else:\n raise ValueError(f\"Unrecognized model '{model}' in \"\n \"_nondim_viscosity\")\n return eta_hat\n\n\ndef _nondim_resistivity(hall, Z, particle, model, field_orientation):\n \"\"\"Calculate dimensionless classical resistivity coefficients.\n\n This function is a switchboard / wrapper that calls the appropriate\n model-specific functions depending on which model is specified.\n \"\"\"\n if model == 'spitzer-harm' or model == 'spitzer':\n alpha_hat = _nondim_resist_spitzer(Z, field_orientation)\n elif model == 'braginskii':\n alpha_hat = _nondim_resist_braginskii(hall, Z, field_orientation)\n elif model == 'ji-held':\n alpha_hat = _nondim_resist_ji_held(hall, Z, field_orientation)\n else:\n raise ValueError(f\"Unrecognized model '{model}' in \"\n \"_nondim_resistivity\")\n return alpha_hat\n\n\ndef _nondim_te_conductivity(hall, Z, particle, model, field_orientation):\n \"\"\"Calculate dimensionless classical thermoelectric coefficients.\n\n This function is a switchboard / wrapper that calls the appropriate\n model-specific functions depending on which model is specified.\n \"\"\"\n if model == 'spitzer-harm' or model == 'spitzer':\n beta_hat = _nondim_tec_spitzer(Z)\n elif model == 'braginskii':\n beta_hat = _nondim_tec_braginskii(hall, Z, field_orientation)\n elif model == 'ji-held':\n beta_hat = _nondim_tec_ji_held(hall, Z, field_orientation)\n else:\n raise ValueError(f\"Unrecognized model '{model}' in \"\n \"_nondim_te_conductivity\")\n return beta_hat\n\n\ndef _check_Z(allowed_Z, Z):\n \"\"\"Determine if the input Z value is okay given the list of allowed_Z.\"\"\"\n # first, determine if 
arbitrary Z values are allowed in the theory\n arbitrary_Z_allowed = False\n the_arbitrary_idx = np.nan\n for idx, allowed_Z_val in enumerate(allowed_Z):\n if allowed_Z_val == 'arbitrary':\n arbitrary_Z_allowed = True\n the_arbitrary_idx = idx\n # next, search the allowed_Z for a match to the current Z\n Z_idx = np.nan\n for idx, allowed_Z_val in enumerate(allowed_Z):\n if Z == allowed_Z_val:\n Z_idx = idx\n # at this point we have looped through allowed_Z and either found a match\n # or not. If we haven't found a match and arbitrary Z aren't allowed, break\n if np.isnan(Z_idx) and not arbitrary_Z_allowed:\n raise utils.PhysicsError(f\"{Z} is not an allowed Z value\")\n elif np.isnan(Z_idx): # allowed arbitrary Z\n # return a Z_idx pointing to the 'arbitrary'\n Z_idx = the_arbitrary_idx\n else: # allowed Z\n pass\n # we have got the Z_idx we want. return\n return Z_idx\n\n\ndef _get_spitzer_harm_coeffs(Z):\n \"\"\"Return numerical coefficients from Spitzer-Harm '53.\n\n Table III, Spitzer and Harm, Phys. Rev. Vol 89, 5, 1953\n \"\"\"\n allowed_Z = [1, 2, 4, 16, np.inf]\n Z_idx = _check_Z(allowed_Z, Z)\n gamma_E = [0.5816, 0.6833, 0.7849, 0.9225, 1.0000]\n gamma_T = [0.2727, 0.4137, 0.5714, 0.8279, 1.0000]\n delta_E = [0.4652, 0.5787, 0.7043, 0.8870, 1.0000]\n delta_T = [0.2252, 0.3563, 0.5133, 0.7907, 1.0000]\n return gamma_E[Z_idx], gamma_T[Z_idx], delta_E[Z_idx], delta_T[Z_idx]\n\n\ndef _nondim_tc_e_spitzer(Z):\n \"\"\"Dimensionless electron thermal conductivity - Spitzer.\n\n This result is for parallel field or unmagnetized plasma only.\n \"\"\"\n (gamma_E, gamma_T, delta_E, delta_T) = _get_spitzer_harm_coeffs(Z)\n kappa = (64 / np.pi) * delta_T * (5 / 3 - (gamma_T * delta_E) / (delta_T * gamma_E))\n return kappa\n\n\ndef _nondim_resist_spitzer(Z, field_orientation):\n \"\"\"\n Dimensionless resistivity - Spitzer.\n\n These are results for both parallel-field / unmagnetized plasmas as well\n as perpendicular-field / strongly magnetized plasma. Summary description\n in Physics of Fully Ionized Gases, Spitzer\n \"\"\"\n alpha_perp = 1\n if field_orientation == 'perpendicular' or field_orientation == 'perp':\n return alpha_perp\n\n (gamma_E, gamma_T, delta_E, delta_T) = _get_spitzer_harm_coeffs(Z)\n alpha_par = (3 * np.pi / 32) * (1 / gamma_E)\n if field_orientation == 'parallel' or field_orientation == 'par':\n return alpha_par\n # alpha_par = 0.5064 # Z = 1\n\n if field_orientation == 'all':\n return alpha_par, alpha_perp\n\n\ndef _nondim_tec_spitzer(Z):\n \"\"\"Dimensionless thermoelectric conductivity - Spitzer.\n\n This result is for parallel field or unmagnetized plasma only.\n \"\"\"\n (gamma_E, gamma_T, delta_E, delta_T) = _get_spitzer_harm_coeffs(Z)\n beta = 5 / 2 * (8 / 5 * (delta_E / gamma_E) - 1)\n # beta = 0.703\n return beta\n\n\ndef _nondim_tc_e_braginskii(hall, Z, field_orientation):\n \"\"\"Dimensionless electron thermal conductivity - Braginskii.\n\n Braginskii, S. I. 
\"Transport processes in a plasma.\" Reviews of plasma\n physics 1 (1965): 205.\n \"\"\"\n allowed_Z = [1, 2, 3, 4, np.inf]\n Z_idx = _check_Z(allowed_Z, Z)\n\n # fixing overflow errors when exponentiating hall by making a float\n # instead of an int\n hall = float(hall)\n\n delta_0 = [3.7703, 1.0465, 0.5814, 0.4106, 0.0961]\n delta_1 = [14.79, 10.80, 9.618, 9.055, 7.482]\n gamma_1_prime = [4.664, 3.957, 3.721, 3.604, 3.25]\n gamma_0_prime = [11.92, 5.118, 3.525, 2.841, 1.20]\n gamma_1_doubleprime = [2.500, 2.500, 2.500, 2.500, 2.500]\n gamma_0_doubleprime = [21.67, 15.37, 13.53, 12.65, 10.23]\n\n gamma_0 = gamma_0_prime[Z_idx] / delta_0[Z_idx]\n Delta = hall ** 4 + delta_1[Z_idx] * hall ** 2 + delta_0[Z_idx]\n\n if field_orientation == 'parallel' or field_orientation == 'par':\n kappa_par = gamma_0\n return kappa_par\n\n if field_orientation == 'perpendicular' or field_orientation == 'perp':\n kappa_perp = (gamma_1_prime[Z_idx] * hall **\n 2 + gamma_0_prime[Z_idx]) / Delta\n return kappa_perp\n\n if field_orientation == 'cross':\n kappa_cross = (gamma_1_doubleprime[Z_idx] * hall ** 3 +\n gamma_0_doubleprime[Z_idx] * hall) / Delta\n return kappa_cross\n\n if field_orientation == 'all':\n kappa_par = gamma_0\n\n kappa_perp = (gamma_1_prime[Z_idx] * hall **\n 2 + gamma_0_prime[Z_idx]) / Delta\n\n kappa_cross = (gamma_1_doubleprime[Z_idx] * hall ** 3 +\n gamma_0_doubleprime[Z_idx] * hall) / Delta\n return np.array((kappa_par, kappa_perp, kappa_cross))\n\n\ndef _nondim_tc_i_braginskii(hall, field_orientation):\n \"\"\"Dimensionless ion thermal conductivity - Braginskii.\n\n Braginskii, S. I. \"Transport processes in a plasma.\" Reviews of plasma\n physics 1 (1965): 205.\n \"\"\"\n # fixing overflow errors when exponentiating hall by making a float\n # instead of an int\n hall = float(hall)\n\n if field_orientation == 'parallel' or field_orientation == 'par':\n kappa_par_coeff_0 = 3.906\n kappa_par = kappa_par_coeff_0\n return kappa_par\n\n delta_1 = 2.70\n delta_0 = 0.677\n Delta = hall ** 4 + delta_1 * hall ** 2 + delta_0\n\n if field_orientation == 'perpendicular' or field_orientation == 'perp':\n kappa_perp_coeff_2 = 2.0\n kappa_perp_coeff_0 = 2.645\n kappa_perp = (kappa_perp_coeff_2 * hall ** 2 + kappa_perp_coeff_0) / Delta\n return kappa_perp\n\n if field_orientation == 'cross':\n kappa_cross_coeff_3 = 2.5\n kappa_cross_coeff_1 = 4.65\n kappa_cross = (kappa_cross_coeff_3 * hall ** 3 + kappa_cross_coeff_1 * hall) / Delta\n return kappa_cross\n\n if field_orientation == 'all':\n kappa_par_coeff_0 = 3.906\n kappa_par = kappa_par_coeff_0\n\n kappa_perp_coeff_2 = 2.0\n kappa_perp_coeff_0 = 2.645\n kappa_perp = (kappa_perp_coeff_2 * hall ** 2 + kappa_perp_coeff_0) / Delta\n\n kappa_cross_coeff_3 = 2.5\n kappa_cross_coeff_1 = 4.65\n kappa_cross = (kappa_cross_coeff_3 * hall ** 3 + kappa_cross_coeff_1 * hall) / Delta\n return np.array((kappa_par, kappa_perp, kappa_cross))\n\n\ndef _nondim_visc_e_braginskii(hall, Z):\n \"\"\"Dimensionless electron viscosity - Braginskii.\n\n Braginskii, S. I. 
\"Transport processes in a plasma.\" Reviews of plasma\n physics 1 (1965): 205.\n \"\"\"\n # fixing overflow errors when exponentiating hall by making a float\n # instead of an int\n hall = float(hall)\n allowed_Z = [1]\n _check_Z(allowed_Z, Z)\n eta_prime_0 = 0.733\n eta_doubleprime_2 = 2.05\n eta_doubleprime_0 = 8.50\n eta_tripleprime_2 = 1.0\n eta_tripleprime_0 = 7.91\n delta_1 = 13.8\n delta_0 = 11.6\n eta_0_e = eta_prime_0\n\n def eta_2(hall):\n Delta = hall ** 4 + delta_1 * hall ** 2 + delta_0\n return (eta_doubleprime_2 * hall ** 2 + eta_doubleprime_0) / Delta\n\n eta_2_e = eta_2(hall)\n eta_1_e = eta_2(2 * hall)\n\n def f_eta_4(hall):\n Delta = hall ** 4 + delta_1 * hall ** 2 + delta_0\n return (eta_tripleprime_2 * hall ** 3 + eta_tripleprime_0 * hall) / Delta\n\n eta_4_e = f_eta_4(hall)\n eta_3_e = f_eta_4(2 * hall)\n return np.array((eta_0_e, eta_1_e, eta_2_e, eta_3_e, eta_4_e))\n\n\ndef _nondim_visc_i_braginskii(hall):\n \"\"\"Dimensionless ion viscosity - Braginskii.\n\n Braginskii, S. I. \"Transport processes in a plasma.\" Reviews of plasma\n physics 1 (1965): 205.\n \"\"\"\n eta_prime_0 = 0.96\n eta_doubleprime_2 = 6 / 5\n eta_doubleprime_0 = 2.23\n eta_tripleprime_2 = 1.0\n eta_tripleprime_0 = 2.38\n delta_1 = 4.03\n delta_0 = 2.33\n eta_0_i = eta_prime_0\n\n # fixing overflow errors when exponentiating hall by making a float\n # instead of an int\n hall = float(hall)\n\n def f_eta_2(hall):\n Delta = hall ** 4 + delta_1 * hall ** 2 + delta_0\n return (eta_doubleprime_2 * hall ** 2 + eta_doubleprime_0) / Delta\n\n eta_2_i = f_eta_2(hall)\n eta_1_i = f_eta_2(2 * hall)\n\n def f_eta_4(hall):\n Delta = hall ** 4 + delta_1 * hall ** 2 + delta_0\n return (eta_tripleprime_2 * hall ** 3 +\n eta_tripleprime_0 * hall) / Delta\n\n eta_4_i = f_eta_4(hall)\n eta_3_i = f_eta_4(2 * hall)\n return np.array((eta_0_i, eta_1_i, eta_2_i, eta_3_i, eta_4_i))\n\n\ndef _nondim_resist_braginskii(hall, Z, field_orientation):\n \"\"\"Dimensionless resistivity - Braginskii.\n\n Braginskii, S. I. 
\"Transport processes in a plasma.\" Reviews of plasma\n physics 1 (1965): 205.\n \"\"\"\n allowed_Z = [1, 2, 3, 4, np.inf]\n Z_idx = _check_Z(allowed_Z, Z)\n\n # fixing overflow errors when exponentiating hall by making a float\n # instead of an int\n hall = float(hall)\n\n # alpha_0 = 0.5129\n delta_0 = [3.7703, 1.0465, 0.5814, 0.4106, 0.0961]\n delta_1 = [14.79, 10.80, 9.618, 9.055, 7.482]\n alpha_1_prime = [6.416, 5.523, 5.226, 5.077, 4.63]\n alpha_0_prime = [1.837, 0.5956, 0.3515, 0.2566, 0.0678]\n alpha_1_doubleprime = [1.704, 1.704, 1.704, 1.704, 1.704]\n alpha_0_doubleprime = [0.7796, 0.3439, 0.2400, 0.1957, 0.0940]\n\n alpha_0 = 1 - alpha_0_prime[Z_idx] / delta_0[Z_idx]\n Delta = hall ** 4 + delta_1[Z_idx] * hall ** 2 + delta_0[Z_idx]\n\n if field_orientation == 'parallel' or field_orientation == 'par':\n alpha_par = alpha_0\n return alpha_par\n\n if field_orientation == 'perpendicular' or field_orientation == 'perp':\n alpha_perp = (1 - (alpha_1_prime[Z_idx] * hall ** 2 + alpha_0_prime[Z_idx]) / Delta)\n return alpha_perp\n\n if field_orientation == 'cross':\n alpha_cross = (alpha_1_doubleprime[Z_idx] * hall ** 3 +\n alpha_0_doubleprime[Z_idx] * hall) / Delta\n return alpha_cross\n\n if field_orientation == 'all':\n alpha_par = alpha_0\n\n alpha_perp = (1 - (alpha_1_prime[Z_idx] * hall ** 2 + alpha_0_prime[Z_idx]) / Delta)\n\n alpha_cross = (alpha_1_doubleprime[Z_idx] * hall ** 3 +\n alpha_0_doubleprime[Z_idx] * hall) / Delta\n return np.array((alpha_par, alpha_perp, alpha_cross))\n\n\ndef _nondim_tec_braginskii(hall, Z, field_orientation):\n \"\"\"Dimensionless thermoelectric conductivity - Braginskii.\n\n Braginskii, S. I. \"Transport processes in a plasma.\" Reviews of plasma\n physics 1 (1965): 205.\n \"\"\"\n allowed_Z = [1, 2, 3, 4, np.inf]\n Z_idx = _check_Z(allowed_Z, Z)\n # fixing overflow errors when exponentiating hall by making a float\n # instead of an int\n hall = float(hall)\n\n delta_0 = [3.7703, 1.0465, 0.5814, 0.4106, 0.0961]\n delta_1 = [14.79, 10.80, 9.618, 9.055, 7.482]\n beta_1_prime = [5.101, 4.450, 4.233, 4.124, 3.798]\n beta_0_prime = [2.681, 0.9473, 0.5905, 0.4478, 0.1461]\n beta_1_doubleprime = [1.5, 1.5, 1.5, 1.5, 1.5]\n beta_0_doubleprime = [3.053, 1.784, 1.442, 1.285, 0.877]\n\n Delta = hall ** 4 + delta_1[Z_idx] * hall ** 2 + delta_0[Z_idx]\n beta_0 = beta_0_prime[Z_idx] / delta_0[Z_idx]\n # beta_0 = 0.7110\n\n if field_orientation == 'parallel' or field_orientation == 'par':\n beta_par = beta_0\n return beta_par\n\n if field_orientation == 'perpendicular' or field_orientation == 'perp':\n beta_perp = (beta_1_prime[Z_idx] * hall ** 2 + beta_0_prime[Z_idx]) / Delta\n return beta_perp\n\n if field_orientation == 'cross':\n beta_cross = (beta_1_doubleprime[Z_idx] * hall ** 3 +\n beta_0_doubleprime[Z_idx] * hall) / Delta\n return beta_cross\n\n if field_orientation == 'all':\n beta_par = beta_0\n\n beta_perp = (beta_1_prime[Z_idx] * hall ** 2 + beta_0_prime[Z_idx]) / Delta\n\n beta_cross = (beta_1_doubleprime[Z_idx] * hall ** 3 +\n beta_0_doubleprime[Z_idx] * hall) / Delta\n return np.array((beta_par, beta_perp, beta_cross))\n\n\n#\n# Abandon all hope, ye who enter here\n#\n\ndef _nondim_tc_e_ji_held(hall, Z, field_orientation):\n \"\"\"Dimensionless electron thermal conductivity - Ji-Held.\n\n Ji, Jeong-Young, and Eric D. Held. 
\"Closure and transport theory for\n high-collisionality electron-ion plasmas.\" Physics of Plasmas 20.4 (2013):\n 042114.\n \"\"\"\n allowed_Z = [1, 2, 'arbitrary']\n Z_idx = _check_Z(allowed_Z, Z)\n # fixing overflow errors when exponentiating r by making a float\n # instead of an int\n r = float(np.abs(Z * hall))\n\n def f_kappa_par_e(Z):\n numerator = 13.5 * Z ** 2 + 54.4 * Z + 25.2\n denominator = Z ** 3 + 8.35 * Z ** 2 + 15.2 * Z + 4.51\n return numerator / denominator\n\n def f_kappa_0(Z):\n numerator = 9.91 * Z ** 3 + 75.3 * Z ** 2 + 518 * Z + 333\n denominator = 1000\n return numerator / denominator\n\n def f_kappa_1(Z):\n numerator = 0.211 * Z ** 3 + 12.7 * Z ** 2 + 48.4 * Z + 6.45\n denominator = Z + 57.1\n return numerator / denominator\n\n def f_kappa_2(Z):\n numerator = 0.932 * Z ** (7 / 3) + 0.135 * Z ** 2 + 12.3 * Z + 8.77\n denominator = Z + 4.84\n return numerator / denominator\n\n def f_kappa_3(Z):\n numerator = 0.246 * Z ** 3 + 2.65 * Z ** 2 - 92.8 * Z - 1.96\n denominator = Z ** 2 + 19.9 * Z + 35.3\n return numerator / denominator\n\n def f_kappa_4(Z):\n numerator = 2.76 * Z ** (5 / 3) - 0.836 * Z ** (2 / 3) - 0.0611\n denominator = Z - 0.214\n return numerator / denominator\n\n def f_k_0(Z):\n numerator = 0.0396 * Z ** 3 + 46.3 * Z + 176\n denominator = 1000\n return numerator / denominator\n\n def f_k_1(Z):\n numerator = 15.4 * Z ** 3 + 188 * Z ** 2 + 240 * Z + 35.3\n denominator = 1000 * Z + 397\n return numerator / denominator\n\n def f_k_2(Z):\n numerator = -0.159 * Z ** 2 - 12.5 * Z + 34.1\n denominator = Z ** (2 / 3) + 0.741 * Z ** (1 / 3) + 31.0\n return numerator / denominator\n\n def f_k_3(Z):\n numerator = 0.431 * Z ** 2 + 3.69 * Z + 0.0314\n denominator = Z + 3.62\n return numerator / denominator\n\n def f_k_4(Z):\n numerator = 0.0258 * Z ** 2 - 1.63 * Z + 0.711\n denominator = Z ** (4 / 3) + 4.36 * Z ** (2 / 3) + 2.75\n return numerator / denominator\n\n def f_k_5(Z):\n numerator = Z ** 3 + 11.9 * Z ** 2 + 28.8 * Z + 9.07\n denominator = 173 * Z + 133\n return numerator / denominator\n\n kappa_par_e = [3.204, 2.464, f_kappa_par_e(Z)]\n kappa_0 = [0.936, 1.749, f_kappa_0(Z)]\n kappa_1 = [1.166, 2.635, f_kappa_1(Z)]\n kappa_2 = [3.791, 5.644, f_kappa_2(Z)]\n kappa_3 = [-1.635, -2.212, f_kappa_3(Z)]\n kappa_4 = [2.370, 4.129, f_kappa_4(Z)]\n k_0 = [0.222, 0.269, f_k_0(Z)]\n k_1 = [0.343, 0.580, f_k_1(Z)]\n k_2 = [0.655, 0.252, f_k_2(Z)]\n k_3 = [0.899, 1.626, f_k_3(Z)]\n k_4 = [-0.110, -0.201, f_k_4(Z)]\n k_5 = [0.166, 0.255, f_k_5(Z)]\n\n kappa_par = kappa_par_e[Z_idx]\n if field_orientation == 'parallel' or field_orientation == 'par':\n return Z * kappa_par\n\n def f_kappa_perp(Z_idx):\n numerator = ((13 / 4 * Z + np.sqrt(2)) * r + kappa_0[Z_idx] * kappa_par_e[Z_idx])\n denominator = (r ** 3 +\n kappa_4[Z_idx] * r ** (7 / 3) +\n kappa_3[Z_idx] * r ** 2 +\n kappa_2[Z_idx] * r ** (5 / 3) +\n kappa_1[Z_idx] * r +\n kappa_0[Z_idx])\n return numerator / denominator\n\n kappa_perp = f_kappa_perp(Z_idx)\n if field_orientation == 'perpendicular' or field_orientation == 'perp':\n return Z * kappa_perp\n\n def f_kappa_cross(Z_idx):\n numerator = r * (5 / 2 * r + k_0[Z_idx] / k_5[Z_idx])\n denominator = (r ** 3 +\n k_4[Z_idx] * r ** (7 / 3) +\n k_3[Z_idx] * r ** 2 +\n k_2[Z_idx] * r ** (5 / 3) +\n k_1[Z_idx] * r +\n k_0[Z_idx])\n return numerator / denominator\n\n kappa_cross = f_kappa_cross(Z_idx)\n if field_orientation == 'cross':\n return Z * kappa_cross\n\n if field_orientation == 'all':\n return np.array((Z * kappa_par, Z * kappa_perp, Z * kappa_cross))\n\n\ndef 
_nondim_resist_ji_held(hall, Z, field_orientation):\n \"\"\"Dimensionless resistivity - Ji-Held.\n\n Ji, Jeong-Young, and Eric D. Held. \"Closure and transport theory for\n high-collisionality electron-ion plasmas.\" Physics of Plasmas 20.4 (2013):\n 042114.\n \"\"\"\n allowed_Z = [1, 2, 'arbitrary']\n Z_idx = _check_Z(allowed_Z, Z)\n # fixing overflow errors when exponentiating r by making a float\n # instead of an int\n r = float(np.abs(Z * hall))\n\n def f_alpha_par_e(Z):\n numerator = Z ** (2 / 3)\n denominator = 1.46 * Z ** (2 / 3) - 0.330 * Z ** (1 / 3) + 0.888\n return 1 - numerator / denominator\n\n def f_alpha_0(Z):\n return 0.623 * Z ** (5 / 3) - 2.61 * Z ** (4 / 3) + 3.56 * Z + 0.557\n\n def f_alpha_1(Z):\n return 2.24 * Z ** (2 / 3) - 1.11 * Z ** (1 / 3) + 1.84\n\n def f_alpha_2(Z):\n return -0.0983 * Z ** (1 / 3) + 0.0176\n\n def f_a_0(Z):\n return 0.0759 * Z ** (8 / 3) + 0.897 * Z ** 2 + 2.06 * Z + 1.06\n\n def f_a_1(Z):\n return 2.18 * Z ** (5 / 3) + 5.31 * Z + 3.73\n\n def f_a_2(Z):\n return 7.41 * Z + 1.11 * Z ** (2 / 3) - 1.17\n\n def f_a_3(Z):\n return 3.89 * Z ** (2 / 3) - 4.51 * Z ** (1 / 3) + 6.76\n\n def f_a_4(Z):\n return 2.26 * Z ** (1 / 3) + 0.281\n\n def f_a_5(Z):\n return 1.18 * Z ** (5 / 3) - 1.03 * Z ** (4 / 3) + 3.60 * Z + 1.32\n\n alpha_par_e = [0.504, 0.431, f_alpha_par_e(Z)]\n alpha_0 = [2.130, 3.078, f_alpha_0(Z)]\n alpha_1 = [2.970, 3.997, f_alpha_1(Z)]\n alpha_2 = [-0.081, -0.106, f_alpha_2(Z)]\n a_0 = [4.093, 9.250, f_a_0(Z)]\n a_1 = [11.22, 21.27, f_a_1(Z)]\n a_2 = [7.350, 15.41, f_a_2(Z)]\n a_3 = [6.140, 7.253, f_a_3(Z)]\n a_4 = [2.541, 3.128, f_a_4(Z)]\n a_5 = [5.070, 9.671, f_a_5(Z)]\n\n alpha_par = alpha_par_e[Z_idx]\n if field_orientation == 'parallel' or field_orientation == 'par':\n return alpha_par\n\n def f_alpha_perp(Z_idx):\n numerator = (1.46 * Z ** (2 / 3) * r + alpha_0[Z_idx] * (1 - alpha_par_e[Z_idx]))\n denominator = (r ** (5 / 3) +\n alpha_2[Z_idx] * r ** (4 / 3) +\n alpha_1[Z_idx] * r +\n alpha_0[Z_idx])\n return 1 - numerator / denominator\n\n alpha_perp = f_alpha_perp(Z_idx)\n if field_orientation == 'perpendicular' or field_orientation == 'perp':\n return alpha_perp\n\n def f_alpha_cross(Z_idx):\n numerator = Z ** (2 / 3) * r * (2.53 * r + a_0[Z_idx] / a_5[Z_idx])\n denominator = (r ** (8 / 3) +\n a_4[Z_idx] * r ** (7 / 3) +\n a_3[Z_idx] * r ** 2 +\n a_2[Z_idx] * r ** (5 / 3) +\n a_1[Z_idx] * r +\n a_0[Z_idx])\n return numerator / denominator\n\n alpha_cross = f_alpha_cross(Z_idx)\n if field_orientation == 'cross':\n return alpha_cross\n\n if field_orientation == 'all':\n return np.array((alpha_par, alpha_perp, alpha_cross))\n\n\ndef _nondim_tec_ji_held(hall, Z, field_orientation):\n \"\"\"Dimensionless thermoelectric conductivity - Ji-Held.\n\n Ji, Jeong-Young, and Eric D. Held. 
\"Closure and transport theory for\n high-collisionality electron-ion plasmas.\" Physics of Plasmas 20.4 (2013):\n 042114.\n \"\"\"\n allowed_Z = [1, 2, 'arbitrary']\n Z_idx = _check_Z(allowed_Z, Z)\n # fixing overflow errors when exponentiating r by making a float\n # instead of an int\n r = float(np.abs(Z * hall))\n\n def f_beta_par_e(Z):\n numerator = Z ** (5 / 3)\n denominator = 0.693 * Z ** (5 / 3) - 0.279 * Z ** (4 / 3) + Z + 0.01\n return numerator / denominator\n\n def f_beta_0(Z):\n return 0.156 * Z ** (8 / 3) + 0.994 * Z ** 2 + 3.21 * Z - 0.84\n\n def f_beta_1(Z):\n return 3.69 * Z ** (5 / 3) + 3.77 * Z + 0.77\n\n def f_beta_2(Z):\n return 9.43 * Z + 4.22 * Z ** (2 / 3) - 12.9 * Z ** (1 / 3) + 4.56\n\n def f_beta_3(Z):\n return 2.70 * Z ** (2 / 3) + 1.46 * Z ** (1 / 3) - 0.17\n\n def f_beta_4(Z):\n return 2.58 * Z ** (1 / 3) + 0.17\n\n def f_b_0(Z):\n numerator = 6.87 * Z ** 3 + 78.2 * Z ** 2 + 623 * Z + 366\n denominator = 1000\n return numerator / denominator\n\n def f_b_1(Z):\n return 0.134 * Z ** 2 + 0.977 * Z + 0.17\n\n def f_b_2(Z):\n return (0.689 * Z ** (4 / 3) - 0.377 * Z ** (2 / 3) +\n 3.94 * Z ** (1 / 3) + 0.644)\n\n def f_b_3(Z):\n return -0.109 * Z + 1.33 * Z ** (2 / 3) - 3.80 * Z ** (1 / 3) + 0.289\n\n def f_b_4(Z):\n return 2.46 * Z ** (2 / 3) + 0.522\n\n def f_b_5(Z):\n return 0.102 * Z ** 2 + 0.746 * Z + 0.072 * Z ** (1 / 3) + 0.211\n\n beta_par_e = [0.702, 0.905, f_beta_par_e(Z)]\n beta_0 = [3.520, 10.55, f_beta_0(Z)]\n beta_1 = [8.230, 20.03, f_beta_1(Z)]\n beta_2 = [5.310, 13.87, f_beta_2(Z)]\n beta_3 = [3.990, 5.955, f_beta_3(Z)]\n beta_4 = [2.750, 3.421, f_beta_4(Z)]\n b_0 = [1.074, 1.980, f_b_0(Z)]\n b_1 = [1.281, 2.660, f_b_1(Z)]\n b_2 = [4.896, 6.746, f_b_2(Z)]\n b_3 = [-2.290, -2.605, f_b_3(Z)]\n b_4 = [2.982, 4.427, f_b_4(Z)]\n b_5 = [1.131, 2.202, f_b_5(Z)]\n\n beta_par = beta_par_e[Z_idx]\n if field_orientation == 'parallel' or field_orientation == 'par':\n return beta_par\n\n def f_beta_perp(Z_idx):\n numerator = 6.33 * Z ** (5 / 3) * r + beta_0[Z_idx] * beta_par_e[Z_idx]\n denominator = (r ** (8 / 3) +\n beta_4[Z_idx] * r ** (7 / 3) +\n beta_3[Z_idx] * r ** 2 +\n beta_2[Z_idx] * r ** (5 / 3) +\n beta_1[Z_idx] * r +\n beta_0[Z_idx])\n return numerator / denominator\n\n beta_perp = f_beta_perp(Z_idx)\n if field_orientation == 'perpendicular' or field_orientation == 'perp':\n return beta_perp\n\n def f_beta_cross(Z_idx):\n numerator = Z * r * (3 / 2 * r + b_0[Z_idx] / b_5[Z_idx])\n denominator = (r ** 3 +\n b_4[Z_idx] * r ** (7 / 3) +\n b_3[Z_idx] * r ** 2 +\n b_2[Z_idx] * r ** (5 / 3) +\n b_1[Z_idx] * r +\n b_0[Z_idx])\n return numerator / denominator\n\n beta_cross = f_beta_cross(Z_idx)\n if field_orientation == 'cross':\n return beta_cross\n\n if field_orientation == 'all':\n return np.array((beta_par, beta_perp, beta_cross))\n\n\ndef _nondim_visc_e_ji_held(hall, Z):\n \"\"\"Dimensionless electron viscosity - Ji-Held.\n\n Ji, Jeong-Young, and Eric D. Held. 
\"Closure and transport theory for\n high-collisionality electron-ion plasmas.\" Physics of Plasmas 20.4 (2013):\n 042114.\n \"\"\"\n allowed_Z = [1, 2, 'arbitrary']\n Z_idx = _check_Z(allowed_Z, Z)\n # fixing overflow errors when exponentiating r by making a float\n # instead of an int\n r = float(np.abs(Z * hall))\n\n def f_eta_0_e(Z):\n return 1 / (0.55 * Z + 0.083 * Z ** (1 / 3) + 0.732)\n\n def f_hprime_0(Z):\n return 0.0699 * Z ** 3 + 0.558 * Z ** 2 + 1.66 * Z + 1.06\n\n def f_hprime_1(Z):\n return 0.657 * Z ** 2 + 1.42 * Z + 0.416\n\n def f_hprime_2(Z):\n return -0.369 * Z ** (4 / 3) + 0.379 * Z + 0.339 * Z ** (1 / 3) + 2.17\n\n def f_hprime_3(Z):\n return 2.16 * Z - 0.657 * Z ** (1 / 3) + 0.0347\n\n def f_hprime_4(Z):\n return -0.0703 * Z ** (2 / 3) - 0.224 * Z ** (1 / 3) + 0.333\n\n def f_h_0(Z):\n return 0.0473 * Z ** 3 + 0.323 * Z ** 2 + 0.951 * Z + 0.407\n\n def f_h_1(Z):\n return 0.171 * Z ** 2 + 0.523 * Z + 0.336\n\n def f_h_2(Z):\n return 0.362 * Z ** (4 / 3) + 0.178 * Z + 1.06 * Z ** (1 / 3) + 1.26\n\n def f_h_3(Z):\n return 0.599 * Z + 0.106 * Z ** (2 / 3) - 0.444 * Z ** (1 / 3) - 0.161\n\n def f_h_4(Z):\n return -0.16 * Z ** (2 / 3) + 0.06 * Z ** (1 / 3) + 0.232\n\n def f_h_5(Z):\n return 0.183 * Z ** 2 + 0.714 * Z + 0.0375 * Z ** (1 / 3) + 0.47\n\n eta_0_e = [0.733, 0.516, f_eta_0_e(Z)]\n hprime_0 = [3.348, 7.171, f_hprime_0(Z)]\n hprime_1 = [2.493, 5.884, f_hprime_1(Z)]\n hprime_2 = [2.519, 2.425, f_hprime_2(Z)]\n hprime_3 = [1.538, 3.527, f_hprime_3(Z)]\n hprime_4 = [0.039, -0.061, f_hprime_4(Z)]\n h_0 = [1.728, 3.979, f_h_0(Z)]\n h_1 = [1.030, 2.066, f_h_1(Z)]\n h_2 = [2.860, 3.864, f_h_2(Z)]\n h_3 = [0.100, 0.646, f_h_3(Z)]\n h_4 = [0.132, 0.054, f_h_4(Z)]\n h_5 = [1.405, 2.677, f_h_5(Z)]\n\n eta_0 = eta_0_e[Z_idx]\n\n def f_eta_2(Z_idx, r):\n numerator = ((6 / 5 * Z + 3 / 5 * np.sqrt(2)) * r +\n hprime_0[Z_idx] * eta_0_e[Z_idx])\n denominator = (r ** 3 +\n hprime_4[Z_idx] * r ** (7 / 3) +\n hprime_3[Z_idx] * r ** 2 +\n hprime_2[Z_idx] * r ** (5 / 3) +\n hprime_1[Z_idx] * r +\n hprime_0[Z_idx])\n return numerator / denominator\n\n eta_2 = f_eta_2(Z_idx, r)\n\n eta_1 = f_eta_2(Z_idx, 2 * r)\n\n def f_eta_4(Z_idx, r):\n numerator = r * (r + h_0[Z_idx] / h_5[Z_idx])\n denominator = (r ** 3 +\n h_4[Z_idx] * r ** (7 / 3) +\n h_3[Z_idx] * r ** 2 +\n h_2[Z_idx] * r ** (5 / 3) +\n h_1[Z_idx] * r +\n h_0[Z_idx])\n return numerator / denominator\n\n eta_4 = f_eta_4(Z_idx, r)\n\n eta_3 = f_eta_4(Z_idx, 2 * r)\n\n return np.array((eta_0, eta_1, eta_2, eta_3, eta_4))\n\n\ndef _nondim_tc_i_ji_held(hall, Z, mu, theta, field_orientation, K=3):\n \"\"\"Dimensionless ion thermal conductivity - Ji-Held.\n\n Ji, Jeong-Young, and Eric D. Held. 
\"Closure and transport theory for\n high-collisionality electron-ion plasmas.\" Physics of Plasmas 20.4 (2013):\n 042114.\n \"\"\"\n # mu = m_e / m_i\n # theta = T_e / T_i\n zeta = 1 / Z * np.sqrt(mu / theta)\n r = np.abs(hall / np.sqrt(2))\n\n # K = 2 # 2x2 moments, equivalent to original Braginskii\n # K = 3 # 3x3 moments\n\n if K == 3:\n Delta_par_i1 = 1 + 26.90 * zeta + 187.5 * zeta ** 2 + 346.9 * zeta ** 3\n kappa_par_i = (5.586 + 101.7 * zeta + 289.1 * zeta ** 2) / Delta_par_i1\n elif K == 2:\n Delta_par_i1 = 1 + 13.50 * zeta + 36.46 * zeta ** 2\n kappa_par_i = (5.524 + 30.38 * zeta) / Delta_par_i1\n if field_orientation == 'parallel' or field_orientation == 'par':\n return kappa_par_i / np.sqrt(2)\n\n if K == 3:\n Delta_perp_i1 = (r ** 6 +\n (3.635 + 29.15 * zeta + 83 * zeta ** 2) * r ** 4 +\n (1.395 + 35.64 * zeta + 344.9 * zeta ** 2 +\n 1345 * zeta ** 3 + 1891 * zeta ** 4) * r ** 2 +\n 0.09163 * Delta_par_i1 ** 2)\n kappa_perp_i = ((np.sqrt(2) + 15 / 2 * zeta) * r ** 4 +\n (3.841 + 57.59 * zeta + 297.8 * zeta ** 2 +\n 555 * zeta ** 3) * r ** 2 +\n 0.09163 * kappa_par_i * Delta_par_i1 ** 2\n ) / Delta_perp_i1\n elif K == 2:\n Delta_perp_i1 = (r ** 4 +\n (1.352 + 12.49 * zeta + 34 * zeta ** 2) * r ** 2 +\n 0.1693 * Delta_par_i1 ** 2)\n kappa_perp_i = ((np.sqrt(2) + 15 / 2 * zeta) * r ** 2 +\n 0.1693 * kappa_par_i * Delta_par_i1 ** 2\n ) / Delta_perp_i1\n if field_orientation == 'perpendicular' or field_orientation == 'perp':\n return kappa_perp_i / np.sqrt(2)\n\n if K == 3:\n kappa_cross_i = (r * (5 / 2 * r ** 4 +\n (7.963 + 64.40 * zeta +\n 185 * zeta ** 2) * r ** 2 +\n 1.344 + 44.54 * zeta + 511.9 * zeta ** 2 +\n 2155 * zeta ** 3 + 3063 * zeta ** 4\n ) / Delta_perp_i1)\n elif K == 2:\n kappa_cross_i = r * (5 / 2 * r ** 2 +\n 2.323 + 22.73 * zeta + 62.5 * zeta ** 2\n ) / Delta_perp_i1\n if field_orientation == 'cross':\n return kappa_cross_i / np.sqrt(2)\n\n if field_orientation == 'all':\n return np.array((kappa_par_i / np.sqrt(2),\n kappa_perp_i / np.sqrt(2),\n kappa_cross_i / np.sqrt(2)))\n\n\ndef _nondim_visc_i_ji_held(hall, Z, mu, theta, K=3):\n \"\"\"Dimensionless ion viscosity - Ji-Held.\n\n Ji, Jeong-Young, and Eric D. Held. 
\"Closure and transport theory for\n high-collisionality electron-ion plasmas.\" Physics of Plasmas 20.4 (2013):\n 042114.\n \"\"\"\n zeta = 1 / Z * np.sqrt(mu / theta)\n r = np.abs(hall / np.sqrt(2))\n r13 = 2 * r\n\n # K = 2 # 2x2 moments, equivalent to original Braginskii\n # K = 3 # 3x3 moments\n\n if K == 3:\n Delta_par_i2 = 1 + 15.79 * zeta + 63.92 * zeta ** 2 + 71.69 * zeta ** 3\n eta_0_i = (1.365 + 16.75 * zeta + 35.84 * zeta ** 2) / Delta_par_i2\n\n def Delta_perp_i2(r, zeta, Delta_par_i2):\n Delta_perp_i2 = (r ** 6 +\n (4.391 + 26.69 * zeta + 56 * zeta ** 2) * r ** 4 +\n (3.191 + 49.62 * zeta + 306.4 * zeta ** 2 +\n 808.1 * zeta ** 3 + 784 * zeta ** 4) * r ** 2 +\n 0.4483 * Delta_par_i2 ** 2)\n return Delta_perp_i2\n\n Delta_perp_i2_24 = Delta_perp_i2(r, zeta, Delta_par_i2)\n Delta_perp_i2_13 = Delta_perp_i2(r13, zeta, Delta_par_i2)\n\n def f_eta_2(r, zeta, Delta_perp_i2):\n eta_2_i = (((3 / 5 * np.sqrt(2) + 2 * zeta) * r ** 4 +\n (2.680 + 25.98 * zeta + 90.71 * zeta ** 2 +\n 104 * zeta ** 3) * r ** 2 +\n 0.4483 * eta_0_i * Delta_par_i2 ** 2\n ) / Delta_perp_i2)\n return eta_2_i\n\n eta_2_i = f_eta_2(r, zeta, Delta_perp_i2_24)\n eta_1_i = f_eta_2(r13, zeta, Delta_perp_i2_13)\n\n def f_eta_4(r, zeta, Delta_perp_i2):\n eta_4_i = r * (r ** 4 +\n (3.535 + 23.30 * zeta + 52 * zeta ** 2) * r ** 2 +\n 0.9538 + 21.81 * zeta + 174.2 * zeta ** 2 +\n 538.4 * zeta ** 3 + 576 * zeta ** 4\n ) / Delta_perp_i2\n return eta_4_i\n\n eta_4_i = f_eta_4(r, zeta, Delta_perp_i2_24)\n eta_3_i = f_eta_4(r13, zeta, Delta_perp_i2_13)\n\n elif K == 2:\n Delta_par_i2 = 1 + 7.164 * zeta + 10.49 * zeta ** 2\n eta_0_i = (1.357 + 5.243 * zeta) / Delta_par_i2\n\n def Delta_perp_i2(r, zeta, Delta_par_i2):\n Delta_perp_i2 = (r ** 4 +\n (2.023 + 11.68 * zeta + 20 * zeta ** 2) * r ** 2 +\n 0.5820 * Delta_par_i2 ** 2)\n return Delta_perp_i2\n\n Delta_perp_i2_24 = Delta_perp_i2(r, zeta, Delta_par_i2)\n Delta_perp_i2_13 = Delta_perp_i2(r13, zeta, Delta_par_i2)\n\n def f_eta_2(r, zeta, Delta_perp_i2):\n eta_2_i = ((3 / 5 * np.sqrt(2) + 2 * zeta) * r ** 2 +\n 0.5820 * eta_0_i * Delta_par_i2 ** 2\n ) / Delta_perp_i2\n return eta_2_i\n\n eta_2_i = f_eta_2(r, zeta, Delta_perp_i2_24)\n eta_1_i = f_eta_2(r13, zeta, Delta_perp_i2_13)\n\n def f_eta_4(r, zeta, Delta_perp_i2):\n Delta_perp_i2 = (r ** 4 +\n (2.023 + 11.68 * zeta + 20 * zeta ** 2) * r ** 2 +\n 0.5820 * Delta_par_i2 ** 2)\n eta_4_i = r * (r ** 2 +\n 1.188 + 8.283 * zeta + 16 * zeta ** 2\n ) / Delta_perp_i2\n return eta_4_i\n\n eta_4_i = f_eta_4(r, zeta, Delta_perp_i2_24)\n eta_3_i = f_eta_4(r13, zeta, Delta_perp_i2_13)\n\n return np.array((eta_0_i / np.sqrt(2), eta_1_i / np.sqrt(2),\n eta_2_i / np.sqrt(2), eta_3_i / np.sqrt(2),\n eta_4_i / np.sqrt(2)))\n"
] |
[
[
"numpy.sqrt",
"numpy.abs",
"numpy.isnan",
"numpy.array",
"numpy.isclose"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
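The row above tabulates the Z = 1 Braginskii electron thermal-conductivity fit (delta_0 = 3.7703, delta_1 = 14.79, plus the gamma-prime and gamma-double-prime coefficients). The short standalone sketch below re-evaluates that fit so the parallel, perpendicular, and cross components can be inspected at a few Hall parameters; the function name and interface are illustrative only and are not part of the package's public API.

import numpy as np

# Z = 1 Braginskii electron coefficients copied from the table in the row above.
DELTA_0, DELTA_1 = 3.7703, 14.79
GAMMA_1P, GAMMA_0P = 4.664, 11.92      # gamma_1_prime, gamma_0_prime
GAMMA_1PP, GAMMA_0PP = 2.500, 21.67    # gamma_1_doubleprime, gamma_0_doubleprime

def kappa_hat_e_z1(hall):
    """Return (kappa_par, kappa_perp, kappa_cross) for Z = 1 (illustrative helper)."""
    x = float(hall)                    # float avoids integer overflow when exponentiating
    delta = x ** 4 + DELTA_1 * x ** 2 + DELTA_0
    kappa_par = GAMMA_0P / DELTA_0     # parallel value is independent of the Hall parameter
    kappa_perp = (GAMMA_1P * x ** 2 + GAMMA_0P) / delta
    kappa_cross = (GAMMA_1PP * x ** 3 + GAMMA_0PP * x) / delta
    return np.array((kappa_par, kappa_perp, kappa_cross))

print(kappa_hat_e_z1(0))    # unmagnetized limit: perp equals par (~3.16), cross is 0
print(kappa_hat_e_z1(10))   # strongly magnetized: perp and cross are strongly suppressed

At hall = 0 the perpendicular value collapses onto the parallel one, and for large hall the perpendicular and cross components fall off, which is the qualitative behavior the switchboard functions above rely on.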
deviant-syndrome/spear2sc
|
[
"0dee0cc8c4b0b6c45e2f6af103e2c9bb8acf8d7c"
] |
[
"spear2sc/analysis.py"
] |
[
"# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport plotille\n\nfrom .spear_utils import index_time, index_amp\n\n\"\"\"spear2sc.analysis A set of methods to perform basic analysis of the partials\"\"\"\n\n\ndef get_durations_thresh(partials):\n \"\"\"Gets the 92th percentile of partial durations\n\n The concrete percentile threshold is sort of an empirical value\n\n :param partials: list of sound partials\n :type partials: list\n :return: 92th percentile of partials durations\n :rtype: float\n \"\"\"\n durations = list(map(lambda p: get_total_duration(p), partials))\n return np.percentile(durations, 92)\n\n\ndef get_amp_thresh(partials, est_dur_thresh):\n \"\"\"Get the 30th percentile of partial's levels\n\n Only those partials which are longer than est_dur_thresh are counted.\n The concrete percentile threshold is sort of an empirical value\n\n :param partials: list of sound partials\n :type partials: list\n :param est_dur_thresh: duration threshold, seconds\n :type est_dur_thresh: float\n :return: 30th percentile of partial's levels\n :rtype: float\n \"\"\"\n return np.percentile(list(map(lambda p: get_amp_mean(p), filter(lambda p: get_total_duration(p) > est_dur_thresh, partials))), 30)\n\n\ndef get_amp_mean(partial):\n \"\"\"Gets the median (50th percentile) of partial's levels\n\n :param partial: sound partial (list of [time, freq, amp] points)\n :type partial: list\n :return: median of of partial's levels\n :rtype: float\n \"\"\"\n return np.percentile(list(map(lambda p: p[index_amp], partial)), 50)\n\n\ndef get_total_duration(partial):\n \"\"\"Gets the total duration of a partial in seconds\n :param partial: sound partial (list of [time, freq, amp] points)\n :type partial: list\n :return: Duration of a partials in seconds\n :rtype: float\n \"\"\"\n return partial[len(partial) - 1][index_time] - partial[0][index_time]\n\n\ndef get_amp_envelope(partial):\n \"\"\"Retrieves particle's level envelope over time\n\n :param partial: sound partial (list of [time, freq, amp] points)\n :type partial: list\n :return: Tuple of timestamps and levels for plotting\n :rtype tuple(list, list)\n \"\"\"\n return list(map(lambda p: p[index_time], partial)), list(map(lambda p: p[index_amp], partial))\n\n\ndef print_analysis(partials, options):\n \"\"\"Prints partials analysis results to stdout\n\n This analysis includes: estimating the number of partials, possible estimating\n durations and level thresholds, if they are not specified in options.\n If graphics is True in options, also prints the graphs\n\n :param partials: list of sound partials\n :type partials: list\n :param options: Analysis options (est_duration_thresh, est_level_thresh, graphics)\n :type options: tuple(float, float, boolean)\n :return:\n \"\"\"\n est_duration_thresh, est_level_thresh, graphics = options\n if est_duration_thresh is None:\n est_duration_thresh = get_durations_thresh(partials)\n if est_level_thresh == 0.0:\n est_level_thresh = get_amp_thresh(partials, est_duration_thresh)\n\n print(\"92th percentile of durations is: {:10.4f}\".format(est_duration_thresh))\n print(\"30th percentile of levels is: {:10.4f}\".format(est_level_thresh))\n est_num_partials = 0\n\n fig = plotille.Figure()\n fig.color_mode = 'byte'\n fig.width = 120\n fig.height = 30\n\n partials_total = 0\n for partial in partials:\n partials_total = partials_total + 1\n if get_total_duration(partial) > est_duration_thresh and get_amp_mean(partial) > est_level_thresh:\n est_num_partials = est_num_partials + 1\n x, y = get_amp_envelope(partial)\n fig.plot(x, 
y)\n if graphics:\n print(fig.show())\n print(\"Total number of partials: {}\".format(partials_total))\n print(\"Estimated number of representative partials: {} ({:6.2f}%)\"\n .format(est_num_partials, est_num_partials / (partials_total + 0.001) * 100))"
] |
[
[
"numpy.percentile"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
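The spear2sc analysis module in the row above selects "representative" partials by comparing each partial's total duration with the 92nd-percentile duration and its median level with the 30th-percentile level of the long-enough partials. Below is a minimal self-contained sketch of that thresholding on three toy partials; the [time, freq, amp] point layout and the INDEX_* constants stand in for spear_utils.index_time / index_amp and are assumptions of this illustration.

import numpy as np

# Assumed SPEAR point layout: [time, freq, amp].
INDEX_TIME, INDEX_AMP = 0, 2

def total_duration(partial):
    return partial[-1][INDEX_TIME] - partial[0][INDEX_TIME]

def amp_median(partial):
    return np.percentile([point[INDEX_AMP] for point in partial], 50)

# Three toy partials: long/quiet, long/loud, short/very quiet.
partials = [
    [[t, 440.0, 0.10] for t in (0.0, 0.5, 1.0)],
    [[t, 880.0, 0.60] for t in (0.0, 1.0, 2.0)],
    [[t, 1760.0, 0.05] for t in (0.0, 0.1)],
]

dur_thresh = np.percentile([total_duration(p) for p in partials], 92)
long_enough = [p for p in partials if total_duration(p) > dur_thresh]
amp_thresh = np.percentile([amp_median(p) for p in long_enough], 30)
print("duration threshold:", dur_thresh)   # ~1.84 s for these toy partials
print("level threshold:", amp_thresh)      # median level of the surviving partial(s)

On a realistic SPEAR export with hundreds of partials, print_analysis then counts as representative only the partials that exceed both thresholds.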
OpenBanboo/AgileNet
|
[
"d75baa20b9f762cb56b249dca272150de9ae8def",
"d75baa20b9f762cb56b249dca272150de9ae8def"
] |
[
"miniImagenet/data/omniglot.py",
"miniImagenet/FCDec.py"
] |
[
"from __future__ import print_function\nimport torch.utils.data as data\nimport os\nimport os.path\nimport numpy as np\nfrom PIL import Image as pil_image\nimport pickle\nimport random\nfrom . import parser\n\n\nclass Omniglot(data.Dataset):\n def __init__(self, root, dataset='omniglot'):\n self.root = root\n self.seed = 10\n self.dataset = dataset\n if not self._check_exists_():\n self._init_folders_()\n if self.check_decompress():\n self._decompress_()\n self._preprocess_()\n\n def _init_folders_(self):\n decompress = False\n if not os.path.exists(self.root):\n os.makedirs(self.root)\n if not os.path.exists(os.path.join(self.root, 'omniglot')):\n os.makedirs(os.path.join(self.root, 'omniglot'))\n decompress = True\n if not os.path.exists(os.path.join(self.root, 'omniglot', 'train')):\n os.makedirs(os.path.join(self.root, 'omniglot', 'train'))\n decompress = True\n if not os.path.exists(os.path.join(self.root, 'omniglot', 'test')):\n os.makedirs(os.path.join(self.root, 'omniglot', 'test'))\n decompress = True\n if not os.path.exists(os.path.join(self.root, 'compacted_datasets')):\n os.makedirs(os.path.join(self.root, 'compacted_datasets'))\n decompress = True\n return decompress\n\n def check_decompress(self):\n return os.listdir('%s/omniglot/test' % self.root) == []\n\n def _decompress_(self):\n print(\"\\nDecompressing Images...\")\n comp_files = ['%s/compressed/omniglot/images_background.zip' % self.root,\n '%s/compressed/omniglot/images_evaluation.zip' % self.root]\n if os.path.isfile(comp_files[0]) and os.path.isfile(comp_files[1]):\n os.system(('unzip %s -d ' % comp_files[0]) +\n os.path.join(self.root, 'omniglot', 'train'))\n os.system(('unzip %s -d ' % comp_files[1]) +\n os.path.join(self.root, 'omniglot', 'test'))\n else:\n raise Exception('Missing %s or %s' % (comp_files[0], comp_files[1]))\n print(\"Decompressed\")\n\n def _check_exists_(self):\n return os.path.exists(os.path.join(self.root, 'compacted_datasets', 'omniglot_train.pickle')) and \\\n os.path.exists(os.path.join(self.root, 'compacted_datasets', 'omniglot_test.pickle'))\n\n def _preprocess_(self):\n print('\\nPreprocessing Omniglot images...')\n (class_names_train, images_path_train) = parser.get_image_paths(os.path.join(self.root, 'omniglot', 'train'))\n (class_names_test, images_path_test) = parser.get_image_paths(os.path.join(self.root, 'omniglot', 'test'))\n\n keys_all = sorted(list(set(class_names_train + class_names_test)))\n label_encoder = {}\n label_decoder = {}\n for i in range(len(keys_all)):\n label_encoder[keys_all[i]] = i\n label_decoder[i] = keys_all[i]\n\n all_set = {}\n for class_, path in zip(class_names_train + class_names_test, images_path_train + images_path_test):\n img = np.array(pil_image.open(path), dtype='float32')\n if label_encoder[class_] not in all_set:\n all_set[label_encoder[class_]] = []\n all_set[label_encoder[class_]].append(img)\n\n # Now we save the 1200 training - 423 testing partition\n keys = sorted(list(all_set.keys()))\n random.seed(self.seed)\n random.shuffle(keys)\n\n train_set = {}\n test_set = {}\n for i in range(1200):\n train_set[keys[i]] = all_set[keys[i]]\n for i in range(1200, len(keys)):\n test_set[keys[i]] = all_set[keys[i]]\n\n self.sanity_check(all_set)\n\n with open(os.path.join(self.root, 'compacted_datasets', 'omniglot_train.pickle'), 'wb') as handle:\n pickle.dump(train_set, handle, protocol=2)\n with open(os.path.join(self.root, 'compacted_datasets', 'omniglot_test.pickle'), 'wb') as handle:\n pickle.dump(test_set, handle, protocol=2)\n\n with 
open(os.path.join(self.root, 'compacted_datasets', 'omniglot_label_encoder.pickle'), 'wb') as handle:\n pickle.dump(label_encoder, handle, protocol=2)\n with open(os.path.join(self.root, 'compacted_datasets', 'omniglot_label_decoder.pickle'), 'wb') as handle:\n pickle.dump(label_decoder, handle, protocol=2)\n\n print('Images preprocessed')\n\n def sanity_check(self, all_set):\n all_good = True\n for class_ in all_set:\n if len(all_set[class_]) != 20:\n all_good = False\n if all_good:\n print(\"All classes have 20 samples\")\n\n def load_dataset(self, train, size):\n print(\"Loading dataset\")\n if train:\n with open(os.path.join(self.root, 'compacted_datasets', 'omniglot_train.pickle'), 'rb') as handle:\n data = pickle.load(handle)\n else:\n with open(os.path.join(self.root, 'compacted_datasets', 'omniglot_test.pickle'), 'rb') as handle:\n data = pickle.load(handle)\n print(\"Num classes before rotations: \"+str(len(data)))\n\n data_rot = {}\n # resize images and normalize\n for class_ in data:\n for rot in range(4):\n data_rot[class_ * 4 + rot] = []\n for i in range(len(data[class_])):\n image2resize = pil_image.fromarray(np.uint8(data[class_][i]*255))\n image_resized = image2resize.resize((size[1], size[0]))\n image_resized = np.array(image_resized, dtype='float32')/127.5 - 1\n image = self.rotate_image(image_resized, rot)\n image = np.expand_dims(image, axis=0)\n data_rot[class_ * 4 + rot].append(image)\n\n print(\"Dataset Loaded\")\n print(\"Num classes after rotations: \"+str(len(data_rot)))\n self.sanity_check(data_rot)\n return data_rot\n\n def rotate_image(self, image, times):\n rotated_image = np.zeros(image.shape)\n for channel in range(image.shape[0]):\n rotated_image[:, :] = np.rot90(image[:, :], k=times)\n return rotated_image\n",
"from __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Function\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nfrom torch.autograd import Variable\nimport create_dic_fuc\nfrom LinearFunction import LinearFunction\n\nclass FCDec(nn.Module):\n def __init__(self, dictionary, input_features, output_features, is_dic_grad=False):\n super(FCDec, self).__init__()\n self.is_dic_grad = is_dic_grad\n print(self.is_dic_grad)\n self.dictionary = nn.Parameter(dictionary, requires_grad=self.is_dic_grad)\n\n # Not a very smart way to initialize\n #self.weight.data.uniform_(-0.1, 0.1)\n #if bias is not None:\n #self.bias.data.uniform_(-0.1, 0.1)\n\n def forward(self, input):\n # See the autograd section for explanation of what happens here.\n return LinearFunction.apply(input, self.dictionary)\n"
] |
[
[
"numpy.rot90",
"numpy.expand_dims",
"numpy.uint8",
"numpy.array",
"numpy.zeros"
],
[
"torch.nn.Parameter"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
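The Omniglot loader in the row above expands each class into four rotated copies and rescales pixel values into [-1, 1] before adding a channel axis. The snippet below is a simplified, self-contained sketch of that resize / normalize / rotate step on a random stand-in image (it operates directly on a 2-D array rather than looping over channels as the original does); the target size and toy data are illustrative only.

import numpy as np
from PIL import Image as pil_image

size = (28, 28)                                    # assumed (height, width) target
toy = np.random.rand(105, 105).astype('float32')   # stand-in for one 105x105 Omniglot glyph

image2resize = pil_image.fromarray(np.uint8(toy * 255))
image_resized = np.array(image2resize.resize((size[1], size[0])),
                         dtype='float32') / 127.5 - 1   # rescale into [-1, 1]

augmented = []
for rot in range(4):                               # 4 rotations -> 4x the number of classes
    rotated = np.rot90(image_resized, k=rot)
    augmented.append(np.expand_dims(rotated, axis=0))   # add a leading channel axis

print([a.shape for a in augmented])                # four arrays of shape (1, 28, 28)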
biplab37/qiskit-aakash
|
[
"e10b204887606f1f75bdfde182bb0c6d0a322c68",
"e10b204887606f1f75bdfde182bb0c6d0a322c68",
"e10b204887606f1f75bdfde182bb0c6d0a322c68",
"e10b204887606f1f75bdfde182bb0c6d0a322c68"
] |
[
"test/python/pulse/test_commands.py",
"qiskit/transpiler/passes/mapping/stochastic_swap.py",
"qiskit/transpiler/passes/optimize_1q_gates.py",
"test/python/visualization/test_circuit_text_drawer.py"
] |
[
"# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n# pylint: disable=invalid-name,unexpected-keyword-arg\n\n\"\"\"Test cases for the pulse command group.\"\"\"\n\nimport unittest\nimport numpy as np\n\nfrom qiskit.pulse import (Acquire, FrameChange, PersistentValue,\n Snapshot, Kernel, Discriminator, functional_pulse)\nfrom qiskit.test import QiskitTestCase\n\n\nclass TestAcquire(QiskitTestCase):\n \"\"\"Acquisition tests.\"\"\"\n\n def test_can_construct_valid_acquire_command(self):\n \"\"\"Test if valid acquire command can be constructed.\n \"\"\"\n kernel_opts = {\n 'start_window': 0,\n 'stop_window': 10\n }\n kernel = Kernel(name='boxcar', **kernel_opts)\n\n discriminator_opts = {\n 'neighborhoods': [{'qubits': 1, 'channels': 1}],\n 'cal': 'coloring',\n 'resample': False\n }\n discriminator = Discriminator(name='linear_discriminator', **discriminator_opts)\n\n acq_command = Acquire(duration=10, kernel=kernel, discriminator=discriminator)\n\n self.assertEqual(acq_command.duration, 10)\n self.assertEqual(acq_command.discriminator.name, 'linear_discriminator')\n self.assertEqual(acq_command.discriminator.params, discriminator_opts)\n self.assertEqual(acq_command.kernel.name, 'boxcar')\n self.assertEqual(acq_command.kernel.params, kernel_opts)\n\n def test_can_construct_acquire_command_with_default_values(self):\n \"\"\"Test if an acquire command can be constructed with default discriminator and kernel.\n \"\"\"\n acq_command = Acquire(duration=10)\n\n self.assertEqual(acq_command.duration, 10)\n self.assertEqual(acq_command.discriminator, None)\n self.assertEqual(acq_command.kernel, None)\n\n\nclass TestFrameChange(QiskitTestCase):\n \"\"\"FrameChange tests.\"\"\"\n\n def test_default(self):\n \"\"\"Test default frame change.\n \"\"\"\n fc_command = FrameChange(phase=1.57)\n\n self.assertEqual(fc_command.phase, 1.57)\n self.assertEqual(fc_command.duration, 0)\n\n\nclass TestFunctionalPulse(QiskitTestCase):\n \"\"\"SamplePulse tests.\"\"\"\n\n def test_gaussian(self):\n \"\"\"Test gaussian pulse.\n \"\"\"\n\n @functional_pulse\n def gaussian(duration, amp, t0, sig):\n x = np.linspace(0, duration - 1, duration)\n return amp * np.exp(-(x - t0) ** 2 / sig ** 2)\n\n pulse_command = gaussian(duration=10, name='test_pulse', amp=1, t0=5, sig=1)\n _y = 1 * np.exp(-(np.linspace(0, 9, 10) - 5)**2 / 1**2)\n\n self.assertListEqual(list(pulse_command.samples), list(_y))\n\n # check name\n self.assertEqual(pulse_command.name, 'test_pulse')\n\n # check duration\n self.assertEqual(pulse_command.duration, 10)\n\n\nclass TestPersistentValue(QiskitTestCase):\n \"\"\"PersistentValue tests.\"\"\"\n\n def test_default(self):\n \"\"\"Test default persistent value.\n \"\"\"\n pv_command = PersistentValue(value=0.5 - 0.5j)\n\n self.assertEqual(pv_command.value, 0.5-0.5j)\n self.assertEqual(pv_command.duration, 0)\n\n\nclass TestSnapshot(QiskitTestCase):\n \"\"\"Snapshot tests.\"\"\"\n\n def test_default(self):\n \"\"\"Test default snapshot.\n \"\"\"\n snap_command = Snapshot(name='test_name', snap_type='state')\n\n self.assertEqual(snap_command.name, 
\"test_name\")\n self.assertEqual(snap_command.type, \"state\")\n self.assertEqual(snap_command.duration, 0)\n\n\nclass TestKernel(QiskitTestCase):\n \"\"\"Kernel tests.\"\"\"\n\n def test_can_construct_kernel_with_default_values(self):\n \"\"\"Test if Kernel can be constructed with default name and params.\n \"\"\"\n kernel = Kernel()\n\n self.assertEqual(kernel.name, None)\n self.assertEqual(kernel.params, {})\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2018.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"\nA pass implementing the default Qiskit stochastic mapper.\n\"\"\"\n\nfrom logging import getLogger\nfrom pprint import pformat\nfrom math import inf\nimport numpy as np\n\nfrom qiskit.transpiler.basepasses import TransformationPass\nfrom qiskit.transpiler.exceptions import TranspilerError\nfrom qiskit.dagcircuit import DAGCircuit\nfrom qiskit.extensions.standard import SwapGate\nfrom qiskit.transpiler.layout import Layout\n# pylint: disable=no-name-in-module\nfrom .cython.stochastic_swap.utils import nlayout_from_layout\n# pylint: disable=no-name-in-module\nfrom .cython.stochastic_swap.swap_trial import swap_trial\nlogger = getLogger(__name__)\n\n\n# Notes:\n# 1. Measurements may occur and be followed by swaps that result in repeated\n# measurement of the same qubit. Near-term experiments cannot implement\n# these circuits, so some care is required when using this mapper\n# with experimental backend targets.\n# 2. We do not use the fact that the input state is zero to simplify\n# the circuit.\n\n\nclass StochasticSwap(TransformationPass):\n \"\"\"\n Maps a DAGCircuit onto a `coupling_map` adding swap gates.\n\n Uses a randomized algorithm.\n \"\"\"\n\n def __init__(self, coupling_map, initial_layout=None,\n trials=20, seed=None):\n \"\"\"\n Map a DAGCircuit onto a `coupling_map` using swap gates.\n\n If initial_layout is not None, we assume the input circuit\n has been layed out before running this pass, and that\n the layout process yields a DAG, coupling map, and layout\n with the following properties:\n\n 1. All three have the same number of qubits\n 2. The layout a bijection from the DAG qubits to the coupling map\n\n For this mapping pass, it may also be necessary that\n\n 3. 
The coupling map is a connected graph\n\n If these are not satisfied, the behavior is undefined.\n\n Args:\n coupling_map (CouplingMap): Directed graph representing a coupling\n map.\n initial_layout (Layout): initial layout of qubits in mapping\n trials (int): maximum number of iterations to attempt\n seed (int): seed for random number generator\n \"\"\"\n super().__init__()\n self.coupling_map = coupling_map\n self.initial_layout = initial_layout\n self.input_layout = None\n self.trials = trials\n self.seed = seed\n self.qregs = None\n self.rng = None\n\n def run(self, dag):\n \"\"\"\n Run the StochasticSwap pass on `dag`.\n\n Args:\n dag (DAGCircuit): DAG to map.\n\n Returns:\n DAGCircuit: A mapped DAG.\n\n Raises:\n TranspilerError: if the coupling map or the layout are not\n compatible with the DAG\n \"\"\"\n\n if self.initial_layout is None:\n if self.property_set[\"layout\"]:\n self.initial_layout = self.property_set[\"layout\"]\n else:\n self.initial_layout = Layout.generate_trivial_layout(*dag.qregs.values())\n\n if len(dag.qubits()) != len(self.initial_layout):\n raise TranspilerError('The layout does not match the amount of qubits in the DAG')\n\n if len(self.coupling_map.physical_qubits) != len(self.initial_layout):\n raise TranspilerError(\n \"Mappers require to have the layout to be the same size as the coupling map\")\n\n self.input_layout = self.initial_layout.copy()\n\n self.qregs = dag.qregs\n if self.seed is None:\n self.seed = np.random.randint(0, np.iinfo(np.int32).max)\n self.rng = np.random.RandomState(self.seed)\n logger.debug(\"StochasticSwap RandomState seeded with seed=%s\", self.seed)\n\n new_dag = self._mapper(dag, self.coupling_map, trials=self.trials)\n # self.property_set[\"layout\"] = self.initial_layout\n return new_dag\n\n def _layer_permutation(self, layer_partition, layout, qubit_subset,\n coupling, trials):\n \"\"\"Find a swap circuit that implements a permutation for this layer.\n\n The goal is to swap qubits such that qubits in the same two-qubit gates\n are adjacent.\n\n Based on S. Bravyi's algorithm.\n\n layer_partition (list): The layer_partition is a list of (qu)bit\n lists and each qubit is a tuple (qreg, index).\n layout (Layout): The layout is a Layout object mapping virtual\n qubits in the input circuit to physical qubits in the coupling\n graph. It reflects the current positions of the data.\n qubit_subset (list): The qubit_subset is the set of qubits in\n the coupling graph that we have chosen to map into, as tuples\n (Register, index).\n coupling (CouplingMap): Directed graph representing a coupling map.\n This coupling map should be one that was provided to the\n stochastic mapper.\n trials (int): Number of attempts the randomized algorithm makes.\n\n Returns:\n Tuple: success_flag, best_circuit, best_depth, best_layout, trivial_flag\n\n If success_flag is True, then best_circuit contains a DAGCircuit with\n the swap circuit, best_depth contains the depth of the swap circuit,\n and best_layout contains the new positions of the data qubits after the\n swap circuit has been applied. 
The trivial_flag is set if the layer\n has no multi-qubit gates.\n\n Raises:\n TranspilerError: if anything went wrong.\n \"\"\"\n return _layer_permutation(layer_partition, self.initial_layout,\n layout, qubit_subset,\n coupling, trials,\n self.qregs, self.rng)\n\n def _layer_update(self, i, first_layer, best_layout, best_depth,\n best_circuit, layer_list):\n \"\"\"Provide a DAGCircuit for a new mapped layer.\n\n i (int) = layer number\n first_layer (bool) = True if this is the first layer in the\n circuit with any multi-qubit gates\n best_layout (Layout) = layout returned from _layer_permutation\n best_depth (int) = depth returned from _layer_permutation\n best_circuit (DAGCircuit) = swap circuit returned\n from _layer_permutation\n layer_list (list) = list of DAGCircuit objects for each layer,\n output of DAGCircuit layers() method\n\n Return a DAGCircuit object to append to the output DAGCircuit\n that the _mapper method is building.\n \"\"\"\n layout = best_layout\n logger.debug(\"layer_update: layout = %s\", pformat(layout))\n logger.debug(\"layer_update: self.initial_layout = %s\", pformat(self.initial_layout))\n dagcircuit_output = DAGCircuit()\n for qubit in layout.get_virtual_bits().keys():\n if qubit.register not in dagcircuit_output.qregs.values():\n dagcircuit_output.add_qreg(qubit.register)\n\n # If this is the first layer with multi-qubit gates,\n # output all layers up to this point and ignore any\n # swap gates. Set the initial layout.\n if first_layer:\n logger.debug(\"layer_update: first multi-qubit gate layer\")\n # Output all layers up to this point\n for j in range(i + 1):\n # Make qubit edge map and extend by classical bits\n edge_map = layout.combine_into_edge_map(self.initial_layout)\n for bit in dagcircuit_output.clbits():\n edge_map[bit] = bit\n dagcircuit_output.compose_back(layer_list[j][\"graph\"], edge_map)\n # Otherwise, we output the current layer and the associated swap gates.\n else:\n # Output any swaps\n if best_depth > 0:\n logger.debug(\"layer_update: there are swaps in this layer, \"\n \"depth %d\", best_depth)\n dagcircuit_output.extend_back(best_circuit)\n else:\n logger.debug(\"layer_update: there are no swaps in this layer\")\n # Make qubit edge map and extend by classical bits\n edge_map = layout.combine_into_edge_map(self.initial_layout)\n for bit in dagcircuit_output.clbits():\n edge_map[bit] = bit\n # Output this layer\n dagcircuit_output.compose_back(layer_list[i][\"graph\"], edge_map)\n\n return dagcircuit_output\n\n def _mapper(self, circuit_graph, coupling_graph,\n trials=20):\n \"\"\"Map a DAGCircuit onto a CouplingMap using swap gates.\n\n Use self.initial_layout for the initial layout.\n\n Args:\n circuit_graph (DAGCircuit): input DAG circuit\n coupling_graph (CouplingMap): coupling graph to map onto\n trials (int): number of trials.\n\n Returns:\n DAGCircuit: object containing a circuit equivalent to\n circuit_graph that respects couplings in coupling_graph\n Layout: a layout object mapping qubits of circuit_graph into\n qubits of coupling_graph. 
The layout may differ from the\n initial_layout if the first layer of gates cannot be\n executed on the initial_layout, since in this case\n it is more efficient to modify the layout instead of swapping\n Dict: a final-layer qubit permutation\n\n Raises:\n TranspilerError: if there was any error during the mapping\n or with the parameters.\n \"\"\"\n # Schedule the input circuit by calling layers()\n layerlist = list(circuit_graph.layers())\n logger.debug(\"schedule:\")\n for i, v in enumerate(layerlist):\n logger.debug(\" %d: %s\", i, v[\"partition\"])\n\n if self.initial_layout is not None:\n qubit_subset = self.initial_layout.get_virtual_bits().keys()\n else:\n # Supply a default layout for this dag\n self.initial_layout = Layout()\n physical_qubit = 0\n for qreg in circuit_graph.qregs.values():\n for index in range(qreg.size):\n self.initial_layout[(qreg, index)] = physical_qubit\n physical_qubit += 1\n qubit_subset = self.initial_layout.get_virtual_bits().keys()\n # Restrict the coupling map to the image of the layout\n coupling_graph = coupling_graph.subgraph(\n self.initial_layout.get_physical_bits().keys())\n if coupling_graph.size() < len(self.initial_layout):\n raise TranspilerError(\"Coupling map too small for default layout\")\n self.input_layout = self.initial_layout.copy()\n\n # Find swap circuit to preceed to each layer of input circuit\n layout = self.initial_layout.copy()\n\n # Construct an empty DAGCircuit with the same set of\n # qregs and cregs as the input circuit\n dagcircuit_output = DAGCircuit()\n dagcircuit_output.name = circuit_graph.name\n for qreg in circuit_graph.qregs.values():\n dagcircuit_output.add_qreg(qreg)\n for creg in circuit_graph.cregs.values():\n dagcircuit_output.add_creg(creg)\n\n # Make a trivial wire mapping between the subcircuits\n # returned by _layer_update and the circuit we build\n identity_wire_map = {}\n for qubit in circuit_graph.qubits():\n identity_wire_map[qubit] = qubit\n for bit in circuit_graph.clbits():\n identity_wire_map[bit] = bit\n\n first_layer = True # True until first layer is output\n logger.debug(\"initial_layout = %s\", layout)\n\n # Iterate over layers\n for i, layer in enumerate(layerlist):\n\n # Attempt to find a permutation for this layer\n success_flag, best_circuit, best_depth, best_layout, trivial_flag \\\n = self._layer_permutation(layer[\"partition\"], layout,\n qubit_subset, coupling_graph,\n trials)\n logger.debug(\"mapper: layer %d\", i)\n logger.debug(\"mapper: success_flag=%s,best_depth=%s,trivial_flag=%s\",\n success_flag, str(best_depth), trivial_flag)\n\n # If this fails, try one gate at a time in this layer\n if not success_flag:\n logger.debug(\"mapper: failed, layer %d, \"\n \"retrying sequentially\", i)\n serial_layerlist = list(layer[\"graph\"].serial_layers())\n\n # Go through each gate in the layer\n for j, serial_layer in enumerate(serial_layerlist):\n\n success_flag, best_circuit, best_depth, best_layout, trivial_flag = \\\n self._layer_permutation(\n serial_layer[\"partition\"],\n layout, qubit_subset,\n coupling_graph,\n trials)\n logger.debug(\"mapper: layer %d, sublayer %d\", i, j)\n logger.debug(\"mapper: success_flag=%s,best_depth=%s,\"\n \"trivial_flag=%s\",\n success_flag, str(best_depth), trivial_flag)\n\n # Give up if we fail again\n if not success_flag:\n raise TranspilerError(\"swap mapper failed: \" +\n \"layer %d, sublayer %d\" % (i, j))\n\n # If this layer is only single-qubit gates,\n # and we have yet to see multi-qubit gates,\n # continue to the next inner iteration\n if 
trivial_flag and first_layer:\n logger.debug(\"mapper: skip to next sublayer\")\n continue\n\n if first_layer:\n self.initial_layout = layout\n\n # Update the record of qubit positions\n # for each inner iteration\n layout = best_layout\n # Update the DAG\n dagcircuit_output.extend_back(\n self._layer_update(j,\n first_layer,\n best_layout,\n best_depth,\n best_circuit,\n serial_layerlist),\n identity_wire_map)\n if first_layer:\n first_layer = False\n\n else:\n # Update the record of qubit positions for each iteration\n layout = best_layout\n\n if first_layer:\n self.initial_layout = layout\n\n # Update the DAG\n dagcircuit_output.extend_back(\n self._layer_update(i,\n first_layer,\n best_layout,\n best_depth,\n best_circuit,\n layerlist),\n identity_wire_map)\n\n if first_layer:\n first_layer = False\n\n # This is the final edgemap. We might use it to correctly replace\n # any measurements that needed to be removed earlier.\n logger.debug(\"mapper: self.initial_layout = %s\", pformat(self.initial_layout))\n logger.debug(\"mapper: layout = %s\", pformat(layout))\n last_edgemap = layout.combine_into_edge_map(self.initial_layout)\n logger.debug(\"mapper: last_edgemap = %s\", pformat(last_edgemap))\n\n # If first_layer is still set, the circuit only has single-qubit gates\n # so we can use the initial layout to output the entire circuit\n # This code is dead due to changes to first_layer above.\n if first_layer:\n logger.debug(\"mapper: first_layer flag still set\")\n layout = self.initial_layout\n for i, layer in enumerate(layerlist):\n edge_map = layout.combine_into_edge_map(self.initial_layout)\n dagcircuit_output.compose_back(layer[\"graph\"], edge_map)\n\n return dagcircuit_output\n\n\ndef _layer_permutation(layer_partition, initial_layout, layout, qubit_subset,\n coupling, trials, qregs, rng):\n \"\"\"Find a swap circuit that implements a permutation for this layer.\n\n Args:\n layer_partition (list): The layer_partition is a list of (qu)bit\n lists and each qubit is a tuple (qreg, index).\n initial_layout (Layout): The initial layout passed.\n layout (Layout): The layout is a Layout object mapping virtual\n qubits in the input circuit to physical qubits in the coupling\n graph. 
It reflects the current positions of the data.\n qubit_subset (list): The qubit_subset is the set of qubits in\n the coupling graph that we have chosen to map into, as tuples\n (Register, index).\n coupling (CouplingMap): Directed graph representing a coupling map.\n This coupling map should be one that was provided to the\n stochastic mapper.\n trials (int): Number of attempts the randomized algorithm makes.\n qregs (OrderedDict): Ordered dict of registers from input DAG.\n rng (RandomState): Random number generator.\n\n Returns:\n Tuple: success_flag, best_circuit, best_depth, best_layout, trivial_flag\n\n Raises:\n TranspilerError: if anything went wrong.\n \"\"\"\n logger.debug(\"layer_permutation: layer_partition = %s\",\n pformat(layer_partition))\n logger.debug(\"layer_permutation: layout = %s\",\n pformat(layout.get_virtual_bits()))\n logger.debug(\"layer_permutation: qubit_subset = %s\",\n pformat(qubit_subset))\n logger.debug(\"layer_permutation: trials = %s\", trials)\n\n gates = [] # list of lists of tuples [[(register, index), ...], ...]\n for gate_args in layer_partition:\n if len(gate_args) > 2:\n raise TranspilerError(\"Layer contains > 2-qubit gates\")\n if len(gate_args) == 2:\n gates.append(tuple(gate_args))\n logger.debug(\"layer_permutation: gates = %s\", pformat(gates))\n\n # Can we already apply the gates? If so, there is no work to do.\n dist = sum([coupling.distance(layout[g[0]], layout[g[1]])\n for g in gates])\n logger.debug(\"layer_permutation: distance = %s\", dist)\n if dist == len(gates):\n logger.debug(\"layer_permutation: nothing to do\")\n circ = DAGCircuit()\n for register in layout.get_virtual_bits().keys():\n if register.register not in circ.qregs.values():\n circ.add_qreg(register.register)\n return True, circ, 0, layout, (not bool(gates))\n\n # Begin loop over trials of randomized algorithm\n num_qubits = len(layout)\n best_depth = inf # initialize best depth\n best_edges = None # best edges found\n best_circuit = None # initialize best swap circuit\n best_layout = None # initialize best final layout\n\n cdist2 = coupling._dist_matrix**2\n # Scaling matrix\n scale = np.zeros((num_qubits, num_qubits))\n\n int_qubit_subset = regtuple_to_numeric(qubit_subset, qregs)\n int_gates = gates_to_idx(gates, qregs)\n int_layout = nlayout_from_layout(layout, qregs, coupling.size())\n\n trial_circuit = DAGCircuit() # SWAP circuit for this trial\n for qubit in layout.get_virtual_bits().keys():\n if qubit.register not in trial_circuit.qregs.values():\n trial_circuit.add_qreg(qubit.register)\n\n slice_circuit = DAGCircuit() # circuit for this swap slice\n for qubit in layout.get_virtual_bits().keys():\n if qubit.register not in slice_circuit.qregs.values():\n slice_circuit.add_qreg(qubit.register)\n edges = np.asarray(coupling.get_edges(), dtype=np.int32).ravel()\n cdist = coupling._dist_matrix\n for trial in range(trials):\n logger.debug(\"layer_permutation: trial %s\", trial)\n # This is one Trial --------------------------------------\n dist, optim_edges, trial_layout, depth_step = swap_trial(num_qubits, int_layout,\n int_qubit_subset,\n int_gates, cdist2,\n cdist, edges, scale,\n rng)\n\n logger.debug(\"layer_permutation: final distance for this trial = %s\", dist)\n if dist == len(gates) and depth_step < best_depth:\n logger.debug(\"layer_permutation: got circuit with improved depth %s\",\n depth_step)\n best_edges = optim_edges\n best_layout = trial_layout\n best_depth = min(best_depth, depth_step)\n\n # Break out of trial loop if we found a depth 1 circuit\n # 
since we can't improve it further\n if best_depth == 1:\n break\n\n # If we have no best circuit for this layer, all of the\n # trials have failed\n if best_layout is None:\n logger.debug(\"layer_permutation: failed!\")\n return False, None, None, None, False\n\n edgs = best_edges.edges()\n for idx in range(best_edges.size//2):\n slice_circuit.apply_operation_back(\n SwapGate(), [initial_layout[edgs[2*idx]], initial_layout[edgs[2*idx+1]]], [])\n trial_circuit.extend_back(slice_circuit)\n best_circuit = trial_circuit\n\n # Otherwise, we return our result for this layer\n logger.debug(\"layer_permutation: success!\")\n best_lay = best_layout.to_layout(qregs)\n return True, best_circuit, best_depth, best_lay, False\n\n\ndef regtuple_to_numeric(items, qregs):\n \"\"\"Takes Qubit instances and converts them into an integer array.\n\n Args:\n items (list): List of Qubit instances to convert.\n qregs (dict): List of Qubit instances.\n Returns:\n ndarray: Array of integers.\n\n \"\"\"\n sizes = [qr.size for qr in qregs.values()]\n reg_idx = np.cumsum([0]+sizes)\n regint = {}\n for ind, qreg in enumerate(qregs.values()):\n regint[qreg] = ind\n out = np.zeros(len(items), dtype=np.int32)\n for idx, val in enumerate(items):\n out[idx] = reg_idx[regint[val.register]]+val.index\n return out\n\n\ndef gates_to_idx(gates, qregs):\n \"\"\"Converts gate tuples into a nested list of integers.\n\n Args:\n gates (list): List of Qubit instances representing gates.\n qregs (dict): List of Qubit instances.\n\n Returns:\n list: Nested list of integers for gates.\n \"\"\"\n sizes = [qr.size for qr in qregs.values()]\n reg_idx = np.cumsum([0]+sizes)\n regint = {}\n for ind, qreg in enumerate(qregs.values()):\n regint[qreg] = ind\n out = np.zeros(2*len(gates), dtype=np.int32)\n for idx, gate in enumerate(gates):\n out[2*idx] = reg_idx[regint[gate[0].register]]+gate[0].index\n out[2*idx+1] = reg_idx[regint[gate[1].register]]+gate[1].index\n return out\n",
"# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2018.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\n\"\"\"\nTranspiler pass to optimize chains of single-qubit u1, u2, u3 gates by combining them into\na single gate.\n\"\"\"\n\nfrom itertools import groupby\n\nimport numpy as np\n\nfrom qiskit.transpiler.exceptions import TranspilerError\nfrom qiskit.extensions.standard.u1 import U1Gate\nfrom qiskit.extensions.standard.u2 import U2Gate\nfrom qiskit.extensions.standard.u3 import U3Gate\nfrom qiskit.circuit.gate import Gate\nfrom qiskit.transpiler.basepasses import TransformationPass\nfrom qiskit.quantum_info.operators.quaternion import quaternion_from_euler\nfrom qiskit.dagcircuit import DAGCircuit\nfrom qiskit.circuit import QuantumRegister, Parameter\n\n_CHOP_THRESHOLD = 1e-15\n\n\nclass Optimize1qGates(TransformationPass):\n \"\"\"Simplify runs of single qubit gates in the [\"u1\", \"u2\", \"u3\", \"cx\", \"id\"] basis.\"\"\"\n def run(self, dag):\n \"\"\"Return a new circuit that has been optimized.\"\"\"\n runs = dag.collect_runs([\"u1\", \"u2\", \"u3\", \"id\"])\n runs = _split_runs_on_parameters(runs)\n for run in runs:\n right_name = \"u1\"\n right_parameters = (0, 0, 0) # (theta, phi, lambda)\n\n for current_node in run:\n left_name = current_node.name\n if (current_node.condition is not None\n or len(current_node.qargs) != 1\n or left_name not in [\"u1\", \"u2\", \"u3\", \"id\"]):\n raise TranspilerError(\"internal error\")\n if left_name == \"u1\":\n left_parameters = (0, 0, current_node.op.params[0])\n elif left_name == \"u2\":\n left_parameters = (np.pi / 2, current_node.op.params[0],\n current_node.op.params[1])\n elif left_name == \"u3\":\n left_parameters = tuple(current_node.op.params)\n else:\n left_name = \"u1\" # replace id with u1\n left_parameters = (0, 0, 0)\n # If there are any sympy objects coming from the gate convert\n # to numpy.\n left_parameters = tuple([float(x) for x in left_parameters])\n # Compose gates\n name_tuple = (left_name, right_name)\n if name_tuple == (\"u1\", \"u1\"):\n # u1(lambda1) * u1(lambda2) = u1(lambda1 + lambda2)\n right_parameters = (0, 0, right_parameters[2] +\n left_parameters[2])\n elif name_tuple == (\"u1\", \"u2\"):\n # u1(lambda1) * u2(phi2, lambda2) = u2(phi2 + lambda1, lambda2)\n right_parameters = (np.pi / 2, right_parameters[1] +\n left_parameters[2], right_parameters[2])\n elif name_tuple == (\"u2\", \"u1\"):\n # u2(phi1, lambda1) * u1(lambda2) = u2(phi1, lambda1 + lambda2)\n right_name = \"u2\"\n right_parameters = (np.pi / 2, left_parameters[1],\n right_parameters[2] + left_parameters[2])\n elif name_tuple == (\"u1\", \"u3\"):\n # u1(lambda1) * u3(theta2, phi2, lambda2) =\n # u3(theta2, phi2 + lambda1, lambda2)\n right_parameters = (right_parameters[0], right_parameters[1] +\n left_parameters[2], right_parameters[2])\n elif name_tuple == (\"u3\", \"u1\"):\n # u3(theta1, phi1, lambda1) * u1(lambda2) =\n # u3(theta1, phi1, lambda1 + lambda2)\n right_name = \"u3\"\n right_parameters = (left_parameters[0], left_parameters[1],\n right_parameters[2] + left_parameters[2])\n elif name_tuple == (\"u2\", \"u2\"):\n # 
Using Ry(pi/2).Rz(2*lambda).Ry(pi/2) =\n # Rz(pi/2).Ry(pi-2*lambda).Rz(pi/2),\n # u2(phi1, lambda1) * u2(phi2, lambda2) =\n # u3(pi - lambda1 - phi2, phi1 + pi/2, lambda2 + pi/2)\n right_name = \"u3\"\n right_parameters = (np.pi - left_parameters[2] -\n right_parameters[1], left_parameters[1] +\n np.pi / 2, right_parameters[2] +\n np.pi / 2)\n elif name_tuple[1] == \"nop\":\n right_name = left_name\n right_parameters = left_parameters\n else:\n # For composing u3's or u2's with u3's, use\n # u2(phi, lambda) = u3(pi/2, phi, lambda)\n # together with the qiskit.mapper.compose_u3 method.\n right_name = \"u3\"\n # Evaluate the symbolic expressions for efficiency\n right_parameters = Optimize1qGates.compose_u3(left_parameters[0],\n left_parameters[1],\n left_parameters[2],\n right_parameters[0],\n right_parameters[1],\n right_parameters[2])\n # Why evalf()? This program:\n # OPENQASM 2.0;\n # include \"qelib1.inc\";\n # qreg q[2];\n # creg c[2];\n # u3(0.518016983430947*pi,1.37051598592907*pi,1.36816383603222*pi) q[0];\n # u3(1.69867232277986*pi,0.371448347747471*pi,0.461117217930936*pi) q[0];\n # u3(0.294319836336836*pi,0.450325871124225*pi,1.46804720442555*pi) q[0];\n # measure q -> c;\n # took >630 seconds (did not complete) to optimize without\n # calling evalf() at all, 19 seconds to optimize calling\n # evalf() AFTER compose_u3, and 1 second to optimize\n # calling evalf() BEFORE compose_u3.\n # 1. Here down, when we simplify, we add f(theta) to lambda to\n # correct the global phase when f(theta) is 2*pi. This isn't\n # necessary but the other steps preserve the global phase, so\n # we continue in that manner.\n # 2. The final step will remove Z rotations by 2*pi.\n # 3. Note that is_zero is true only if the expression is exactly\n # zero. If the input expressions have already been evaluated\n # then these final simplifications will not occur.\n # TODO After we refactor, we should have separate passes for\n # exact and approximate rewriting.\n\n # Y rotation is 0 mod 2*pi, so the gate is a u1\n if np.mod(right_parameters[0], (2 * np.pi)) == 0 \\\n and right_name != \"u1\":\n right_name = \"u1\"\n right_parameters = (0, 0, right_parameters[1] +\n right_parameters[2] +\n right_parameters[0])\n # Y rotation is pi/2 or -pi/2 mod 2*pi, so the gate is a u2\n if right_name == \"u3\":\n # theta = pi/2 + 2*k*pi\n if np.mod((right_parameters[0] - np.pi / 2), (2 * np.pi)) == 0:\n right_name = \"u2\"\n right_parameters = (np.pi / 2, right_parameters[1],\n right_parameters[2] +\n (right_parameters[0] - np.pi / 2))\n # theta = -pi/2 + 2*k*pi\n if np.mod((right_parameters[0] + np.pi / 2), (2 * np.pi)) == 0:\n right_name = \"u2\"\n right_parameters = (np.pi / 2, right_parameters[1] +\n np.pi, right_parameters[2] -\n np.pi + (right_parameters[0] +\n np.pi / 2))\n # u1 and lambda is 0 mod 2*pi so gate is nop (up to a global phase)\n if right_name == \"u1\" and np.mod(right_parameters[2], (2 * np.pi)) == 0:\n right_name = \"nop\"\n\n # Replace the the first node in the run with a dummy DAG which contains a dummy\n # qubit. 
The name is irrelevant, because substitute_node_with_dag will take care of\n # putting it in the right place.\n run_qarg = QuantumRegister(1, 'q')[0]\n new_op = Gate(name=\"\", num_qubits=1, params=[])\n if right_name == \"u1\":\n new_op = U1Gate(right_parameters[2])\n if right_name == \"u2\":\n new_op = U2Gate(right_parameters[1], right_parameters[2])\n if right_name == \"u3\":\n new_op = U3Gate(*right_parameters)\n\n if right_name != 'nop':\n new_dag = DAGCircuit()\n new_dag.add_qreg(run_qarg.register)\n new_dag.apply_operation_back(new_op, [run_qarg], [])\n dag.substitute_node_with_dag(run[0], new_dag)\n\n # Delete the other nodes in the run\n for current_node in run[1:]:\n dag.remove_op_node(current_node)\n if right_name == \"nop\":\n dag.remove_op_node(run[0])\n\n return dag\n\n @staticmethod\n def compose_u3(theta1, phi1, lambda1, theta2, phi2, lambda2):\n \"\"\"Return a triple theta, phi, lambda for the product.\n\n u3(theta, phi, lambda)\n = u3(theta1, phi1, lambda1).u3(theta2, phi2, lambda2)\n = Rz(phi1).Ry(theta1).Rz(lambda1+phi2).Ry(theta2).Rz(lambda2)\n = Rz(phi1).Rz(phi').Ry(theta').Rz(lambda').Rz(lambda2)\n = u3(theta', phi1 + phi', lambda2 + lambda')\n\n Return theta, phi, lambda.\n \"\"\"\n # Careful with the factor of two in yzy_to_zyz\n thetap, phip, lambdap = Optimize1qGates.yzy_to_zyz((lambda1 + phi2), theta1, theta2)\n (theta, phi, lamb) = (thetap, phi1 + phip, lambda2 + lambdap)\n\n return (theta, phi, lamb)\n\n @staticmethod\n def yzy_to_zyz(xi, theta1, theta2, eps=1e-9): # pylint: disable=invalid-name\n \"\"\"Express a Y.Z.Y single qubit gate as a Z.Y.Z gate.\n\n Solve the equation\n\n .. math::\n\n Ry(theta1).Rz(xi).Ry(theta2) = Rz(phi).Ry(theta).Rz(lambda)\n\n for theta, phi, and lambda.\n\n Return a solution theta, phi, and lambda.\n \"\"\"\n quaternion_yzy = quaternion_from_euler([theta1, xi, theta2], 'yzy')\n euler = quaternion_yzy.to_zyz()\n quaternion_zyz = quaternion_from_euler(euler, 'zyz')\n # output order different than rotation order\n out_angles = (euler[1], euler[0], euler[2])\n abs_inner = abs(quaternion_zyz.data.dot(quaternion_yzy.data))\n if not np.allclose(abs_inner, 1, eps):\n raise TranspilerError('YZY and ZYZ angles do not give same rotation matrix.')\n out_angles = tuple(0 if np.abs(angle) < _CHOP_THRESHOLD else angle\n for angle in out_angles)\n return out_angles\n\n\ndef _split_runs_on_parameters(runs):\n \"\"\"Finds runs containing parameterized gates and splits them into sequential\n runs excluding the parameterized gates.\n \"\"\"\n\n def _is_dagnode_parameterized(node):\n return any(isinstance(param, Parameter) for param in node.op.params)\n\n out = []\n for run in runs:\n groups = groupby(run, _is_dagnode_parameterized)\n\n for group_is_parameterized, gates in groups:\n if not group_is_parameterized:\n out.append(list(gates))\n\n return out\n",
"# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2018.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\" `_text_circuit_drawer` \"draws\" a circuit in \"ascii art\" \"\"\"\n\nfrom codecs import encode\nfrom math import pi\nimport unittest\nimport sympy\nimport numpy\n\nfrom qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister\nfrom qiskit.visualization import text as elements\nfrom qiskit.visualization.circuit_visualization import _text_circuit_drawer\nfrom qiskit.test import QiskitTestCase\nfrom qiskit.circuit import Gate, Parameter\nfrom qiskit.quantum_info.random import random_unitary\nfrom qiskit.quantum_info.operators import SuperOp\n\n\nclass TestTextDrawerElement(QiskitTestCase):\n \"\"\" Draw each element\"\"\"\n\n def assertEqualElement(self, expected, element):\n \"\"\"\n Asserts the top,mid,bot trio\n Args:\n expected (list[top,mid,bot]): What is expected.\n element (DrawElement): The element to check.\n \"\"\"\n try:\n encode('\\n'.join(expected), encoding='cp437')\n except UnicodeEncodeError:\n self.fail(\"_text_circuit_drawer() should only use extended ascii (aka code page 437).\")\n\n self.assertEqual(expected[0], element.top)\n self.assertEqual(expected[1], element.mid)\n self.assertEqual(expected[2], element.bot)\n\n def test_measure_to(self):\n \"\"\" MeasureTo element. \"\"\"\n element = elements.MeasureTo()\n expected = [\" ║ \",\n \"═╩═\",\n \" \"]\n self.assertEqualElement(expected, element)\n\n def test_measure_from(self):\n \"\"\" MeasureFrom element. 
\"\"\"\n element = elements.MeasureFrom()\n expected = [\"┌─┐\",\n \"┤M├\",\n \"└╥┘\"]\n self.assertEqualElement(expected, element)\n\n def test_text_empty(self):\n \"\"\" The empty circuit.\"\"\"\n expected = ''\n circuit = QuantumCircuit()\n self.assertEqual(str(_text_circuit_drawer(circuit)), expected)\n\n def test_text_pager(self):\n \"\"\" The pager breaks the circuit when the drawing does not fit in the console.\"\"\"\n expected = '\\n'.join([\" ┌───┐ »\",\n \"q_0: |0>┤ X ├──■──»\",\n \" └─┬─┘┌─┴─┐»\",\n \"q_1: |0>──■──┤ X ├»\",\n \" └───┘»\",\n \" c_0: 0 ══════════»\",\n \" »\",\n \"« ┌─┐┌───┐ »\",\n \"«q_0: ┤M├┤ X ├──■──»\",\n \"« └╥┘└─┬─┘┌─┴─┐»\",\n \"«q_1: ─╫───■──┤ X ├»\",\n \"« ║ └───┘»\",\n \"«c_0: ═╩═══════════»\",\n \"« »\",\n \"« ┌─┐┌───┐ \",\n \"«q_0: ┤M├┤ X ├──■──\",\n \"« └╥┘└─┬─┘┌─┴─┐\",\n \"«q_1: ─╫───■──┤ X ├\",\n \"« ║ └───┘\",\n \"«c_0: ═╩═══════════\",\n \"« \"])\n\n qr = QuantumRegister(2, 'q')\n cr = ClassicalRegister(1, 'c')\n circuit = QuantumCircuit(qr, cr)\n circuit.cx(qr[1], qr[0])\n circuit.cx(qr[0], qr[1])\n circuit.measure(qr[0], cr[0])\n circuit.cx(qr[1], qr[0])\n circuit.cx(qr[0], qr[1])\n circuit.measure(qr[0], cr[0])\n circuit.cx(qr[1], qr[0])\n circuit.cx(qr[0], qr[1])\n self.assertEqual(str(_text_circuit_drawer(circuit, line_length=20)), expected)\n\n def test_text_no_pager(self):\n \"\"\" The pager can be disable.\"\"\"\n qr = QuantumRegister(1, 'q')\n circuit = QuantumCircuit(qr)\n for _ in range(100):\n circuit.h(qr[0])\n amount_of_lines = str(_text_circuit_drawer(circuit, line_length=-1)).count('\\n')\n self.assertEqual(amount_of_lines, 2)\n\n\nclass TestTextDrawerGatesInCircuit(QiskitTestCase):\n \"\"\" Gate by gate checks in different settings.\"\"\"\n\n def test_text_measure_1(self):\n \"\"\" The measure operator, using 3-bit-length registers. \"\"\"\n expected = '\\n'.join([' ┌─┐ ',\n 'q_0: |0>┤M├──────',\n ' └╥┘┌─┐ ',\n 'q_1: |0>─╫─┤M├───',\n ' ║ └╥┘┌─┐',\n 'q_2: |0>─╫──╫─┤M├',\n ' ║ ║ └╥┘',\n ' c_0: 0 ═╩══╬══╬═',\n ' ║ ║ ',\n ' c_1: 0 ════╩══╬═',\n ' ║ ',\n ' c_2: 0 ═══════╩═',\n ' '])\n\n qr = QuantumRegister(3, 'q')\n cr = ClassicalRegister(3, 'c')\n circuit = QuantumCircuit(qr, cr)\n circuit.measure(qr, cr)\n self.assertEqual(str(_text_circuit_drawer(circuit)), expected)\n\n def test_text_measure_1_reverse_bits(self):\n \"\"\" The measure operator, using 3-bit-length registers, with reverse_bits \"\"\"\n expected = '\\n'.join([' ┌─┐',\n 'q_2: |0>──────┤M├',\n ' ┌─┐└╥┘',\n 'q_1: |0>───┤M├─╫─',\n ' ┌─┐└╥┘ ║ ',\n 'q_0: |0>┤M├─╫──╫─',\n ' └╥┘ ║ ║ ',\n ' c_2: 0 ═╬══╬══╩═',\n ' ║ ║ ',\n ' c_1: 0 ═╬══╩════',\n ' ║ ',\n ' c_0: 0 ═╩═══════',\n ' '])\n\n qr = QuantumRegister(3, 'q')\n cr = ClassicalRegister(3, 'c')\n circuit = QuantumCircuit(qr, cr)\n circuit.measure(qr, cr)\n self.assertEqual(str(_text_circuit_drawer(circuit, reverse_bits=True)), expected)\n\n def test_text_measure_2(self):\n \"\"\" The measure operator, using some registers. 
\"\"\"\n expected = '\\n'.join([' ',\n 'q1_0: |0>──────',\n ' ',\n 'q1_1: |0>──────',\n ' ┌─┐ ',\n 'q2_0: |0>┤M├───',\n ' └╥┘┌─┐',\n 'q2_1: |0>─╫─┤M├',\n ' ║ └╥┘',\n ' c1_0: 0 ═╬══╬═',\n ' ║ ║ ',\n ' c1_1: 0 ═╬══╬═',\n ' ║ ║ ',\n ' c2_0: 0 ═╩══╬═',\n ' ║ ',\n ' c2_1: 0 ════╩═',\n ' '])\n\n qr1 = QuantumRegister(2, 'q1')\n cr1 = ClassicalRegister(2, 'c1')\n qr2 = QuantumRegister(2, 'q2')\n cr2 = ClassicalRegister(2, 'c2')\n circuit = QuantumCircuit(qr1, qr2, cr1, cr2)\n circuit.measure(qr2, cr2)\n self.assertEqual(str(_text_circuit_drawer(circuit)), expected)\n\n def test_text_measure_2_reverse_bits(self):\n \"\"\" The measure operator, using some registers, with reverse_bits \"\"\"\n expected = '\\n'.join([' ┌─┐',\n 'q2_1: |0>───┤M├',\n ' ┌─┐└╥┘',\n 'q2_0: |0>┤M├─╫─',\n ' └╥┘ ║ ',\n 'q1_1: |0>─╫──╫─',\n ' ║ ║ ',\n 'q1_0: |0>─╫──╫─',\n ' ║ ║ ',\n ' c2_1: 0 ═╬══╩═',\n ' ║ ',\n ' c2_0: 0 ═╩════',\n ' ',\n ' c1_1: 0 ══════',\n ' ',\n ' c1_0: 0 ══════',\n ' '])\n\n qr1 = QuantumRegister(2, 'q1')\n cr1 = ClassicalRegister(2, 'c1')\n qr2 = QuantumRegister(2, 'q2')\n cr2 = ClassicalRegister(2, 'c2')\n circuit = QuantumCircuit(qr1, qr2, cr1, cr2)\n circuit.measure(qr2, cr2)\n self.assertEqual(str(_text_circuit_drawer(circuit, reverse_bits=True)), expected)\n\n def test_text_swap(self):\n \"\"\" Swap drawing. \"\"\"\n expected = '\\n'.join([' ',\n 'q1_0: |0>─X────',\n ' │ ',\n 'q1_1: |0>─┼──X─',\n ' │ │ ',\n 'q2_0: |0>─X──┼─',\n ' │ ',\n 'q2_1: |0>────X─',\n ' '])\n\n qr1 = QuantumRegister(2, 'q1')\n qr2 = QuantumRegister(2, 'q2')\n circuit = QuantumCircuit(qr1, qr2)\n circuit.swap(qr1, qr2)\n self.assertEqual(str(_text_circuit_drawer(circuit)), expected)\n\n def test_text_swap_reverse_bits(self):\n \"\"\" Swap drawing with reverse_bits. \"\"\"\n expected = '\\n'.join([' ',\n 'q2_1: |0>────X─',\n ' │ ',\n 'q2_0: |0>─X──┼─',\n ' │ │ ',\n 'q1_1: |0>─┼──X─',\n ' │ ',\n 'q1_0: |0>─X────',\n ' '])\n\n qr1 = QuantumRegister(2, 'q1')\n qr2 = QuantumRegister(2, 'q2')\n circuit = QuantumCircuit(qr1, qr2)\n circuit.swap(qr1, qr2)\n self.assertEqual(str(_text_circuit_drawer(circuit, reverse_bits=True)), expected)\n\n def test_text_cswap(self):\n \"\"\" CSwap drawing. \"\"\"\n expected = '\\n'.join([\" \",\n \"q_0: |0>─■──X──X─\",\n \" │ │ │ \",\n \"q_1: |0>─X──■──X─\",\n \" │ │ │ \",\n \"q_2: |0>─X──X──■─\",\n \" \"])\n\n qr = QuantumRegister(3, 'q')\n circuit = QuantumCircuit(qr)\n circuit.cswap(qr[0], qr[1], qr[2])\n circuit.cswap(qr[1], qr[0], qr[2])\n circuit.cswap(qr[2], qr[1], qr[0])\n self.assertEqual(str(_text_circuit_drawer(circuit)), expected)\n\n def test_text_cswap_reverse_bits(self):\n \"\"\" CSwap drawing with reverse_bits. \"\"\"\n expected = '\\n'.join([' ',\n 'q_2: |0>─X──X──■─',\n ' │ │ │ ',\n 'q_1: |0>─X──■──X─',\n ' │ │ │ ',\n 'q_0: |0>─■──X──X─',\n ' '])\n\n qr = QuantumRegister(3, 'q')\n circuit = QuantumCircuit(qr)\n circuit.cswap(qr[0], qr[1], qr[2])\n circuit.cswap(qr[1], qr[0], qr[2])\n circuit.cswap(qr[2], qr[1], qr[0])\n self.assertEqual(str(_text_circuit_drawer(circuit, reverse_bits=True)), expected)\n\n def test_text_cu3(self):\n \"\"\" cu3 drawing. 
\"\"\"\n expected = '\\n'.join([\" ┌──────────────────────────┐\",\n \"q_0: |0>─────────────■──────────────┤ U3(1.5708,1.5708,1.5708) ├\",\n \" ┌────────────┴─────────────┐└────────────┬─────────────┘\",\n \"q_1: |0>┤ U3(1.5708,1.5708,1.5708) ├─────────────┼──────────────\",\n \" └──────────────────────────┘ │ \",\n \"q_2: |0>─────────────────────────────────────────■──────────────\",\n \" \"])\n\n qr = QuantumRegister(3, 'q')\n circuit = QuantumCircuit(qr)\n circuit.cu3(pi / 2, pi / 2, pi / 2, qr[0], qr[1])\n circuit.cu3(pi / 2, pi / 2, pi / 2, qr[2], qr[0])\n self.assertEqual(str(_text_circuit_drawer(circuit)), expected)\n\n def test_text_cu3_reverse_bits(self):\n \"\"\" cu3 drawing with reverse_bits\"\"\"\n expected = '\\n'.join([\" \",\n \"q_2: |0>─────────────────────────────────────────■──────────────\",\n \" ┌──────────────────────────┐ │ \",\n \"q_1: |0>┤ U3(1.5708,1.5708,1.5708) ├─────────────┼──────────────\",\n \" └────────────┬─────────────┘┌────────────┴─────────────┐\",\n \"q_0: |0>─────────────■──────────────┤ U3(1.5708,1.5708,1.5708) ├\",\n \" └──────────────────────────┘\"])\n\n qr = QuantumRegister(3, 'q')\n circuit = QuantumCircuit(qr)\n circuit.cu3(pi / 2, pi / 2, pi / 2, qr[0], qr[1])\n circuit.cu3(pi / 2, pi / 2, pi / 2, qr[2], qr[0])\n self.assertEqual(str(_text_circuit_drawer(circuit, reverse_bits=True)), expected)\n\n def test_text_crz(self):\n \"\"\" crz drawing. \"\"\"\n expected = '\\n'.join([\" ┌────────────┐\",\n \"q_0: |0>──────■───────┤ Rz(1.5708) ├\",\n \" ┌─────┴──────┐└─────┬──────┘\",\n \"q_1: |0>┤ Rz(1.5708) ├──────┼───────\",\n \" └────────────┘ │ \",\n \"q_2: |0>────────────────────■───────\",\n \" \"])\n qr = QuantumRegister(3, 'q')\n circuit = QuantumCircuit(qr)\n circuit.crz(pi / 2, qr[0], qr[1])\n circuit.crz(pi / 2, qr[2], qr[0])\n self.assertEqual(str(_text_circuit_drawer(circuit)), expected)\n\n def test_text_cx(self):\n \"\"\" cx drawing. \"\"\"\n expected = '\\n'.join([\" ┌───┐\",\n \"q_0: |0>──■──┤ X ├\",\n \" ┌─┴─┐└─┬─┘\",\n \"q_1: |0>┤ X ├──┼──\",\n \" └───┘ │ \",\n \"q_2: |0>───────■──\",\n \" \"])\n qr = QuantumRegister(3, 'q')\n circuit = QuantumCircuit(qr)\n circuit.cx(qr[0], qr[1])\n circuit.cx(qr[2], qr[0])\n self.assertEqual(str(_text_circuit_drawer(circuit)), expected)\n\n def test_text_cy(self):\n \"\"\" cy drawing. \"\"\"\n expected = '\\n'.join([\" ┌───┐\",\n \"q_0: |0>──■──┤ Y ├\",\n \" ┌─┴─┐└─┬─┘\",\n \"q_1: |0>┤ Y ├──┼──\",\n \" └───┘ │ \",\n \"q_2: |0>───────■──\",\n \" \"])\n qr = QuantumRegister(3, 'q')\n circuit = QuantumCircuit(qr)\n circuit.cy(qr[0], qr[1])\n circuit.cy(qr[2], qr[0])\n self.assertEqual(str(_text_circuit_drawer(circuit)), expected)\n\n def test_text_cz(self):\n \"\"\" cz drawing. \"\"\"\n expected = '\\n'.join([\" \",\n \"q_0: |0>─■──■─\",\n \" │ │ \",\n \"q_1: |0>─■──┼─\",\n \" │ \",\n \"q_2: |0>────■─\",\n \" \"])\n qr = QuantumRegister(3, 'q')\n circuit = QuantumCircuit(qr)\n circuit.cz(qr[0], qr[1])\n circuit.cz(qr[2], qr[0])\n self.assertEqual(str(_text_circuit_drawer(circuit)), expected)\n\n def test_text_ch(self):\n \"\"\" ch drawing. \"\"\"\n expected = '\\n'.join([\" ┌───┐\",\n \"q_0: |0>──■──┤ H ├\",\n \" ┌─┴─┐└─┬─┘\",\n \"q_1: |0>┤ H ├──┼──\",\n \" └───┘ │ \",\n \"q_2: |0>───────■──\",\n \" \"])\n qr = QuantumRegister(3, 'q')\n circuit = QuantumCircuit(qr)\n circuit.ch(qr[0], qr[1])\n circuit.ch(qr[2], qr[0])\n self.assertEqual(str(_text_circuit_drawer(circuit)), expected)\n\n def test_text_rzz(self):\n \"\"\" rzz drawing. 
See #1957 \"\"\"\n expected = '\\n'.join([\" \",\n \"q_0: |0>─■───────────────────\",\n \" │zz(0) \",\n \"q_1: |0>─■───────■───────────\",\n \" │zz(1.5708) \",\n \"q_2: |0>─────────■───────────\",\n \" \"])\n qr = QuantumRegister(3, 'q')\n circuit = QuantumCircuit(qr)\n circuit.rzz(0, qr[0], qr[1])\n circuit.rzz(pi / 2, qr[2], qr[1])\n self.assertEqual(str(_text_circuit_drawer(circuit)), expected)\n\n def test_text_cu1(self):\n \"\"\" cu1 drawing. \"\"\"\n expected = '\\n'.join([\" \",\n \"q_0: |0>─■────────■───────\",\n \" │1.5708 │ \",\n \"q_1: |0>─■────────┼───────\",\n \" │1.5708 \",\n \"q_2: |0>──────────■───────\",\n \" \"])\n qr = QuantumRegister(3, 'q')\n circuit = QuantumCircuit(qr)\n circuit.cu1(pi / 2, qr[0], qr[1])\n circuit.cu1(pi / 2, qr[2], qr[0])\n self.assertEqual(str(_text_circuit_drawer(circuit)), expected)\n\n def test_text_cu1_reverse_bits(self):\n \"\"\" cu1 drawing with reverse_bits\"\"\"\n expected = '\\n'.join([\" \",\n \"q_2: |0>──────────■───────\",\n \" │ \",\n \"q_1: |0>─■────────┼───────\",\n \" │1.5708 │1.5708 \",\n \"q_0: |0>─■────────■───────\",\n \" \"])\n qr = QuantumRegister(3, 'q')\n circuit = QuantumCircuit(qr)\n circuit.cu1(pi / 2, qr[0], qr[1])\n circuit.cu1(pi / 2, qr[2], qr[0])\n self.assertEqual(str(_text_circuit_drawer(circuit, reverse_bits=True)), expected)\n\n def test_text_ccx(self):\n \"\"\" cx drawing. \"\"\"\n expected = '\\n'.join([\" ┌───┐\",\n \"q_0: |0>──■────■──┤ X ├\",\n \" │ ┌─┴─┐└─┬─┘\",\n \"q_1: |0>──■──┤ X ├──■──\",\n \" ┌─┴─┐└─┬─┘ │ \",\n \"q_2: |0>┤ X ├──■────■──\",\n \" └───┘ \"])\n qr = QuantumRegister(3, 'q')\n circuit = QuantumCircuit(qr)\n circuit.ccx(qr[0], qr[1], qr[2])\n circuit.ccx(qr[2], qr[0], qr[1])\n circuit.ccx(qr[2], qr[1], qr[0])\n self.assertEqual(str(_text_circuit_drawer(circuit)), expected)\n\n def test_text_reset(self):\n \"\"\" Reset drawing. \"\"\"\n expected = '\\n'.join([' ',\n 'q1_0: |0>─|0>─',\n ' ',\n 'q1_1: |0>─|0>─',\n ' ',\n 'q2_0: |0>─────',\n ' ',\n 'q2_1: |0>─|0>─',\n ' '])\n\n qr1 = QuantumRegister(2, 'q1')\n qr2 = QuantumRegister(2, 'q2')\n circuit = QuantumCircuit(qr1, qr2)\n circuit.reset(qr1)\n circuit.reset(qr2[1])\n self.assertEqual(str(_text_circuit_drawer(circuit)), expected)\n\n def test_text_single_gate(self):\n \"\"\" Single Qbit gate drawing. \"\"\"\n expected = '\\n'.join([' ┌───┐',\n 'q1_0: |0>┤ H ├',\n ' ├───┤',\n 'q1_1: |0>┤ H ├',\n ' └───┘',\n 'q2_0: |0>─────',\n ' ┌───┐',\n 'q2_1: |0>┤ H ├',\n ' └───┘'])\n\n qr1 = QuantumRegister(2, 'q1')\n qr2 = QuantumRegister(2, 'q2')\n circuit = QuantumCircuit(qr1, qr2)\n circuit.h(qr1)\n circuit.h(qr2[1])\n self.assertEqual(str(_text_circuit_drawer(circuit)), expected)\n\n def test_text_barrier(self):\n \"\"\" Barrier drawing. \"\"\"\n expected = '\\n'.join([' ░ ',\n 'q1_0: |0>─░─',\n ' ░ ',\n 'q1_1: |0>─░─',\n ' ░ ',\n 'q2_0: |0>───',\n ' ░ ',\n 'q2_1: |0>─░─',\n ' ░ '])\n\n qr1 = QuantumRegister(2, 'q1')\n qr2 = QuantumRegister(2, 'q2')\n circuit = QuantumCircuit(qr1, qr2)\n circuit.barrier(qr1)\n circuit.barrier(qr2[1])\n self.assertEqual(str(_text_circuit_drawer(circuit)), expected)\n\n def test_text_no_barriers(self):\n \"\"\" Drawing without plotbarriers. 
\"\"\"\n expected = '\\n'.join([' ┌───┐ ',\n 'q1_0: |0>┤ H ├─────',\n ' ├───┤ ',\n 'q1_1: |0>┤ H ├─────',\n ' ├───┤ ',\n 'q2_0: |0>┤ H ├─────',\n ' └───┘┌───┐',\n 'q2_1: |0>─────┤ H ├',\n ' └───┘'])\n\n qr1 = QuantumRegister(2, 'q1')\n qr2 = QuantumRegister(2, 'q2')\n circuit = QuantumCircuit(qr1, qr2)\n circuit.h(qr1)\n circuit.barrier(qr1)\n circuit.barrier(qr2[1])\n circuit.h(qr2)\n self.assertEqual(str(_text_circuit_drawer(circuit, plotbarriers=False)), expected)\n\n def test_text_conditional_1(self):\n \"\"\" Conditional drawing with 1-bit-length regs.\"\"\"\n qasm_string = \"\"\"\n OPENQASM 2.0;\n include \"qelib1.inc\";\n qreg q[1];\n creg c0[1];\n creg c1[1];\n if(c0==1) x q[0];\n if(c1==1) x q[0];\n \"\"\"\n expected = '\\n'.join([\" ┌───┐ ┌───┐ \",\n \"q_0: |0>─┤ X ├──┤ X ├─\",\n \" ┌┴─┴─┴┐ └─┬─┘ \",\n \"c0_0: 0 ╡ = 1 ╞═══╪═══\",\n \" └─────┘┌──┴──┐\",\n \"c1_0: 0 ═══════╡ = 1 ╞\",\n \" └─────┘\"])\n\n circuit = QuantumCircuit.from_qasm_str(qasm_string)\n self.assertEqual(str(_text_circuit_drawer(circuit)), expected)\n\n def test_text_conditional_2(self):\n \"\"\" Conditional drawing with 2-bit-length regs.\"\"\"\n qasm_string = \"\"\"\n OPENQASM 2.0;\n include \"qelib1.inc\";\n qreg q[1];\n creg c0[2];\n creg c1[2];\n if(c0==2) x q[0];\n if(c1==2) x q[0];\n \"\"\"\n expected = '\\n'.join([\" ┌───┐ ┌───┐ \",\n \"q_0: |0>─┤ X ├──┤ X ├─\",\n \" ┌┴─┴─┴┐ └─┬─┘ \",\n \"c0_0: 0 ╡ ╞═══╪═══\",\n \" │ = 2 │ │ \",\n \"c0_1: 0 ╡ ╞═══╪═══\",\n \" └─────┘┌──┴──┐\",\n \"c1_0: 0 ═══════╡ ╞\",\n \" │ = 2 │\",\n \"c1_1: 0 ═══════╡ ╞\",\n \" └─────┘\"])\n circuit = QuantumCircuit.from_qasm_str(qasm_string)\n self.assertEqual(str(_text_circuit_drawer(circuit)), expected)\n\n def test_text_conditional_3(self):\n \"\"\" Conditional drawing with 3-bit-length regs.\"\"\"\n qasm_string = \"\"\"\n OPENQASM 2.0;\n include \"qelib1.inc\";\n qreg q[1];\n creg c0[3];\n creg c1[3];\n if(c0==3) x q[0];\n if(c1==3) x q[0];\n \"\"\"\n expected = '\\n'.join([\" ┌───┐ ┌───┐ \",\n \"q_0: |0>─┤ X ├──┤ X ├─\",\n \" ┌┴─┴─┴┐ └─┬─┘ \",\n \"c0_0: 0 ╡ ╞═══╪═══\",\n \" │ │ │ \",\n \"c0_1: 0 ╡ = 3 ╞═══╪═══\",\n \" │ │ │ \",\n \"c0_2: 0 ╡ ╞═══╪═══\",\n \" └─────┘┌──┴──┐\",\n \"c1_0: 0 ═══════╡ ╞\",\n \" │ │\",\n \"c1_1: 0 ═══════╡ = 3 ╞\",\n \" │ │\",\n \"c1_2: 0 ═══════╡ ╞\",\n \" └─────┘\"])\n circuit = QuantumCircuit.from_qasm_str(qasm_string)\n self.assertEqual(str(_text_circuit_drawer(circuit)), expected)\n\n def test_text_conditional_4(self):\n \"\"\" Conditional drawing with 4-bit-length regs.\"\"\"\n qasm_string = \"\"\"\n OPENQASM 2.0;\n include \"qelib1.inc\";\n qreg q[1];\n creg c0[4];\n creg c1[4];\n if(c0==4) x q[0];\n if(c1==4) x q[0];\n \"\"\"\n expected = '\\n'.join([\" ┌───┐ ┌───┐ \",\n \"q_0: |0>─┤ X ├──┤ X ├─\",\n \" ┌┴─┴─┴┐ └─┬─┘ \",\n \"c0_0: 0 ╡ ╞═══╪═══\",\n \" │ │ │ \",\n \"c0_1: 0 ╡ ╞═══╪═══\",\n \" │ = 4 │ │ \",\n \"c0_2: 0 ╡ ╞═══╪═══\",\n \" │ │ │ \",\n \"c0_3: 0 ╡ ╞═══╪═══\",\n \" └─────┘┌──┴──┐\",\n \"c1_0: 0 ═══════╡ ╞\",\n \" │ │\",\n \"c1_1: 0 ═══════╡ ╞\",\n \" │ = 4 │\",\n \"c1_2: 0 ═══════╡ ╞\",\n \" │ │\",\n \"c1_3: 0 ═══════╡ ╞\",\n \" └─────┘\"])\n circuit = QuantumCircuit.from_qasm_str(qasm_string)\n self.assertEqual(str(_text_circuit_drawer(circuit)), expected)\n\n def test_text_conditional_5(self):\n \"\"\" Conditional drawing with 5-bit-length regs.\"\"\"\n qasm_string = \"\"\"\n OPENQASM 2.0;\n include \"qelib1.inc\";\n qreg q[1];\n creg c0[5];\n creg c1[5];\n if(c0==5) x q[0];\n if(c1==5) x q[0];\n \"\"\"\n expected = '\\n'.join([\" ┌───┐ ┌───┐ \",\n \"q_0: |0>─┤ X ├──┤ X ├─\",\n 
\" ┌┴─┴─┴┐ └─┬─┘ \",\n \"c0_0: 0 ╡ ╞═══╪═══\",\n \" │ │ │ \",\n \"c0_1: 0 ╡ ╞═══╪═══\",\n \" │ │ │ \",\n \"c0_2: 0 ╡ = 5 ╞═══╪═══\",\n \" │ │ │ \",\n \"c0_3: 0 ╡ ╞═══╪═══\",\n \" │ │ │ \",\n \"c0_4: 0 ╡ ╞═══╪═══\",\n \" └─────┘┌──┴──┐\",\n \"c1_0: 0 ═══════╡ ╞\",\n \" │ │\",\n \"c1_1: 0 ═══════╡ ╞\",\n \" │ │\",\n \"c1_2: 0 ═══════╡ = 5 ╞\",\n \" │ │\",\n \"c1_3: 0 ═══════╡ ╞\",\n \" │ │\",\n \"c1_4: 0 ═══════╡ ╞\",\n \" └─────┘\"])\n circuit = QuantumCircuit.from_qasm_str(qasm_string)\n self.assertEqual(str(_text_circuit_drawer(circuit)), expected)\n\n def test_text_measure_html(self):\n \"\"\" The measure operator. HTML representation. \"\"\"\n expected = '\\n'.join([\"<pre style=\\\"word-wrap: normal;\"\n \"white-space: pre;line-height: 15px;\\\"> ┌─┐\",\n \"q_0: |0>┤M├\",\n \" └╥┘\",\n \" c_0: 0 ═╩═\",\n \" </pre>\"])\n qr = QuantumRegister(1, 'q')\n cr = ClassicalRegister(1, 'c')\n circuit = QuantumCircuit(qr, cr)\n circuit.measure(qr, cr)\n self.assertEqual(_text_circuit_drawer(circuit)._repr_html_(), expected)\n\n def test_text_justify_left(self):\n \"\"\" Drawing with left justify \"\"\"\n expected = '\\n'.join([' ┌───┐ ',\n 'q1_0: |0>┤ X ├───',\n ' ├───┤┌─┐',\n 'q1_1: |0>┤ H ├┤M├',\n ' └───┘└╥┘',\n ' c1_0: 0 ══════╬═',\n ' ║ ',\n ' c1_1: 0 ══════╩═',\n ' '])\n\n qr1 = QuantumRegister(2, 'q1')\n cr1 = ClassicalRegister(2, 'c1')\n circuit = QuantumCircuit(qr1, cr1)\n circuit.x(qr1[0])\n circuit.h(qr1[1])\n circuit.measure(qr1[1], cr1[1])\n self.assertEqual(str(_text_circuit_drawer(circuit, justify='left')), expected)\n\n def test_text_justify_right(self):\n \"\"\" Drawing with right justify \"\"\"\n expected = '\\n'.join([' ┌───┐',\n 'q1_0: |0>─────┤ X ├',\n ' ┌───┐└┬─┬┘',\n 'q1_1: |0>┤ H ├─┤M├─',\n ' └───┘ └╥┘ ',\n ' c1_0: 0 ═══════╬══',\n ' ║ ',\n ' c1_1: 0 ═══════╩══',\n ' '])\n\n qr1 = QuantumRegister(2, 'q1')\n cr1 = ClassicalRegister(2, 'c1')\n circuit = QuantumCircuit(qr1, cr1)\n circuit.x(qr1[0])\n circuit.h(qr1[1])\n circuit.measure(qr1[1], cr1[1])\n self.assertEqual(str(_text_circuit_drawer(circuit, justify='right')), expected)\n\n def test_text_justify_none(self):\n \"\"\" Drawing with none justify \"\"\"\n expected = '\\n'.join([' ┌───┐ ',\n 'q1_0: |0>┤ X ├────────',\n ' └───┘┌───┐┌─┐',\n 'q1_1: |0>─────┤ H ├┤M├',\n ' └───┘└╥┘',\n ' c1_0: 0 ═══════════╬═',\n ' ║ ',\n ' c1_1: 0 ═══════════╩═',\n ' '])\n\n qr1 = QuantumRegister(2, 'q1')\n cr1 = ClassicalRegister(2, 'c1')\n circuit = QuantumCircuit(qr1, cr1)\n circuit.x(qr1[0])\n circuit.h(qr1[1])\n circuit.measure(qr1[1], cr1[1])\n self.assertEqual(str(_text_circuit_drawer(circuit, justify='none')), expected)\n\n def test_text_justify_left_barrier(self):\n \"\"\" Left justify respects barriers\"\"\"\n expected = '\\n'.join([' ┌───┐ ░ ',\n 'q1_0: |0>┤ H ├─░──────',\n ' └───┘ ░ ┌───┐',\n 'q1_1: |0>──────░─┤ H ├',\n ' ░ └───┘'])\n\n qr1 = QuantumRegister(2, 'q1')\n circuit = QuantumCircuit(qr1)\n circuit.h(qr1[0])\n circuit.barrier(qr1)\n circuit.h(qr1[1])\n self.assertEqual(str(_text_circuit_drawer(circuit, justify='left')), expected)\n\n def test_text_justify_right_barrier(self):\n \"\"\" Right justify respects barriers \"\"\"\n expected = '\\n'.join([' ┌───┐ ░ ',\n 'q1_0: |0>┤ H ├─░──────',\n ' └───┘ ░ ┌───┐',\n 'q1_1: |0>──────░─┤ H ├',\n ' ░ └───┘'])\n\n qr1 = QuantumRegister(2, 'q1')\n circuit = QuantumCircuit(qr1)\n circuit.h(qr1[0])\n circuit.barrier(qr1)\n circuit.h(qr1[1])\n self.assertEqual(str(_text_circuit_drawer(circuit, justify='right')), expected)\n\n def test_text_overlap_cx(self):\n \"\"\" Overlapping CX 
gates are drawn not overlapping\"\"\"\n expected = '\\n'.join([' ',\n 'q1_0: |0>──■───────',\n ' │ ',\n 'q1_1: |0>──┼────■──',\n ' │ ┌─┴─┐',\n 'q1_2: |0>──┼──┤ X ├',\n ' ┌─┴─┐└───┘',\n 'q1_3: |0>┤ X ├─────',\n ' └───┘ '])\n\n qr1 = QuantumRegister(4, 'q1')\n circuit = QuantumCircuit(qr1)\n circuit.cx(qr1[0], qr1[3])\n circuit.cx(qr1[1], qr1[2])\n self.assertEqual(str(_text_circuit_drawer(circuit, justify='left')), expected)\n\n def test_text_overlap_measure(self):\n \"\"\" Measure is drawn not overlapping\"\"\"\n expected = '\\n'.join([' ┌─┐ ',\n 'q1_0: |0>┤M├─────',\n ' └╥┘┌───┐',\n 'q1_1: |0>─╫─┤ X ├',\n ' ║ └───┘',\n ' c1_0: 0 ═╩══════',\n ' ',\n ' c1_1: 0 ════════',\n ' '])\n\n qr1 = QuantumRegister(2, 'q1')\n cr1 = ClassicalRegister(2, 'c1')\n circuit = QuantumCircuit(qr1, cr1)\n circuit.measure(qr1[0], cr1[0])\n circuit.x(qr1[1])\n self.assertEqual(str(_text_circuit_drawer(circuit, justify='left')), expected)\n\n def test_text_overlap_swap(self):\n \"\"\" Swap is drawn in 2 separate columns\"\"\"\n expected = '\\n'.join([' ',\n 'q1_0: |0>─X────',\n ' │ ',\n 'q1_1: |0>─┼──X─',\n ' │ │ ',\n 'q2_0: |0>─X──┼─',\n ' │ ',\n 'q2_1: |0>────X─',\n ' '])\n\n qr1 = QuantumRegister(2, 'q1')\n qr2 = QuantumRegister(2, 'q2')\n circuit = QuantumCircuit(qr1, qr2)\n circuit.swap(qr1, qr2)\n self.assertEqual(str(_text_circuit_drawer(circuit, justify='left')), expected)\n\n def test_text_justify_right_measure_resize(self):\n \"\"\" Measure gate can resize if necessary\"\"\"\n expected = '\\n'.join([' ┌───┐',\n 'q1_0: |0>┤ X ├',\n ' └┬─┬┘',\n 'q1_1: |0>─┤M├─',\n ' └╥┘ ',\n ' c1_0: 0 ══╬══',\n ' ║ ',\n ' c1_1: 0 ══╩══',\n ' '])\n\n qr1 = QuantumRegister(2, 'q1')\n cr1 = ClassicalRegister(2, 'c1')\n circuit = QuantumCircuit(qr1, cr1)\n circuit.x(qr1[0])\n circuit.measure(qr1[1], cr1[1])\n self.assertEqual(str(_text_circuit_drawer(circuit, justify='right')), expected)\n\n def test_text_box_length(self):\n \"\"\"The length of boxes is independent of other boxes in the layer\n https://github.com/Qiskit/qiskit-terra/issues/1882\"\"\"\n expected = '\\n'.join([\" ┌───┐ ┌───┐\",\n \"q1_0: |0>────┤ H ├────┤ H ├\",\n \" └───┘ └───┘\",\n \"q1_1: |0>──────────────────\",\n \" ┌───────────┐ \",\n \"q1_2: |0>┤ U1(1e-07) ├─────\",\n \" └───────────┘ \"])\n\n qr = QuantumRegister(3, 'q1')\n circuit = QuantumCircuit(qr)\n circuit.h(qr[0])\n circuit.h(qr[0])\n circuit.u1(0.0000001, qr[2])\n self.assertEqual(str(_text_circuit_drawer(circuit)), expected)\n\n def test_text_spacing_2378(self):\n \"\"\"Small gates in the same layer as long gates.\n See https://github.com/Qiskit/qiskit-terra/issues/2378\"\"\"\n expected = '\\n'.join([\" \",\n \"q_0: |0>──────X──────\",\n \" │ \",\n \"q_1: |0>──────X──────\",\n \" ┌───────────┐\",\n \"q_2: |0>┤ Rz(11111) ├\",\n \" └───────────┘\"])\n qr = QuantumRegister(3, 'q')\n circuit = QuantumCircuit(qr)\n circuit.swap(qr[0], qr[1])\n circuit.rz(11111, qr[2])\n self.assertEqual(str(_text_circuit_drawer(circuit)), expected)\n\n\nclass TestTextDrawerMultiQGates(QiskitTestCase):\n \"\"\" Gates impling multiple qubits.\"\"\"\n\n def test_2Qgate(self):\n \"\"\" 2Q no params. 
\"\"\"\n expected = '\\n'.join([\" ┌───────┐\",\n \"q_1: |0>┤1 ├\",\n \" │ twoQ │\",\n \"q_0: |0>┤0 ├\",\n \" └───────┘\"])\n\n qr = QuantumRegister(2, 'q')\n circuit = QuantumCircuit(qr)\n\n my_gate2 = Gate(name='twoQ', num_qubits=2, params=[])\n circuit.append(my_gate2, [qr[0], qr[1]])\n\n self.assertEqual(str(_text_circuit_drawer(circuit, reverse_bits=True)), expected)\n\n def test_2Qgate_cross_wires(self):\n \"\"\" 2Q no params, with cross wires \"\"\"\n expected = '\\n'.join([\" ┌───────┐\",\n \"q_1: |0>┤0 ├\",\n \" │ twoQ │\",\n \"q_0: |0>┤1 ├\",\n \" └───────┘\"])\n\n qr = QuantumRegister(2, 'q')\n circuit = QuantumCircuit(qr)\n\n my_gate2 = Gate(name='twoQ', num_qubits=2, params=[])\n circuit.append(my_gate2, [qr[1], qr[0]])\n\n self.assertEqual(str(_text_circuit_drawer(circuit, reverse_bits=True)), expected)\n\n def test_3Qgate_cross_wires(self):\n \"\"\" 3Q no params, with cross wires \"\"\"\n expected = '\\n'.join([\" ┌─────────┐\",\n \"q_2: |0>┤1 ├\",\n \" │ │\",\n \"q_1: |0>┤0 threeQ ├\",\n \" │ │\",\n \"q_0: |0>┤2 ├\",\n \" └─────────┘\"])\n\n qr = QuantumRegister(3, 'q')\n circuit = QuantumCircuit(qr)\n\n my_gate3 = Gate(name='threeQ', num_qubits=3, params=[])\n circuit.append(my_gate3, [qr[1], qr[2], qr[0]])\n\n self.assertEqual(str(_text_circuit_drawer(circuit, reverse_bits=True)), expected)\n\n def test_2Qgate_nottogether(self):\n \"\"\" 2Q that are not together \"\"\"\n expected = '\\n'.join([\" ┌───────┐\",\n \"q_2: |0>┤1 ├\",\n \" │ │\",\n \"q_1: |0>┤ twoQ ├\",\n \" │ │\",\n \"q_0: |0>┤0 ├\",\n \" └───────┘\"])\n qr = QuantumRegister(3, 'q')\n circuit = QuantumCircuit(qr)\n\n my_gate2 = Gate(name='twoQ', num_qubits=2, params=[])\n circuit.append(my_gate2, [qr[0], qr[2]])\n\n self.assertEqual(str(_text_circuit_drawer(circuit, reverse_bits=True)), expected)\n\n def test_2Qgate_nottogether_across_4(self):\n \"\"\" 2Q that are 2 bits apart\"\"\"\n expected = '\\n'.join([\" ┌───────┐\",\n \"q_3: |0>┤1 ├\",\n \" │ │\",\n \"q_2: |0>┤ ├\",\n \" │ twoQ │\",\n \"q_1: |0>┤ ├\",\n \" │ │\",\n \"q_0: |0>┤0 ├\",\n \" └───────┘\"])\n\n qr = QuantumRegister(4, 'q')\n circuit = QuantumCircuit(qr)\n\n my_gate2 = Gate(name='twoQ', num_qubits=2, params=[])\n circuit.append(my_gate2, [qr[0], qr[3]])\n\n self.assertEqual(str(_text_circuit_drawer(circuit, reverse_bits=True)), expected)\n\n def test_unitary_nottogether_across_4(self):\n \"\"\" Unitary that are 2 bits apart\"\"\"\n expected = '\\n'.join([\" ┌──────────┐\",\n \"q_0: |0>┤0 ├\",\n \" │ │\",\n \"q_1: |0>┤ ├\",\n \" │ unitary │\",\n \"q_2: |0>┤ ├\",\n \" │ │\",\n \"q_3: |0>┤1 ├\",\n \" └──────────┘\"])\n\n qr = QuantumRegister(4, 'q')\n qc = QuantumCircuit(qr)\n\n qc.append(random_unitary(4, seed=42), [qr[0], qr[3]])\n\n self.assertEqual(str(_text_circuit_drawer(qc)), expected)\n\n def test_kraus(self):\n \"\"\" Test Kraus.\n See https://github.com/Qiskit/qiskit-terra/pull/2238#issuecomment-487630014\"\"\"\n expected = '\\n'.join([\" ┌───────┐\",\n \"q_0: |0>┤ Kraus ├\",\n \" └───────┘\"])\n\n error = SuperOp(0.75 * numpy.eye(4) + 0.25 * numpy.diag([1, -1, -1, 1]))\n qr = QuantumRegister(1, name='q')\n qc = QuantumCircuit(qr)\n qc.append(error, [qr[0]])\n\n self.assertEqual(str(_text_circuit_drawer(qc)), expected)\n\n def test_multiplexer(self):\n \"\"\" Test Multiplexer.\n See https://github.com/Qiskit/qiskit-terra/pull/2238#issuecomment-487630014\"\"\"\n expected = '\\n'.join([\" ┌──────────────┐\",\n \"q_0: |0>┤0 ├\",\n \" │ multiplexer │\",\n \"q_1: |0>┤1 ├\",\n \" └──────────────┘\"])\n\n cx_multiplexer = Gate('multiplexer', 
2, [numpy.eye(2), numpy.array([[0, 1], [1, 0]])])\n\n qr = QuantumRegister(2, name='q')\n qc = QuantumCircuit(qr)\n qc.append(cx_multiplexer, [qr[0], qr[1]])\n\n self.assertEqual(str(_text_circuit_drawer(qc)), expected)\n\n\nclass TestTextDrawerParams(QiskitTestCase):\n \"\"\"Test drawing parameters.\"\"\"\n\n def test_text_parameters_mix(self):\n \"\"\" cu3 drawing with parameters\"\"\"\n expected = '\\n'.join([\" \",\n \"q_0: |0>─────────────■─────────────\",\n \" ┌────────────┴────────────┐\",\n \"q_1: |0>┤ U3(1.5708,theta,3.1416) ├\",\n \" └─────────────────────────┘\"])\n\n qr = QuantumRegister(2, 'q')\n circuit = QuantumCircuit(qr)\n circuit.cu3(pi / 2, Parameter('theta'), pi, qr[0], qr[1])\n\n self.assertEqual(str(_text_circuit_drawer(circuit)), expected)\n\n def test_text_sympy_constant(self):\n \"\"\" cu3 drawing with sympy pi\"\"\"\n expected = '\\n'.join([\" \",\n \"q_0: |0>──────────■───────────\",\n \" ┌─────────┴──────────┐\",\n \"q_1: |0>┤ U3(1.5708,pi/2,pi) ├\",\n \" └────────────────────┘\"])\n\n qr = QuantumRegister(2, 'q')\n circuit = QuantumCircuit(qr)\n circuit.cu3(pi / 2, sympy.pi / 2, sympy.pi, qr[0], qr[1])\n\n self.assertEqual(str(_text_circuit_drawer(circuit)), expected)\n\n\nclass TestTextDrawerVerticallyExtended(QiskitTestCase):\n \"\"\"Test vertically_compressed=False\"\"\"\n\n def test_text_conditional_1(self):\n \"\"\" Conditional drawing with 1-bit-length regs.\"\"\"\n qasm_string = \"\"\"\n OPENQASM 2.0;\n include \"qelib1.inc\";\n qreg q[1];\n creg c0[1];\n creg c1[1];\n if(c0==1) x q[0];\n if(c1==1) x q[0];\n \"\"\"\n expected = '\\n'.join([\" ┌───┐ ┌───┐ \",\n \"q_0: |0>─┤ X ├──┤ X ├─\",\n \" └─┬─┘ └─┬─┘ \",\n \" ┌──┴──┐ │ \",\n \"c0_0: 0 ╡ = 1 ╞═══╪═══\",\n \" └─────┘ │ \",\n \" ┌──┴──┐\",\n \"c1_0: 0 ═══════╡ = 1 ╞\",\n \" └─────┘\"])\n\n circuit = QuantumCircuit.from_qasm_str(qasm_string)\n self.assertEqual(str(_text_circuit_drawer(circuit, vertically_compressed=False)), expected)\n\n def test_text_justify_right(self):\n \"\"\" Drawing with right justify \"\"\"\n expected = '\\n'.join([\" ┌───┐\",\n \"q1_0: |0>─────┤ X ├\",\n \" └───┘\",\n \" ┌───┐ ┌─┐ \",\n \"q1_1: |0>┤ H ├─┤M├─\",\n \" └───┘ └╥┘ \",\n \" ║ \",\n \" c1_0: 0 ═══════╬══\",\n \" ║ \",\n \" ║ \",\n \" c1_1: 0 ═══════╩══\",\n \" \"])\n\n qr1 = QuantumRegister(2, 'q1')\n cr1 = ClassicalRegister(2, 'c1')\n circuit = QuantumCircuit(qr1, cr1)\n circuit.x(qr1[0])\n circuit.h(qr1[1])\n circuit.measure(qr1[1], cr1[1])\n self.assertEqual(str(_text_circuit_drawer(circuit,\n justify='right',\n vertically_compressed=False)), expected)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"numpy.exp",
"numpy.linspace"
],
[
"numpy.random.RandomState",
"numpy.zeros",
"numpy.cumsum",
"numpy.iinfo"
],
[
"numpy.mod",
"numpy.abs",
"numpy.allclose"
],
[
"numpy.diag",
"numpy.eye",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
alexhsamuel/fixfmt
|
[
"a392ca2863d55204e3ee8fc832b72ecba1387485"
] |
[
"python/fixfmt/table.py"
] |
[
"import copy\nimport numpy as np\n\nfrom . import string_length, palide, center, Bool, Number, String, is_fmt\nfrom . import _ext\nfrom . import npfmt\nfrom .lib import ansi\n\n#-------------------------------------------------------------------------------\n\n# FIXME: Elsewhere.\ndef update_cfg(old, updates):\n new = copy.deepcopy(old)\n for key, val in updates.items():\n if isinstance(val, dict):\n val = update_cfg(old.get(key, {}), val)\n new[key] = val\n return new\n\n\nSEPARATOR_CFG = {\n \"between\" : u\" \",\n \"end\" : u\"\",\n \"index\" : u\" | \",\n \"start\" : u\"\",\n}\n\nDEFAULT_CFG = {\n \"bottom\": {\n \"line\" : u\"-\",\n \"separator\" : SEPARATOR_CFG,\n \"show\" : False,\n },\n \"data\": {\n \"max_rows\" : \"terminal\",\n },\n \"formatters\": {\n \"by_name\" : {},\n \"by_dtype\" : {},\n \"default\" : npfmt.DEFAULT_CFG,\n },\n \"header\": {\n \"elide\": {\n \"position\" : 0.7,\n \"ellipsis\" : u\"/\",\n },\n \"prefix\" : u\"\",\n \"separator\" : SEPARATOR_CFG,\n \"show\" : True,\n \"style\": {\n \"prefix\" : u\"\",\n \"suffix\" : u\"\",\n },\n \"suffix\" : u\"\",\n },\n \"index\": {\n \"show\" : True,\n },\n \"row\": {\n \"separator\" : SEPARATOR_CFG,\n },\n \"row_ellipsis\": {\n \"separator\" : SEPARATOR_CFG,\n \"pad\" : u\" \",\n \"position\" : 0.85,\n \"format\" : u\"... skipping {skipped} rows ...\",\n },\n \"top\": {\n \"line\" : u\"-\",\n \"separator\" : SEPARATOR_CFG,\n \"show\" : False,\n },\n \"underline\": {\n \"line\" : u\"=\",\n \"separator\" : SEPARATOR_CFG,\n \"show\" : True,\n },\n}\n\n\n\nUNICODE_CFG = update_cfg(DEFAULT_CFG, {\n \"formatters\": {\n \"default\": {\n \"bool\": {\n \"true\" : u\"\\u2714\",\n \"false\" : u\"\\u00b7\",\n },\n \"number\": {\n \"inf\" : u\"\\u221e\",\n },\n \"string\": {\n \"ellipsis\" : u\"\\u2026\",\n },\n },\n },\n \"header\": {\n \"elide\": {\n \"ellipsis\" : u\"\\u2026\",\n },\n },\n \"row_ellipsis\": {\n \"format\" : u\"\\u2026 skipping {skipped} rows \\u2026\",\n },\n \"underline\": {\n \"line\" : u\"\\u2550\",\n },\n})\n\n\nUNICODE_BOX_CFG = update_cfg(DEFAULT_CFG, {\n \"bottom\": {\n \"line\" : u\"\\u2500\",\n \"separator\": {\n \"between\" : u\"\\u2500\\u2534\\u2500\",\n \"end\" : u\"\\u2500\\u2518\",\n \"index\" : u\"\\u2500\\u2568\\u2500\",\n \"start\" : u\"\\u2514\\u2500\",\n },\n \"show\" : True,\n },\n \"formatters\": {\n \"default\": {\n \"number\": {\n \"inf\" : u\"\\u221e\",\n },\n \"string\": {\n \"ellipsis\" : u\"\\u2026\",\n },\n },\n },\n \"header\": {\n \"elide\": {\n \"ellipsis\" : u\"\\u2026\",\n },\n \"separator\": {\n \"between\" : u\" \\u2502 \",\n \"end\" : u\" \\u2502\",\n \"index\" : u\" \\u2551 \",\n \"start\" : u\"\\u2502 \",\n },\n },\n \"row_ellipsis\": {\n \"separator\": {\n \"end\" : u\"\\u2561\",\n \"start\" : u\"\\u255e\",\n },\n \"pad\" : u\"\\u2550\",\n \"format\" : u\" skipped {skipped} rows \",\n },\n \"row\": {\n \"separator\": {\n \"between\" : u\" \\u2502 \",\n \"end\" : u\" \\u2502\",\n \"index\" : u\" \\u2551 \",\n \"start\" : u\"\\u2502 \",\n },\n },\n \"top\": {\n \"line\" : u\"\\u2500\",\n \"separator\": {\n \"between\" : u\"\\u2500\\u252c\\u2500\",\n \"end\" : u\"\\u2500\\u2510\",\n \"index\" : u\"\\u2500\\u2565\\u2500\",\n \"start\" : u\"\\u250c\\u2500\",\n },\n \"show\" : True,\n },\n \"underline\": {\n \"line\" : u\"\\u2500\",\n \"separator\": {\n \"between\" : u\"\\u2500\\u253c\\u2500\",\n \"end\" : u\"\\u2500\\u2524\",\n \"index\" : u\"\\u2500\\u256b\\u2500\",\n \"start\" : u\"\\u251c\\u2500\",\n },\n },\n})\n\n\ndef _colorize(cfg):\n \"\"\"\n ANSI-colorizes a 
configuration.\n\n Modifies `cfg` in place.\n \"\"\"\n fmt_cfg = cfg[\"formatters\"][\"default\"]\n\n # Color true and false.\n c = fmt_cfg[\"bool\"]\n c.update({\n \"true\" : ansi.style(fg=\"black\" )(c[\"true\"]),\n \"false\" : ansi.style(fg=\"dark_gray\")(c[\"false\"]),\n })\n\n # Color Inf and Nan, for visibility.\n c = fmt_cfg[\"number\"]\n c.update({\n \"inf\" : ansi.style(fg=\"blue\", bold=True)(c[\"inf\"]),\n \"nan\" : ansi.style(fg=\"dark_gray\" )(c[\"nan\"]),\n })\n\n # Color lines gray.\n line_style = ansi.style(fg=\"light_gray\")\n style_line = lambda s: s if s is None else line_style(s)\n for c in (cfg[\"bottom\"], cfg[\"header\"], cfg[\"row\"], cfg[\"row_ellipsis\"],\n cfg[\"top\"], cfg[\"underline\"], ):\n c = c[\"separator\"]\n c.update({\n \"between\" : style_line(c[\"between\"]),\n \"end\" : style_line(c[\"end\"]),\n \"index\" : style_line(c[\"index\"]),\n \"start\" : style_line(c[\"start\"]),\n })\n for c in cfg[\"bottom\"], cfg[\"top\"], cfg[\"underline\"]:\n c[\"line\"] = style_line(c[\"line\"])\n\n # Use underlining instead of drawing an underline.\n # FIXME: Hacky.\n if not cfg[\"underline\"][\"show\"]:\n cfg[\"header\"][\"style\"].update({\n \"prefix\": ansi.sgr(underline=True, bold=True),\n \"suffix\": ansi.RESET,\n })\n\n # Color the ellipsis row.\n c = cfg[\"row_ellipsis\"]\n c.update({\n \"pad\" : style_line(c[\"pad\"]),\n \"format\": ansi.style(fg=\"light_gray\")(c[\"format\"]),\n })\n\n return cfg\n\n\n#-------------------------------------------------------------------------------\n\ndef _get_formatter(name, arr, cfg):\n \"\"\"\n Constructs a formatter for a named array.\n \"\"\"\n # Start with the overall default formatter configuration\n fmt_cfg = cfg[\"default\"]\n\n # For each of the name, dtype name, and dtype kind, look up in the\n # corresponding cfg dict. 
If we find a formatter, return it outright.\n # Otherwise, update the formatter cfg.\n for c, k in (\n (cfg[\"by_dtype\"], arr.dtype.kind),\n (cfg[\"by_dtype\"], arr.dtype.name),\n (cfg[\"by_name\"], name),\n ):\n try:\n val = c[k]\n except KeyError:\n pass\n else:\n if is_fmt(val):\n return val\n else:\n fmt_cfg = update_cfg(fmt_cfg, val)\n\n min_width = fmt_cfg[\"min_width\"]\n if fmt_cfg[\"name_width\"]:\n min_width = max(min_width, string_length(name))\n\n return npfmt.choose_formatter(arr, min_width=min_width, cfg=fmt_cfg)\n\n\ndef _get_header_position(fmt):\n \"\"\"\n Returns the pad position for justifying the header.\n \"\"\"\n if isinstance(fmt, Bool):\n return fmt.pos\n elif isinstance(fmt, Number):\n return 0 # FIXME: Constant.\n elif isinstance(fmt, String):\n return fmt.pad_pos\n else:\n # Assume everythign else is left-justified.\n return 1 # FIXME: Constant.\n\n\n#-------------------------------------------------------------------------------\n\nclass Table:\n\n def __init__(self, cfg=DEFAULT_CFG):\n # The configuration.\n self.__cfg = cfg\n # Column names and formatters.\n self.__names = []\n self.__fmts = []\n # Number of leading index columns.\n self.__num_idx = 0\n # The underlying table.\n self.__table = _ext.Table()\n\n # Start the table.\n self.add_string(self.__cfg[\"row\"][\"separator\"][\"start\"])\n\n\n def __add_array(self, arr, fmt):\n table = self.__table\n\n name = arr.dtype.name\n if name in {\n \"int8\", \"int16\", \"int32\", \"int64\",\n \"uint8\", \"uint16\", \"uint32\", \"uint64\",\n \"float32\", \"float64\", \"bool\"\n }:\n getattr(table, \"add_\" + name)(arr, fmt)\n elif name == \"object\":\n table.add_str_object(arr, fmt)\n elif arr.dtype.kind in \"U\":\n table.add_ucs32(arr.dtype.itemsize, arr, fmt)\n elif arr.dtype.kind in \"S\":\n table.add_utf8(arr.dtype.itemsize, arr, fmt)\n elif name.startswith(\"datetime64\"): # FIXME: Sloppy.\n table.add_tick_time(arr.astype(\"int64\"), fmt)\n else:\n raise TypeError(\"unsupported dtype: {}\".format(arr.dtype))\n\n\n def add_string(self, string):\n self.__table.add_string(string)\n\n\n def add_index_column(self, name, arr, fmt=None):\n assert self.__num_idx == len(self.__fmts), \\\n \"can't add index after normal column\"\n\n if self.__num_idx > 0:\n self.add_string(self.__cfg[\"row\"][\"separator\"][\"between\"])\n\n if fmt is None:\n fmt = _get_formatter(name, arr, self.__cfg[\"formatters\"])\n self.__add_array(arr, fmt)\n self.__names.append(name)\n self.__fmts.append(fmt)\n self.__num_idx += 1\n\n\n def add_column(self, name, arr, fmt=None):\n if self.__num_idx > 0 and self.__num_idx == len(self.__fmts):\n self.add_string(self.__cfg[\"row\"][\"separator\"][\"index\"])\n elif len(self.__fmts) > 0:\n self.add_string(self.__cfg[\"row\"][\"separator\"][\"between\"])\n\n if fmt is None:\n fmt = _get_formatter(name, arr, self.__cfg[\"formatters\"])\n self.__add_array(arr, fmt)\n self.__names.append(name)\n self.__fmts.append(fmt)\n\n\n def finish(self):\n self.add_string(self.__cfg[\"row\"][\"separator\"][\"end\"])\n\n\n def _fmt_header(self):\n cfg = self.__cfg[\"header\"]\n assert string_length(cfg[\"style\"][\"prefix\"]) == 0\n assert string_length(cfg[\"style\"][\"suffix\"]) == 0\n\n if cfg[\"show\"]:\n sep = cfg[\"separator\"]\n\n def format_name(i, name, fmt):\n name = name or \"\"\n name = cfg[\"prefix\"] + name + cfg[\"suffix\"]\n pad_pos = _get_header_position(fmt)\n name = palide(\n name, fmt.width,\n elide_pos=cfg[\"elide\"][\"position\"],\n ellipsis=cfg[\"elide\"][\"ellipsis\"],\n pad_pos=pad_pos)\n 
name = cfg[\"style\"][\"prefix\"] + name + cfg[\"style\"][\"suffix\"]\n if i > 0:\n if i == self.__num_idx:\n name = sep[\"index\"] + name\n else:\n name = sep[\"between\"] + name\n return name\n\n header = sep[\"start\"] + \"\".join(\n format_name(i, n, f)\n for i, (n, f) in enumerate(zip(self.__names, self.__fmts))\n ) + sep[\"end\"]\n return header\n\n\n def _fmt_line(self, cfg):\n if cfg[\"show\"]:\n # FIXME: Relax this.\n if string_length(cfg[\"line\"]) != 1:\n raise ValueError(\"line must be one character\")\n\n sep = cfg[\"separator\"]\n return (\n sep[\"start\"]\n + \"\".join(\n (sep[\"index\"] if i > 0 and i == self.__num_idx\n else sep[\"between\"] if i > 0\n else \"\")\n + cfg[\"line\"] * f.width\n for i, f in enumerate(self.__fmts)\n )\n + sep[\"end\"]\n )\n\n\n def _fmt_top(self):\n return self._fmt_line(self.__cfg[\"top\"])\n\n\n def _fmt_underline(self):\n return self._fmt_line(self.__cfg[\"underline\"])\n\n\n def _fmt_bottom(self):\n return self._fmt_line(self.__cfg[\"bottom\"])\n\n\n # FIXME: By screen (repeating header?)\n # FIXME: Do what when it's too wide???\n\n def _format(self):\n cfg = self.__cfg\n\n num_extra_rows = sum([\n cfg[\"top\"][\"show\"],\n cfg[\"header\"][\"show\"],\n cfg[\"underline\"][\"show\"],\n cfg[\"bottom\"][\"show\"],\n ])\n\n max_rows = cfg[\"data\"][\"max_rows\"]\n if max_rows == \"terminal\":\n # FIXME\n max_rows = ansi.get_terminal_size().lines - 1\n\n yield self._fmt_top()\n yield self._fmt_header()\n yield self._fmt_underline()\n\n table = self.__table\n num_rows = len(table)\n if max_rows is None or num_rows <= max_rows - num_extra_rows:\n for i in range(len(table)):\n yield table(i)\n else:\n cfg_ell = cfg[\"row_ellipsis\"]\n num_rows_top = int(cfg_ell[\"position\"] * max_rows)\n num_rows_bottom = max_rows - num_extra_rows - num_rows_top - 1\n num_rows_skipped = num_rows - num_rows_top - num_rows_bottom\n\n # Print rows from the top.\n for i in range(num_rows_top):\n yield table(i)\n\n # Print the row ellipsis.\n ell = cfg_ell[\"format\"].format(\n bottom =num_rows_bottom,\n rows =num_rows,\n skipped =num_rows_skipped,\n top =num_rows_top,\n )\n ell_start = cfg_ell[\"separator\"][\"start\"]\n ell_end = cfg_ell[\"separator\"][\"end\"]\n ell_pad = cfg_ell[\"pad\"]\n ell_width = (\n table.width\n - string_length(ell_start)\n - string_length(ell_end))\n ell = center(ell, ell_width, ell_pad)\n yield ell_start + ell + ell_end\n\n # Print rows from the bottom.\n for i in range(num_rows - num_rows_bottom, num_rows):\n yield table(i)\n\n yield self._fmt_bottom()\n\n\n def format(self):\n yield from filter(None, self._format())\n\n\n def print(self, print=print):\n for line in self.format():\n print(line)\n\n\n\ndef from_arrays(arrs, cfg=DEFAULT_CFG):\n \"\"\"\n Constructs a table from arrays.\n\n :param arrs:\n A dict or iterable of items from name to array.\n \"\"\"\n try:\n arrs = arrs.items()\n except AttributeError:\n pass\n\n tbl = Table(cfg)\n for name, arr in arrs:\n # FIXME: Since Table doesn't support S and U arrays, convert these\n # to objects for now.\n if arr.dtype.kind in \"SU\":\n arr = arr.astype(object)\n tbl.add_column(name, arr)\n tbl.finish()\n return tbl\n\n\n#-------------------------------------------------------------------------------\n\nclass RowTable:\n \"\"\"\n :ivar names:\n List of column names to show. 
T\n \"\"\"\n\n DEFAULT_CFG = _colorize(update_cfg(UNICODE_CFG, {\n \"header\": {\n \"separator\": {\n \"between\": \" \",\n },\n \"style\": {\n \"prefix\": ansi.sgr(bold=True),\n \"suffix\": ansi.sgr(bold=False),\n },\n },\n \"formatters\": {\n \"default\": {\n \"name_width\": True,\n },\n },\n \"row\": {\n \"separator\": {\n \"between\": \" \",\n },\n },\n \"underline\": {\n \"separator\": {\n \"between\": \" \",\n },\n },\n }))\n\n LINE = object()\n\n\n def __init__(self, cfg=DEFAULT_CFG):\n self.cfg = cfg\n self.names = []\n self.rows = []\n self.fmts = {}\n self.defaults = {}\n self.titles = {}\n\n\n def append(self, **fields):\n self.rows.append(fields)\n for name in fields:\n if name not in self.names:\n self.names.append(name)\n\n\n def extend(self, rows):\n for row in rows:\n self.append(**row)\n\n\n @property\n def num_rows(self):\n return len(self.rows)\n\n\n def line(self):\n self.rows.append(self.LINE)\n\n\n def text(self, text):\n self.rows.append(text)\n\n\n def order(self, names):\n old_names = list(names)\n self.names.clear()\n for name in names:\n if name in old_names:\n self.names.append(name)\n for name in old_names:\n if name not in self.names:\n self.names.append(name)\n\n\n def set_fmts(self):\n cfg = self.cfg[\"formatters\"]\n for name in self.names:\n if name not in self.fmts:\n # FIXME: Do better.\n arr = np.array([\n r[name] for r in self.rows\n if r is not self.LINE \n and name in r \n and r[name] is not None\n ])\n self.fmts[name] = _get_formatter(name, arr, cfg=cfg)\n\n\n def __line(self, fmts, cfg):\n if not cfg[\"show\"]:\n return\n\n sep = cfg[\"separator\"]\n fmts = ( f for f in fmts if f is not None )\n yield (\n sep[\"start\"]\n + \"\".join(\n (sep[\"between\"] if i > 0 else \"\")\n + cfg[\"line\"] * f.width\n for i, f in enumerate(fmts)\n )\n + sep[\"end\"]\n )\n\n\n def __header(self, names, fmts, cfg):\n if not cfg[\"show\"]:\n return\n\n assert string_length(cfg[\"style\"][\"prefix\"]) == 0\n assert string_length(cfg[\"style\"][\"suffix\"]) == 0\n\n sep = cfg[\"separator\"]\n\n def format_name(name, fmt):\n name = name or \"\"\n name = cfg[\"prefix\"] + name + cfg[\"suffix\"]\n pad_pos = _get_header_position(fmt)\n name = palide(\n name,\n fmt.width,\n elide_pos =cfg[\"elide\"][\"position\"],\n ellipsis =cfg[\"elide\"][\"ellipsis\"],\n pad_pos =pad_pos\n )\n name = cfg[\"style\"][\"prefix\"] + name + cfg[\"style\"][\"suffix\"]\n return name\n\n yield sep[\"start\"] + sep[\"between\"].join(\n format_name(n, f)\n for n, f in zip(names, fmts)\n if f is not None\n ) + sep[\"end\"]\n\n\n def __row(self, names, fmts, vals, cfg):\n sep = cfg[\"separator\"]\n vals = (\n \" \" * f.width if v is None else f(v)\n for f, v in zip(fmts, vals)\n if f is not None\n )\n yield (\n sep[\"start\"]\n + sep[\"between\"].join(vals)\n + sep[\"end\"]\n )\n\n\n def __iter__(self):\n self.set_fmts()\n\n names = self.names\n fmts = [ self.fmts.get(n, None) for n in names ]\n\n yield from self.__line(fmts, self.cfg[\"top\"])\n yield from self.__header(names, fmts, self.cfg[\"header\"])\n yield from self.__line(fmts, self.cfg[\"underline\"])\n\n defs = [ self.defaults.get(n, None) for n in names ]\n\n for row in self.rows:\n if row is self.LINE:\n yield from self.__line()\n elif isinstance(row, str):\n yield row\n else:\n vals = ( row.get(n, d) for n, d in zip(names, defs) )\n yield from self.__row(names, fmts, vals, self.cfg[\"row\"])\n\n yield from self.__line(fmts, self.cfg[\"bottom\"])\n\n\n def print(self, print=print):\n for line in self:\n print(line)\n\n\n"
] |
[
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
vreshniak/ImplicitResNet
|
[
"62e3c2f047f2572a0d0a0ee7cd3c8dd6e340080e"
] |
[
"implicitresnet/models/rhs.py"
] |
[
"from abc import ABCMeta, abstractmethod\n\nimport math\n\nimport torch\nfrom .misc import ParabolicPerceptron, HamiltonianPerceptron, HollowMLP, MLP, PreActConv2d\n\n\n\n###############################################################################\n###############################################################################\n\n\nclass rhs_base(torch.nn.Module, metaclass=ABCMeta):\n\tdef __init__(self, shape, T, num_steps, spectral_limits=None, learn_scales=False, learn_shift=False):\n\t\tsuper().__init__()\n\t\tself.shape = shape\n\t\tself.h = T / num_steps\n\t\tself.learn_scales = learn_scales\n\t\tself.learn_shift = learn_shift\n\n\t\t# initialize shifts so that sigmoid(shift) = ini_sigmoid\n\t\tini_sigmoid = 0.99\n\t\tinv_sigmoid = math.log(ini_sigmoid/(1-ini_sigmoid))\n\t\tself.shifta = torch.nn.parameter.Parameter( torch.tensor(inv_sigmoid), requires_grad=learn_shift)\n\t\tself.shiftb = torch.nn.parameter.Parameter( torch.tensor(inv_sigmoid), requires_grad=learn_shift)\n\n\t\t# initialize scales so that sigmoid(scales) = ini_sigmoid\n\t\tini_sigmoid = 0.5 if learn_scales else 0.99\n\t\tinv_sigmoid = math.log(ini_sigmoid/(1-ini_sigmoid))\n\t\tself.scales = torch.nn.parameter.Parameter( inv_sigmoid * torch.ones(1,*shape), requires_grad=learn_scales)\n\n\t\tself.set_spectral_limits(spectral_limits)\n\n\tdef set_spectral_limits(self, spectral_limits):\n\t\tassert isinstance(spectral_limits,list) or (spectral_limits is None), \"spectral_limits should be a list or None\"\n\t\tif spectral_limits is None:\n\t\t\tself.eigmin, self.eiginit, self.eigmax = ( -1.0, 0.0, 1.0 )\n\t\telif len(spectral_limits)==2:\n\t\t\tself.eigmin, self.eigmax = spectral_limits\n\t\t\tself.eiginit = 0.5*(self.eigmin+self.eigmax)\n\t\t\tassert self.eigmin<self.eigmax, \"eigmin < eigmax must be given, got spectral_limits = \"+str(spectral_limits)\n\t\telif len(spectral_limits)==3:\n\t\t\tself.eigmin, self.eiginit, self.eigmax = spectral_limits\n\t\t\tassert self.eigmin<self.eiginit and self.eiginit<self.eigmax, \"eigmin < eiginit < eigmax must be given, got spectral_limits = \"+str(spectral_limits)\n\n\t\tini_sigmoid_a = 0.01\n\t\tini_sigmoid_b = ini_sigmoid_a * (self.eigmax-self.eiginit)/(self.eiginit-self.eigmin) # balance initial shifta and shiftb\n\t\t# ini_sigmoid_b = 0.99\n\t\t# ini_sigmoid_a = ini_sigmoid_b * (self.eiginit-self.eigmin)/(self.eigmax-self.eiginit) # balance initial shifta and shiftb\n\t\tif self.shifta.requires_grad:\n\t\t\ta = math.log(ini_sigmoid_a/(1-ini_sigmoid_a))\n\t\t\ttorch.nn.init.constant_(self.shifta, a)\n\t\tif self.shiftb.requires_grad:\n\t\t\tb = math.log(ini_sigmoid_b/(1-ini_sigmoid_b))\n\t\t\ttorch.nn.init.constant_(self.shiftb, b)\n\n\tdef freeze_shift(self):\n\t\tself.shifta.requires_grad_(False)\n\t\tself.shiftb.requires_grad_(False)\n\n\tdef unfreeze_shift(self):\n\t\tself.shifta.requires_grad_(True)\n\t\tself.shiftb.requires_grad_(True)\n\n\n\tdef initialize(self):\n\t\tfor name, weight in self.F.named_parameters():\n\t\t\tif 'weight' in name:\n\t\t\t\t# torch.nn.init.xavier_normal_(weight, gain=1.e-1)\n\t\t\t\ttorch.nn.init.xavier_uniform_(weight)\n\t\t\t\ttorch.nn.init.xavier_uniform_(weight, gain=1./weight.detach().norm())\n\t\t\t\t# torch.nn.init.uniform_(weight,-1.e-5,1.e-5)\n\t\t\telse:\n\t\t\t\ttorch.nn.init.zeros_(weight)\n\t\t# perform dummy initial spectral normalization if any\n\t\tx = torch.ones(1,*self.shape)\n\t\tfor _ in range(5):\n\t\t\tfor m in self.F:\n\t\t\t\tm(x)\n\n\tdef t2ind(self, t):\n\t\tif torch.is_tensor(t):\n\t\t\tassert t.ndim<2, \"t must be 
either a scalar or a vector\"\n\t\t\treturn torch.clamp( (t/self.h).int(), max=len(self.F)-1 )\n\t\telse:\n\t\t\treturn min(int(t/self.h), len(self.F)-1)\n\n\n\tdef forward(self, t, y):\n\t\tind = self.t2ind(t)\n\t\tif torch.is_tensor(ind) and ind.ndim>0:\n\t\t\tassert ind.size(0)==y.size(0), \"if t is tensor, it must have the same batch dimension as y\"\n\t\t\t# need to sacrifice full batch parallelization here\n\t\t\tf = [ self.F[i](y[batch,...]) for batch, i in enumerate(ind) ]\n\t\t\tf = torch.stack(f)\n\t\t\t# this doesn't work. why?\n\t\t\t# f = [ self.F[i](y[i==ind,...]) for i in torch.unique(ind) ]\n\t\t\t# f = torch.cat(f,0)\n\t\telse:\n\t\t\tf = self.F[ind](y)\n\n\t\tf = torch.sigmoid(self.scales) * f\n\t\ta = self.eiginit + torch.sigmoid(self.shifta) * ( self.eigmin - self.eiginit )\n\t\tb = self.eiginit + torch.sigmoid(self.shiftb) * ( self.eigmax - self.eiginit )\n\n\t\treturn 0.5 * ((b-a)*f + (a+b)*y)\n\n\n\n###############################################################################\n###############################################################################\n\n\n\nclass rhs_mlp(rhs_base):\n\tdef __init__(self, dim, width, depth, T, num_steps, activation='relu', final_activation=None, power_iters=0, spectral_limits=None, learn_scales=False, learn_shift=False):\n\t\tsuper().__init__((dim,), T, num_steps, spectral_limits, learn_scales, learn_shift)\n\n\t\tif final_activation is None:\n\t\t\tfinal_activation = activation\n\n\t\tself.F = torch.nn.ModuleList( [ MLP(in_dim=dim, out_dim=dim, width=width, depth=depth, activation=activation, final_activation=final_activation, power_iters=power_iters) for _ in range(num_steps) ] )\n\n\t\t# intialize rhs\n\t\tself.initialize()\n\nclass rhs_hamiltonian_mlp(rhs_base):\n\tdef __init__(self, dim, width, T, num_steps, activation='relu', power_iters=0, spectral_limits=None, learn_scales=False, learn_shift=False):\n\t\tsuper().__init__((dim,), T, num_steps, spectral_limits, learn_scales, learn_shift)\n\n\t\tself.F = torch.nn.ModuleList( [ HamiltonianPerceptron(dim=dim, width=width, activation=activation, power_iters=power_iters) for _ in range(num_steps) ] )\n\n\t\t# intialize rhs\n\t\tself.initialize()\n\nclass rhs_parabolic_mlp(rhs_base):\n\tdef __init__(self, dim, width, T, num_steps, activation='relu', power_iters=0, spectral_limits=None, learn_scales=False, learn_shift=False):\n\t\tsuper().__init__((dim,), T, num_steps, spectral_limits, learn_scales, learn_shift)\n\n\t\tself.F = torch.nn.ModuleList( [ ParabolicPerceptron(dim=dim, width=width, activation=activation, power_iters=power_iters) for _ in range(num_steps) ] )\n\n\t\t# intialize rhs\n\t\tself.initialize()\n\n\nclass rhs_conv2d(rhs_base):\n\tdef __init__(self, input_shape, kernel_size, depth, T, num_steps, activation='relu', power_iters=0, spectral_limits=None, learn_scales=False, learn_shift=False, bias=True):\n\t\tsuper().__init__(input_shape, T, num_steps, spectral_limits, learn_scales, learn_shift)\n\n\t\t# define rhs\n\t\tself.F = torch.nn.ModuleList( [ PreActConv2d(input_shape, depth=depth, kernel_size=kernel_size, activation=activation, power_iters=power_iters, bias=bias) for _ in range(num_steps) ] )\n\n\t\t# intialize rhs\n\t\tself.initialize()\n"
] |
[
[
"torch.sigmoid",
"torch.ones",
"torch.nn.init.constant_",
"torch.is_tensor",
"torch.tensor",
"torch.nn.init.xavier_uniform_",
"torch.stack",
"torch.nn.init.zeros_"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
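Note on the implicitresnet/models/rhs.py entry above: each rhs_* class wraps a list of per-step sub-networks and, in forward(t, y), picks the sub-network for time t via t2ind, then rescales its output into the configured spectral interval. The sketch below is illustrative only; it assumes the repository in this row is installed so that `implicitresnet.models.rhs` is importable and that `MLP` from `.misc` behaves as an ordinary fully connected network (neither assumption is shown in this row).

```python
# Sketch only: exercising rhs_mlp as defined in implicitresnet/models/rhs.py above.
import torch
from implicitresnet.models.rhs import rhs_mlp  # assumes the package above is installed

# 2-dimensional state, 5 sub-networks over T = 1.0, eigenvalues constrained to [-1, 0].
f = rhs_mlp(dim=2, width=16, depth=2, T=1.0, num_steps=5,
            spectral_limits=[-1.0, 0.0])

y = torch.randn(4, 2)   # batch of 4 states
dy = f(0.3, y)          # forward(t, y): with h = T/num_steps = 0.2, t2ind(0.3) selects F[1]
print(dy.shape)         # torch.Size([4, 2])
```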
lompabo/aiiti-01-2021
|
[
"d2596458da8382940f51cb8f388aaf5357bce881"
] |
[
"util/nab.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport pandas as pd\nimport json\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport os\n\n# Configuration\nanomaly_color = 'sandybrown'\nprediction_color = 'yellowgreen'\ntraining_color = 'yellowgreen'\nvalidation_color = 'gold'\ntest_color = 'coral'\nfigsize=(12, 4)\n\ndef load_series(file_name, data_folder):\n # Load the input data\n data_path = os.path.join(data_folder, 'data', file_name)\n data = pd.read_csv(data_path)\n data['timestamp'] = pd.to_datetime(data['timestamp'])\n data.set_index('timestamp', inplace=True)\n # Load the labels\n label_path = os.path.join(data_folder, 'labels', 'combined_labels.json')\n with open(label_path) as fp:\n labels = pd.Series(json.load(fp)[file_name])\n labels = pd.to_datetime(labels)\n # Load the windows\n window_path = os.path.join(data_folder, 'labels', 'combined_windows.json')\n window_cols = ['begin', 'end']\n with open(window_path) as fp:\n windows = pd.DataFrame(columns=window_cols,\n data=json.load(fp)[file_name])\n windows['begin'] = pd.to_datetime(windows['begin'])\n windows['end'] = pd.to_datetime(windows['end'])\n # Return data\n return data, labels, windows\n\n\ndef plot_series(data, labels=None,\n windows=None,\n predictions=None,\n highlights=None,\n val_start=None,\n test_start=None,\n figsize=figsize):\n # Open a new figure\n # plt.close('all')\n plt.figure(figsize=figsize)\n # Plot data\n plt.plot(data.index, data.values, zorder=0)\n # Rotated x ticks\n plt.xticks(rotation=45)\n # Plot labels\n if labels is not None:\n plt.scatter(labels.values, data.loc[labels],\n color=anomaly_color, zorder=2)\n # Plot windows\n if windows is not None:\n for _, wdw in windows.iterrows():\n plt.axvspan(wdw['begin'], wdw['end'],\n color=anomaly_color, alpha=0.3, zorder=1)\n \n # Plot training data\n if val_start is not None:\n plt.axvspan(data.index[0], val_start,\n color=training_color, alpha=0.1, zorder=-1)\n if val_start is None and test_start is not None:\n plt.axvspan(data.index[0], test_start,\n color=training_color, alpha=0.1, zorder=-1)\n if val_start is not None:\n plt.axvspan(val_start, test_start,\n color=validation_color, alpha=0.1, zorder=-1)\n if test_start is not None:\n plt.axvspan(test_start, data.index[-1],\n color=test_color, alpha=0.3, zorder=0)\n # Predictions\n if predictions is not None:\n plt.scatter(predictions.values, data.loc[predictions],\n color=prediction_color, alpha=.4, zorder=3)\n plt.tight_layout()\n\n\ndef plot_autocorrelation(data, max_lag=100, figsize=figsize):\n # Open a new figure\n # plt.close('all')\n plt.figure(figsize=figsize)\n # Autocorrelation plot\n pd.plotting.autocorrelation_plot(data['value'])\n # Customized x limits\n plt.xlim(0, max_lag)\n # Rotated x ticks\n plt.xticks(rotation=45)\n plt.tight_layout()\n\n\ndef plot_histogram(data, bins=10, vmin=None, vmax=None, figsize=figsize):\n # Build a new figure\n # plt.close('all')\n plt.figure(figsize=figsize)\n # Plot a histogram\n plt.hist(data, density=True, bins=bins)\n # Update limits\n lims = plt.xlim()\n if vmin is not None:\n lims = (vmin, lims[1])\n if vmax is not None:\n lims = (lims[0], vmax)\n plt.xlim(lims)\n plt.tight_layout()\n\n\ndef plot_histogram2d(xdata, ydata, bins=10, figsize=figsize):\n # Build a new figure\n # plt.close('all')\n plt.figure(figsize=figsize)\n # Plot a histogram\n plt.hist2d(xdata, ydata, density=True, bins=bins)\n plt.tight_layout()\n\n\ndef plot_density_estimator_1D(estimator, xr, figsize=figsize):\n # Build a new figure\n # plt.close('all')\n 
plt.figure(figsize=figsize)\n # Plot the estimated density\n xvals = xr.reshape((-1, 1))\n dvals = np.exp(estimator.score_samples(xvals))\n plt.plot(xvals, dvals)\n plt.tight_layout()\n\n\ndef plot_density_estimator_2D(estimator, xr, yr, figsize=figsize):\n # Plot the estimated density\n nx = len(xr)\n ny = len(yr)\n xc = np.repeat(xr, ny)\n yc = np.tile(yr, nx)\n data = np.vstack((xc, yc)).T\n dvals = np.exp(estimator.score_samples(data))\n dvals = dvals.reshape((nx, ny))\n # Build a new figure\n # plt.close('all')\n plt.figure(figsize=figsize)\n plt.pcolor(dvals)\n plt.tight_layout()\n # plt.xticks(np.arange(0, len(xr)), xr)\n # plt.yticks(np.arange(0, len(xr)), yr)\n\n\ndef get_pred(signal, thr):\n return pd.Series(signal.index[signal >= thr])\n\n\ndef get_metrics(pred, labels, windows):\n tp = [] # True positives\n fp = [] # False positives\n fn = [] # False negatives\n advance = [] # Time advance, for true positives\n # Loop over all windows\n used_pred = set()\n for idx, w in windows.iterrows():\n # Search for the earliest prediction\n pmin = None\n for p in pred:\n if p >= w['begin'] and p < w['end']:\n used_pred.add(p)\n if pmin is None or p < pmin:\n pmin = p\n # Compute true pos. (incl. advance) and false neg.\n l = labels[idx]\n if pmin is None:\n fn.append(l)\n else:\n tp.append(l)\n advance.append(l-pmin)\n # Compute false positives\n for p in pred:\n if p not in used_pred:\n fp.append(p)\n # Return all metrics as pandas series\n return pd.Series(tp), \\\n pd.Series(fp), \\\n pd.Series(fn), \\\n pd.Series(advance)\n\n\nclass ADSimpleCostModel:\n\n def __init__(self, c_alrm, c_missed, c_late):\n self.c_alrm = c_alrm\n self.c_missed = c_missed\n self.c_late = c_late\n\n def cost(self, signal, labels, windows, thr):\n # Obtain predictions\n pred = get_pred(signal, thr)\n # Obtain metrics\n tp, fp, fn, adv = get_metrics(pred, labels, windows)\n # Compute the cost\n adv_det = [a for a in adv if a.total_seconds() <= 0]\n cost = self.c_alrm * len(fp) + \\\n self.c_missed * len(fn) + \\\n self.c_late * (len(adv_det))\n return cost\n\n\ndef opt_thr(signal, labels, windows, cmodel, thr_range):\n costs = [cmodel.cost(signal, labels, windows, thr)\n for thr in thr_range]\n costs = np.array(costs)\n best_idx = np.argmin(costs)\n return thr_range[best_idx], costs[best_idx]\n\n\n\n\nclass KDEDetector:\n\n def __init__(self, bandwidth=0.1, thr=0.0):\n self.est = KernelDensity(kernel='gaussian',\n bandwidth=bandwidth)\n self.thr = thr\n\n def fit_estimator(self, X):\n kde2.fit(X)\n\n def fit_threshold(self, cmodel, tr):\n pass\n\n"
] |
[
[
"pandas.to_datetime",
"pandas.Series",
"matplotlib.pyplot.plot",
"numpy.argmin",
"pandas.read_csv",
"matplotlib.pyplot.tight_layout",
"numpy.repeat",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.hist2d",
"pandas.plotting.autocorrelation_plot",
"numpy.array",
"matplotlib.pyplot.axvspan",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.scatter",
"numpy.tile",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.pcolor",
"numpy.vstack"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
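One remark on the util/nab.py entry above: the KDEDetector stub at the end of that file uses KernelDensity without importing it, and fit_estimator calls the undefined name kde2 instead of the estimator stored in self.est. The sketch below is a hedged correction, assuming scikit-learn's sklearn.neighbors.KernelDensity is the intended estimator (the original file never shows the import); the score_samples helper is added purely for illustration.

```python
# Hedged correction of the KDEDetector stub from util/nab.py (assumes sklearn's KDE is intended).
from sklearn.neighbors import KernelDensity

class KDEDetector:
    def __init__(self, bandwidth=0.1, thr=0.0):
        self.est = KernelDensity(kernel='gaussian', bandwidth=bandwidth)
        self.thr = thr

    def fit_estimator(self, X):
        # The original calls the undefined name `kde2`; fit the stored estimator instead.
        self.est.fit(X)

    def score_samples(self, X):
        # Added for illustration: per-sample log-density from the fitted estimator.
        return self.est.score_samples(X)
```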
lkl089/corona-discord-bot
|
[
"b946e7f41c19e1849ae7c63ebf5be0ba9331a0fc"
] |
[
"chart/chart_spain.py"
] |
[
"from data import recent_confirmed as week\nfrom matplotlib import pyplot as plt\nfrom matplotlib import rc\nimport matplotlib.font_manager as fm\nimport platform\nfrom datetime import datetime\n\nnow = datetime.now()\nmonth = now.strftime(\"%m\")\nday = now.strftime(\"%d\")\ntoday = month + day\n\nif platform.system() == 'Windows':\n# 윈도우인 경우\n font_name = fm.FontProperties(fname=\"c:/Windows/Fonts/malgun.ttf\").get_name()\n rc('font', family=font_name)\nelse:\n# 우분투인 경우\n font_name = fm.FontProperties(fname=\"/usr/share/fonts/truetype/nanum/NanumBarunGothic.ttf\").get_name()\n rc('font', family=font_name)\n\nax = plt.subplot()\nplt.title('주간 스페인 코로나 상황 (보건복지부 발표기준)')\nax.set_axisbelow(True)\nax.yaxis.grid(color='gray', linestyle='dashed')\nax.xaxis.grid(color='gray', linestyle='dashed')\nplt.xticks(fontsize=8)\n#ax.set_xticks([0, 1, 2, 3, 4, 5, 6, 7])\n#ax.set_xticklabels(['',week.day6, week.day5, week.day4, week.day3, week.day2, week.day1, week.today], rotation=40)\n\nconfimed_spa = ['0',week.w_3_6, week.w_3_5, week.w_3_4, week.w_3_3, week.w_3_2,week.w_3_1,week.w_3_t]\n\nprint(confimed_spa)\n\nplt.plot([0, 1, 2, 3, 4, 5, 6],[week.w_3_6, week.w_3_5, week.w_3_4, week.w_3_3, week.w_3_2,week.w_3_1,week.w_3_t],c=\"r\",lw=\"3\",ls=\"--\",marker=\"o\",ms=\"8\",mec=\"blue\")\nlocs, labels=plt.xticks()\nxticks=['',week.day6, week.day5, week.day4, week.day3, week.day2, week.day1, week.today]\nplt.xticks(locs, xticks)\nplt.xticks(locs,xticks)\nplt.legend(['확진자 (명)'])\n\nplt.draw()\nfig = plt.gcf()\nif platform.system() == 'Windows':\n# 윈도우인 경우\n world_chart = fig.savefig('./data/confim_spain.png', dpi=fig.dpi)\nelse:\n# 우분투인 경우\n world_chart = fig.savefig('/discord-bot/data/confim_spain.png', dpi=fig.dpi)\n\n\nplt.cla()\nplt.clf()"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.cla",
"matplotlib.font_manager.FontProperties",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.xticks",
"matplotlib.rc"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
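Two small points about the chart/chart_spain.py entry above: plt.xticks(locs, xticks) is called twice in a row (the second call is redundant), and the font is chosen by branching on platform.system() with hard-coded absolute paths. A slightly more defensive variant of that font selection is sketched below; the two paths are copied verbatim from the script, and a missing file simply leaves Matplotlib's default font in place.

```python
# Sketch: defensive version of the per-OS Korean font selection used in chart/chart_spain.py.
import os
import platform

import matplotlib.font_manager as fm
from matplotlib import rc

def set_korean_font():
    path = ("c:/Windows/Fonts/malgun.ttf" if platform.system() == "Windows"
            else "/usr/share/fonts/truetype/nanum/NanumBarunGothic.ttf")
    if os.path.exists(path):
        rc('font', family=fm.FontProperties(fname=path).get_name())
    # else: keep Matplotlib's default font rather than failing on a missing file

set_korean_font()
```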
avanwyk/cipy
|
[
"e5c8462eed8649abcb89c8a256d3868c89186b35"
] |
[
"cipy/algorithms/pso/base.py"
] |
[
"# Copyright 2016 Andrich van Wyk\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\" Collection of functions used to implement the PSO algorithm.\nThe implementation defined here is the synchronous modified gbest PSO with the\ninertia term as per:\n\n* Shi, Yuhui, and Russell Eberhart. \"A modified particle swarm optimizer.\"\n Evolutionary Computation Proceedings, 1998.\n IEEE World Congress on Computational Intelligence.,\n The 1998 IEEE International Conference on. IEEE, 1998.\n\nFunction 'pso' defines the entry point for running the algorithm.\n\"\"\"\nimport numpy as np\n\nfrom cipy.algorithms.core import dictionary_based_metrics\nfrom cipy.algorithms.pso import functions\nfrom cipy.algorithms.pso import types\n\n\ndef optimize(objective_function, domain,\n stopping_condition, parameters=None,\n position_update=functions.std_position,\n velocity_update=functions.std_velocity,\n parameter_update=functions.std_parameter_update,\n measurements=(),\n measurer=dictionary_based_metrics):\n \"\"\" Perform particle swarm optimization of the given fitness function.\n Args:\n objective_function: the cost function to optimize.\n stopping_condition: function specifying the stopping condition.\n parameters: dictionary: parameter dictionary for the PSO.\n\n Returns:\n cipy.algorithms.pso.Particle: The global best particle.\n \"\"\"\n params = __init_parameters__(parameters)\n\n rng = np.random.RandomState(params['seed'])\n\n initial_swarm = [functions.initialize_particle(rng, domain,\n objective_function)\n for i in range(params['swarm_size'])]\n state = types.PSOState(rng, params, iterations=0, swarm=initial_swarm)\n\n topology_function = state.params['topology']\n update_fitness = functions.update_fitness\n update_particle = functions.update_particle\n\n results, measure = measurer(measurements)\n while not stopping_condition(state):\n n_bests = topology_function(state)\n\n state = state._replace(swarm=[update_particle(position_update,\n velocity_update,\n state, n_bests, ip)\n for ip in enumerate(state.swarm)])\n\n state = state._replace(swarm=[update_fitness(objective_function,\n particle)\n for particle in state.swarm],\n iterations=state.iterations + 1)\n\n state = parameter_update(state, objective_function)\n\n results = measure(results, state)\n\n return functions.solution(state.swarm), results\n\n\ndef default_parameters():\n return {'swarm_size': 25, 'n_s': 5, 'inertia': 0.729844,\n 'c_1': 1.496180, 'c_2': 1.496180, 'v_max': None,\n 'topology': functions.gbest_topology, 'seed': None}\n\n\ndef __init_parameters__(params):\n return {**default_parameters(), **({} if params is None else params)}\n"
] |
[
[
"numpy.random.RandomState"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
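On the cipy/algorithms/pso/base.py entry above: __init_parameters__ merges the caller-supplied dictionary over default_parameters() with dict unpacking, so user keys win and every other parameter keeps its default. A tiny self-contained illustration of that merge follows; the values are copied from default_parameters(), and the 'topology' entry is omitted because it references a function not included in this row.

```python
# Illustration of the {**defaults, **params} merge used by base.__init_parameters__.
defaults = {'swarm_size': 25, 'n_s': 5, 'inertia': 0.729844,
            'c_1': 1.496180, 'c_2': 1.496180, 'v_max': None, 'seed': None}
user = {'swarm_size': 50, 'seed': 42}

merged = {**defaults, **({} if user is None else user)}
print(merged['swarm_size'], merged['inertia'])  # 50 0.729844 -- user keys override, the rest keep defaults
```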
MarcoMancha/BreastCancerDetector
|
[
"4e11da37bf94a0f496f236e9706205ac81683058",
"be0dfdcebd1ae66da6d0cf48e2525c24942ae877",
"be0dfdcebd1ae66da6d0cf48e2525c24942ae877",
"be0dfdcebd1ae66da6d0cf48e2525c24942ae877",
"be0dfdcebd1ae66da6d0cf48e2525c24942ae877",
"be0dfdcebd1ae66da6d0cf48e2525c24942ae877",
"be0dfdcebd1ae66da6d0cf48e2525c24942ae877",
"be0dfdcebd1ae66da6d0cf48e2525c24942ae877",
"be0dfdcebd1ae66da6d0cf48e2525c24942ae877",
"be0dfdcebd1ae66da6d0cf48e2525c24942ae877",
"be0dfdcebd1ae66da6d0cf48e2525c24942ae877",
"be0dfdcebd1ae66da6d0cf48e2525c24942ae877",
"be0dfdcebd1ae66da6d0cf48e2525c24942ae877",
"be0dfdcebd1ae66da6d0cf48e2525c24942ae877",
"be0dfdcebd1ae66da6d0cf48e2525c24942ae877",
"be0dfdcebd1ae66da6d0cf48e2525c24942ae877",
"9444dce96954c546333d5aecc92a06c3bfd19aa5"
] |
[
"env/lib/python3.7/site-packages/numpy/random/__init__.py",
"env/lib/python3.7/site-packages/scipy/sparse/linalg/eigen/lobpcg/lobpcg.py",
"env/lib/python3.7/site-packages/matplotlib/testing/jpl_units/UnitDblFormatter.py",
"env/lib/python3.7/site-packages/sklearn/utils/tests/test_seq_dataset.py",
"env/lib/python3.7/site-packages/scipy/io/arff/tests/test_arffread.py",
"env/lib/python3.7/site-packages/matplotlib/style/core.py",
"env/lib/python3.7/site-packages/matplotlib/backends/backend_gtk3.py",
"env/lib/python3.7/site-packages/pandas/tests/reshape/test_pivot.py",
"env/lib/python3.7/site-packages/matplotlib/backend_bases.py",
"env/lib/python3.7/site-packages/pandas/tests/groupby/test_categorical.py",
"env/lib/python3.7/site-packages/scipy/linalg/interpolative.py",
"env/lib/python3.7/site-packages/scipy/interpolate/_cubic.py",
"env/lib/python3.7/site-packages/scipy/ndimage/measurements.py",
"env/lib/python3.7/site-packages/numpy/ma/tests/test_old_ma.py",
"env/lib/python3.7/site-packages/sklearn/decomposition/online_lda.py",
"env/lib/python3.7/site-packages/scipy/optimize/zeros.py",
"env/lib/python3.7/site-packages/sklearn/metrics/cluster/tests/test_bicluster.py"
] |
[
"\"\"\"\n========================\nRandom Number Generation\n========================\n\nUse ``default_rng()`` to create a `Generator` and call its methods.\n\n=============== =========================================================\nGenerator\n--------------- ---------------------------------------------------------\nGenerator Class implementing all of the random number distributions\ndefault_rng Default constructor for ``Generator``\n=============== =========================================================\n\n============================================= ===\nBitGenerator Streams that work with Generator\n--------------------------------------------- ---\nMT19937\nPCG64\nPhilox\nSFC64\n============================================= ===\n\n============================================= ===\nGetting entropy to initialize a BitGenerator\n--------------------------------------------- ---\nSeedSequence\n============================================= ===\n\n\nLegacy\n------\n\nFor backwards compatibility with previous versions of numpy before 1.17, the\nvarious aliases to the global `RandomState` methods are left alone and do not\nuse the new `Generator` API.\n\n==================== =========================================================\nUtility functions\n-------------------- ---------------------------------------------------------\nrandom Uniformly distributed floats over ``[0, 1)``\nbytes Uniformly distributed random bytes.\npermutation Randomly permute a sequence / generate a random sequence.\nshuffle Randomly permute a sequence in place.\nchoice Random sample from 1-D array.\n==================== =========================================================\n\n==================== =========================================================\nCompatibility\nfunctions - removed\nin the new API\n-------------------- ---------------------------------------------------------\nrand Uniformly distributed values.\nrandn Normally distributed values.\nranf Uniformly distributed floating point numbers.\nrandom_integers Uniformly distributed integers in a given range.\n (deprecated, use ``integers(..., closed=True)`` instead)\nrandom_sample Alias for `random_sample`\nrandint Uniformly distributed integers in a given range\nseed Seed the legacy random number generator.\n==================== =========================================================\n\n==================== =========================================================\nUnivariate\ndistributions\n-------------------- ---------------------------------------------------------\nbeta Beta distribution over ``[0, 1]``.\nbinomial Binomial distribution.\nchisquare :math:`\\\\chi^2` distribution.\nexponential Exponential distribution.\nf F (Fisher-Snedecor) distribution.\ngamma Gamma distribution.\ngeometric Geometric distribution.\ngumbel Gumbel distribution.\nhypergeometric Hypergeometric distribution.\nlaplace Laplace distribution.\nlogistic Logistic distribution.\nlognormal Log-normal distribution.\nlogseries Logarithmic series distribution.\nnegative_binomial Negative binomial distribution.\nnoncentral_chisquare Non-central chi-square distribution.\nnoncentral_f Non-central F distribution.\nnormal Normal / Gaussian distribution.\npareto Pareto distribution.\npoisson Poisson distribution.\npower Power distribution.\nrayleigh Rayleigh distribution.\ntriangular Triangular distribution.\nuniform Uniform distribution.\nvonmises Von Mises circular distribution.\nwald Wald (inverse Gaussian) distribution.\nweibull Weibull distribution.\nzipf Zipf's 
distribution over ranked data.\n==================== =========================================================\n\n==================== ==========================================================\nMultivariate\ndistributions\n-------------------- ----------------------------------------------------------\ndirichlet Multivariate generalization of Beta distribution.\nmultinomial Multivariate generalization of the binomial distribution.\nmultivariate_normal Multivariate generalization of the normal distribution.\n==================== ==========================================================\n\n==================== =========================================================\nStandard\ndistributions\n-------------------- ---------------------------------------------------------\nstandard_cauchy Standard Cauchy-Lorentz distribution.\nstandard_exponential Standard exponential distribution.\nstandard_gamma Standard Gamma distribution.\nstandard_normal Standard normal distribution.\nstandard_t Standard Student's t-distribution.\n==================== =========================================================\n\n==================== =========================================================\nInternal functions\n-------------------- ---------------------------------------------------------\nget_state Get tuple representing internal state of generator.\nset_state Set state of generator.\n==================== =========================================================\n\n\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\n__all__ = [\n 'beta',\n 'binomial',\n 'bytes',\n 'chisquare',\n 'choice',\n 'dirichlet',\n 'exponential',\n 'f',\n 'gamma',\n 'geometric',\n 'get_state',\n 'gumbel',\n 'hypergeometric',\n 'laplace',\n 'logistic',\n 'lognormal',\n 'logseries',\n 'multinomial',\n 'multivariate_normal',\n 'negative_binomial',\n 'noncentral_chisquare',\n 'noncentral_f',\n 'normal',\n 'pareto',\n 'permutation',\n 'poisson',\n 'power',\n 'rand',\n 'randint',\n 'randn',\n 'random',\n 'random_integers',\n 'random_sample',\n 'ranf',\n 'rayleigh',\n 'sample',\n 'seed',\n 'set_state',\n 'shuffle',\n 'standard_cauchy',\n 'standard_exponential',\n 'standard_gamma',\n 'standard_normal',\n 'standard_t',\n 'triangular',\n 'uniform',\n 'vonmises',\n 'wald',\n 'weibull',\n 'zipf',\n]\n\n# add these for module-freeze analysis (like PyInstaller)\nfrom . import _pickle\nfrom . import common\nfrom . import bounded_integers\n\nfrom .mtrand import *\nfrom .generator import Generator, default_rng\nfrom .bit_generator import SeedSequence\nfrom .mt19937 import MT19937\nfrom .pcg64 import PCG64\nfrom .philox import Philox\nfrom .sfc64 import SFC64\nfrom .mtrand import RandomState\n\n__all__ += ['Generator', 'RandomState', 'SeedSequence', 'MT19937',\n 'Philox', 'PCG64', 'SFC64', 'default_rng']\n\n\ndef __RandomState_ctor():\n \"\"\"Return a RandomState instance.\n\n This function exists solely to assist (un)pickling.\n\n Note that the state of the RandomState returned here is irrelevant, as this\n function's entire purpose is to return a newly allocated RandomState whose\n state pickle can set. Consequently the RandomState returned by this function\n is a freshly allocated copy with a seed=0.\n\n See https://github.com/numpy/numpy/issues/4763 for a detailed discussion\n\n \"\"\"\n return RandomState(seed=0)\n\n\nfrom numpy._pytesttester import PytestTester\ntest = PytestTester(__name__)\ndel PytestTester\n",
"\"\"\"\nLocally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG).\n\nReferences\n----------\n.. [1] A. V. Knyazev (2001),\n Toward the Optimal Preconditioned Eigensolver: Locally Optimal\n Block Preconditioned Conjugate Gradient Method.\n SIAM Journal on Scientific Computing 23, no. 2,\n pp. 517-541. http://dx.doi.org/10.1137/S1064827500366124\n\n.. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov (2007),\n Block Locally Optimal Preconditioned Eigenvalue Xolvers (BLOPEX)\n in hypre and PETSc. https://arxiv.org/abs/0705.2626\n\n.. [3] A. V. Knyazev's C and MATLAB implementations:\n https://bitbucket.org/joseroman/blopex\n\"\"\"\n\nfrom __future__ import division, print_function, absolute_import\nimport numpy as np\nfrom scipy.linalg import (inv, eigh, cho_factor, cho_solve, cholesky,\n LinAlgError)\nfrom scipy.sparse.linalg import aslinearoperator\nfrom scipy.sparse.sputils import bmat\n\n__all__ = ['lobpcg']\n\n\ndef _save(ar, fileName):\n # Used only when verbosity level > 10.\n np.savetxt(fileName, ar)\n\n\ndef _report_nonhermitian(M, a, b, name):\n \"\"\"\n Report if `M` is not a hermitian matrix given the tolerances `a`, `b`.\n \"\"\"\n from scipy.linalg import norm\n\n md = M - M.T.conj()\n\n nmd = norm(md, 1)\n tol = np.spacing(max(10**a, (10**b)*norm(M, 1)))\n if nmd > tol:\n print('matrix %s is not sufficiently Hermitian for a=%d, b=%d:'\n % (name, a, b))\n print('condition: %.e < %e' % (nmd, tol))\n\n\ndef _as2d(ar):\n \"\"\"\n If the input array is 2D return it, if it is 1D, append a dimension,\n making it a column vector.\n \"\"\"\n if ar.ndim == 2:\n return ar\n else: # Assume 1!\n aux = np.array(ar, copy=False)\n aux.shape = (ar.shape[0], 1)\n return aux\n\n\ndef _makeOperator(operatorInput, expectedShape):\n \"\"\"Takes a dense numpy array or a sparse matrix or\n a function and makes an operator performing matrix * blockvector\n products.\"\"\"\n if operatorInput is None:\n return None\n else:\n operator = aslinearoperator(operatorInput)\n\n if operator.shape != expectedShape:\n raise ValueError('operator has invalid shape')\n\n return operator\n\n\ndef _applyConstraints(blockVectorV, factYBY, blockVectorBY, blockVectorY):\n \"\"\"Changes blockVectorV in place.\"\"\"\n gramYBV = np.dot(blockVectorBY.T.conj(), blockVectorV)\n tmp = cho_solve(factYBY, gramYBV)\n blockVectorV -= np.dot(blockVectorY, tmp)\n\n\ndef _b_orthonormalize(B, blockVectorV, blockVectorBV=None, retInvR=False):\n if blockVectorBV is None:\n if B is not None:\n blockVectorBV = B(blockVectorV)\n else:\n blockVectorBV = blockVectorV # Shared data!!!\n gramVBV = np.dot(blockVectorV.T.conj(), blockVectorBV)\n gramVBV = cholesky(gramVBV)\n gramVBV = inv(gramVBV, overwrite_a=True)\n # gramVBV is now R^{-1}.\n blockVectorV = np.dot(blockVectorV, gramVBV)\n if B is not None:\n blockVectorBV = np.dot(blockVectorBV, gramVBV)\n else:\n blockVectorBV = None\n\n if retInvR:\n return blockVectorV, blockVectorBV, gramVBV\n else:\n return blockVectorV, blockVectorBV\n\n\ndef _get_indx(_lambda, num, largest):\n \"\"\"Get `num` indices into `_lambda` depending on `largest` option.\"\"\"\n ii = np.argsort(_lambda)\n if largest:\n ii = ii[:-num-1:-1]\n else:\n ii = ii[:num]\n\n return ii\n\n\ndef lobpcg(A, X,\n B=None, M=None, Y=None,\n tol=None, maxiter=20,\n largest=True, verbosityLevel=0,\n retLambdaHistory=False, retResidualNormsHistory=False):\n \"\"\"Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG)\n\n LOBPCG is a preconditioned eigensolver for large symmetric 
positive\n definite (SPD) generalized eigenproblems.\n\n Parameters\n ----------\n A : {sparse matrix, dense matrix, LinearOperator}\n The symmetric linear operator of the problem, usually a\n sparse matrix. Often called the \"stiffness matrix\".\n X : array_like\n Initial approximation to the k eigenvectors. If A has\n shape=(n,n) then X should have shape shape=(n,k).\n B : {dense matrix, sparse matrix, LinearOperator}, optional\n the right hand side operator in a generalized eigenproblem.\n by default, B = Identity\n often called the \"mass matrix\"\n M : {dense matrix, sparse matrix, LinearOperator}, optional\n preconditioner to A; by default M = Identity\n M should approximate the inverse of A\n Y : array_like, optional\n n-by-sizeY matrix of constraints, sizeY < n\n The iterations will be performed in the B-orthogonal complement\n of the column-space of Y. Y must be full rank.\n tol : scalar, optional\n Solver tolerance (stopping criterion)\n by default: tol=n*sqrt(eps)\n maxiter : integer, optional\n maximum number of iterations\n by default: maxiter=min(n,20)\n largest : bool, optional\n when True, solve for the largest eigenvalues, otherwise the smallest\n verbosityLevel : integer, optional\n controls solver output. default: verbosityLevel = 0.\n retLambdaHistory : boolean, optional\n whether to return eigenvalue history\n retResidualNormsHistory : boolean, optional\n whether to return history of residual norms\n\n Returns\n -------\n w : array\n Array of k eigenvalues\n v : array\n An array of k eigenvectors. V has the same shape as X.\n lambdas : list of arrays, optional\n The eigenvalue history, if `retLambdaHistory` is True.\n rnorms : list of arrays, optional\n The history of residual norms, if `retResidualNormsHistory` is True.\n\n Examples\n --------\n\n Solve A x = lambda B x with constraints and preconditioning.\n\n >>> from scipy.sparse import spdiags, issparse\n >>> from scipy.sparse.linalg import lobpcg, LinearOperator\n >>> n = 100\n >>> vals = [np.arange(n, dtype=np.float64) + 1]\n >>> A = spdiags(vals, 0, n, n)\n >>> A.toarray()\n array([[ 1., 0., 0., ..., 0., 0., 0.],\n [ 0., 2., 0., ..., 0., 0., 0.],\n [ 0., 0., 3., ..., 0., 0., 0.],\n ...,\n [ 0., 0., 0., ..., 98., 0., 0.],\n [ 0., 0., 0., ..., 0., 99., 0.],\n [ 0., 0., 0., ..., 0., 0., 100.]])\n\n Constraints.\n\n >>> Y = np.eye(n, 3)\n\n Initial guess for eigenvectors, should have linearly independent\n columns. Column dimension = number of requested eigenvalues.\n\n >>> X = np.random.rand(n, 3)\n\n Preconditioner -- inverse of A (as an abstract linear operator).\n\n >>> invA = spdiags([1./vals[0]], 0, n, n)\n >>> def precond( x ):\n ... return invA * x\n >>> M = LinearOperator(matvec=precond, shape=(n, n), dtype=float)\n\n Here, ``invA`` could of course have been used directly as a preconditioner.\n Let us then solve the problem:\n\n >>> eigs, vecs = lobpcg(A, X, Y=Y, M=M, largest=False)\n >>> eigs\n array([4., 5., 6.])\n\n Note that the vectors passed in Y are the eigenvectors of the 3 smallest\n eigenvalues. 
The results returned are orthogonal to those.\n\n Notes\n -----\n If both retLambdaHistory and retResidualNormsHistory are True,\n the return tuple has the following format\n (lambda, V, lambda history, residual norms history).\n\n In the following ``n`` denotes the matrix size and ``m`` the number\n of required eigenvalues (smallest or largest).\n\n The LOBPCG code internally solves eigenproblems of the size 3``m`` on every\n iteration by calling the \"standard\" dense eigensolver, so if ``m`` is not\n small enough compared to ``n``, it does not make sense to call the LOBPCG\n code, but rather one should use the \"standard\" eigensolver,\n e.g. numpy or scipy function in this case.\n If one calls the LOBPCG algorithm for 5``m``>``n``,\n it will most likely break internally, so the code tries to call\n the standard function instead.\n\n It is not that n should be large for the LOBPCG to work, but rather the\n ratio ``n``/``m`` should be large. It you call LOBPCG with ``m``=1\n and ``n``=10, it works though ``n`` is small. The method is intended\n for extremely large ``n``/``m``, see e.g., reference [28] in\n https://arxiv.org/abs/0705.2626\n\n The convergence speed depends basically on two factors:\n\n 1. How well relatively separated the seeking eigenvalues are from the rest\n of the eigenvalues. One can try to vary ``m`` to make this better.\n\n 2. How well conditioned the problem is. This can be changed by using proper\n preconditioning. For example, a rod vibration test problem (under tests\n directory) is ill-conditioned for large ``n``, so convergence will be\n slow, unless efficient preconditioning is used. For this specific\n problem, a good simple preconditioner function would be a linear solve\n for A, which is easy to code since A is tridiagonal.\n\n *Acknowledgements*\n\n lobpcg.py code was written by Robert Cimrman.\n Many thanks belong to Andrew Knyazev, the author of the algorithm,\n for lots of advice and support.\n\n References\n ----------\n .. [1] A. V. Knyazev (2001),\n Toward the Optimal Preconditioned Eigensolver: Locally Optimal\n Block Preconditioned Conjugate Gradient Method.\n SIAM Journal on Scientific Computing 23, no. 2,\n pp. 517-541. http://dx.doi.org/10.1137/S1064827500366124\n\n .. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov\n (2007), Block Locally Optimal Preconditioned Eigenvalue Xolvers\n (BLOPEX) in hypre and PETSc. https://arxiv.org/abs/0705.2626\n\n .. [3] A. V. 
Knyazev's C and MATLAB implementations:\n https://bitbucket.org/joseroman/blopex\n \"\"\"\n blockVectorX = X\n blockVectorY = Y\n residualTolerance = tol\n maxIterations = maxiter\n\n if blockVectorY is not None:\n sizeY = blockVectorY.shape[1]\n else:\n sizeY = 0\n\n # Block size.\n if len(blockVectorX.shape) != 2:\n raise ValueError('expected rank-2 array for argument X')\n\n n, sizeX = blockVectorX.shape\n\n if verbosityLevel:\n aux = \"Solving \"\n if B is None:\n aux += \"standard\"\n else:\n aux += \"generalized\"\n aux += \" eigenvalue problem with\"\n if M is None:\n aux += \"out\"\n aux += \" preconditioning\\n\\n\"\n aux += \"matrix size %d\\n\" % n\n aux += \"block size %d\\n\\n\" % sizeX\n if blockVectorY is None:\n aux += \"No constraints\\n\\n\"\n else:\n if sizeY > 1:\n aux += \"%d constraints\\n\\n\" % sizeY\n else:\n aux += \"%d constraint\\n\\n\" % sizeY\n print(aux)\n\n A = _makeOperator(A, (n, n))\n B = _makeOperator(B, (n, n))\n M = _makeOperator(M, (n, n))\n\n if (n - sizeY) < (5 * sizeX):\n # warn('The problem size is small compared to the block size.' \\\n # ' Using dense eigensolver instead of LOBPCG.')\n\n sizeX = min(sizeX, n)\n\n if blockVectorY is not None:\n raise NotImplementedError('The dense eigensolver '\n 'does not support constraints.')\n\n # Define the closed range of indices of eigenvalues to return.\n if largest:\n eigvals = (n - sizeX, n-1)\n else:\n eigvals = (0, sizeX-1)\n\n A_dense = A(np.eye(n, dtype=A.dtype))\n B_dense = None if B is None else B(np.eye(n, dtype=B.dtype))\n\n vals, vecs = eigh(A_dense, B_dense, eigvals=eigvals,\n check_finite=False)\n if largest:\n # Reverse order to be compatible with eigs() in 'LM' mode.\n vals = vals[::-1]\n vecs = vecs[:, ::-1]\n\n return vals, vecs\n\n if (residualTolerance is None) or (residualTolerance <= 0.0):\n residualTolerance = np.sqrt(1e-15) * n\n\n # Apply constraints to X.\n if blockVectorY is not None:\n\n if B is not None:\n blockVectorBY = B(blockVectorY)\n else:\n blockVectorBY = blockVectorY\n\n # gramYBY is a dense array.\n gramYBY = np.dot(blockVectorY.T.conj(), blockVectorBY)\n try:\n # gramYBY is a Cholesky factor from now on...\n gramYBY = cho_factor(gramYBY)\n except LinAlgError:\n raise ValueError('cannot handle linearly dependent constraints')\n\n _applyConstraints(blockVectorX, gramYBY, blockVectorBY, blockVectorY)\n\n ##\n # B-orthonormalize X.\n blockVectorX, blockVectorBX = _b_orthonormalize(B, blockVectorX)\n\n ##\n # Compute the initial Ritz vectors: solve the eigenproblem.\n blockVectorAX = A(blockVectorX)\n gramXAX = np.dot(blockVectorX.T.conj(), blockVectorAX)\n\n _lambda, eigBlockVector = eigh(gramXAX, check_finite=False)\n ii = _get_indx(_lambda, sizeX, largest)\n _lambda = _lambda[ii]\n\n eigBlockVector = np.asarray(eigBlockVector[:, ii])\n blockVectorX = np.dot(blockVectorX, eigBlockVector)\n blockVectorAX = np.dot(blockVectorAX, eigBlockVector)\n if B is not None:\n blockVectorBX = np.dot(blockVectorBX, eigBlockVector)\n\n ##\n # Active index set.\n activeMask = np.ones((sizeX,), dtype=bool)\n\n lambdaHistory = [_lambda]\n residualNormsHistory = []\n\n previousBlockSize = sizeX\n ident = np.eye(sizeX, dtype=A.dtype)\n ident0 = np.eye(sizeX, dtype=A.dtype)\n\n ##\n # Main iteration loop.\n\n blockVectorP = None # set during iteration\n blockVectorAP = None\n blockVectorBP = None\n\n iterationNumber = -1\n while iterationNumber < maxIterations:\n iterationNumber += 1\n if verbosityLevel > 0:\n print('iteration %d' % iterationNumber)\n\n if B is not None:\n aux = 
blockVectorBX * _lambda[np.newaxis, :]\n\n else:\n aux = blockVectorX * _lambda[np.newaxis, :]\n\n blockVectorR = blockVectorAX - aux\n\n aux = np.sum(blockVectorR.conjugate() * blockVectorR, 0)\n residualNorms = np.sqrt(aux)\n\n residualNormsHistory.append(residualNorms)\n\n ii = np.where(residualNorms > residualTolerance, True, False)\n activeMask = activeMask & ii\n if verbosityLevel > 2:\n print(activeMask)\n\n currentBlockSize = activeMask.sum()\n if currentBlockSize != previousBlockSize:\n previousBlockSize = currentBlockSize\n ident = np.eye(currentBlockSize, dtype=A.dtype)\n\n if currentBlockSize == 0:\n break\n\n if verbosityLevel > 0:\n print('current block size:', currentBlockSize)\n print('eigenvalue:', _lambda)\n print('residual norms:', residualNorms)\n if verbosityLevel > 10:\n print(eigBlockVector)\n\n activeBlockVectorR = _as2d(blockVectorR[:, activeMask])\n\n if iterationNumber > 0:\n activeBlockVectorP = _as2d(blockVectorP[:, activeMask])\n activeBlockVectorAP = _as2d(blockVectorAP[:, activeMask])\n if B is not None:\n activeBlockVectorBP = _as2d(blockVectorBP[:, activeMask])\n\n if M is not None:\n # Apply preconditioner T to the active residuals.\n activeBlockVectorR = M(activeBlockVectorR)\n\n ##\n # Apply constraints to the preconditioned residuals.\n if blockVectorY is not None:\n _applyConstraints(activeBlockVectorR,\n gramYBY, blockVectorBY, blockVectorY)\n\n ##\n # B-orthonormalize the preconditioned residuals.\n\n aux = _b_orthonormalize(B, activeBlockVectorR)\n activeBlockVectorR, activeBlockVectorBR = aux\n\n activeBlockVectorAR = A(activeBlockVectorR)\n\n if iterationNumber > 0:\n if B is not None:\n aux = _b_orthonormalize(B, activeBlockVectorP,\n activeBlockVectorBP, retInvR=True)\n activeBlockVectorP, activeBlockVectorBP, invR = aux\n activeBlockVectorAP = np.dot(activeBlockVectorAP, invR)\n\n else:\n aux = _b_orthonormalize(B, activeBlockVectorP, retInvR=True)\n activeBlockVectorP, _, invR = aux\n activeBlockVectorAP = np.dot(activeBlockVectorAP, invR)\n\n ##\n # Perform the Rayleigh Ritz Procedure:\n # Compute symmetric Gram matrices:\n\n if B is not None:\n xaw = np.dot(blockVectorX.T.conj(), activeBlockVectorAR)\n waw = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAR)\n xbw = np.dot(blockVectorX.T.conj(), activeBlockVectorBR)\n\n if iterationNumber > 0:\n xap = np.dot(blockVectorX.T.conj(), activeBlockVectorAP)\n wap = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAP)\n pap = np.dot(activeBlockVectorP.T.conj(), activeBlockVectorAP)\n xbp = np.dot(blockVectorX.T.conj(), activeBlockVectorBP)\n wbp = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorBP)\n\n gramA = bmat([[np.diag(_lambda), xaw, xap],\n [xaw.T.conj(), waw, wap],\n [xap.T.conj(), wap.T.conj(), pap]])\n\n gramB = bmat([[ident0, xbw, xbp],\n [xbw.T.conj(), ident, wbp],\n [xbp.T.conj(), wbp.T.conj(), ident]])\n else:\n gramA = bmat([[np.diag(_lambda), xaw],\n [xaw.T.conj(), waw]])\n gramB = bmat([[ident0, xbw],\n [xbw.T.conj(), ident]])\n\n else:\n xaw = np.dot(blockVectorX.T.conj(), activeBlockVectorAR)\n waw = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAR)\n xbw = np.dot(blockVectorX.T.conj(), activeBlockVectorR)\n\n if iterationNumber > 0:\n xap = np.dot(blockVectorX.T.conj(), activeBlockVectorAP)\n wap = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAP)\n pap = np.dot(activeBlockVectorP.T.conj(), activeBlockVectorAP)\n xbp = np.dot(blockVectorX.T.conj(), activeBlockVectorP)\n wbp = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorP)\n\n gramA = 
bmat([[np.diag(_lambda), xaw, xap],\n [xaw.T.conj(), waw, wap],\n [xap.T.conj(), wap.T.conj(), pap]])\n\n gramB = bmat([[ident0, xbw, xbp],\n [xbw.T.conj(), ident, wbp],\n [xbp.T.conj(), wbp.T.conj(), ident]])\n else:\n gramA = bmat([[np.diag(_lambda), xaw],\n [xaw.T.conj(), waw]])\n gramB = bmat([[ident0, xbw],\n [xbw.T.conj(), ident]])\n\n if verbosityLevel > 0:\n _report_nonhermitian(gramA, 3, -1, 'gramA')\n _report_nonhermitian(gramB, 3, -1, 'gramB')\n\n if verbosityLevel > 10:\n _save(gramA, 'gramA')\n _save(gramB, 'gramB')\n\n # Solve the generalized eigenvalue problem.\n _lambda, eigBlockVector = eigh(gramA, gramB, check_finite=False)\n ii = _get_indx(_lambda, sizeX, largest)\n\n if verbosityLevel > 10:\n print(ii)\n print(_lambda)\n\n _lambda = _lambda[ii]\n eigBlockVector = eigBlockVector[:, ii]\n\n lambdaHistory.append(_lambda)\n\n if verbosityLevel > 10:\n print('lambda:', _lambda)\n# # Normalize eigenvectors!\n# aux = np.sum( eigBlockVector.conjugate() * eigBlockVector, 0 )\n# eigVecNorms = np.sqrt( aux )\n# eigBlockVector = eigBlockVector / eigVecNorms[np.newaxis, :]\n# eigBlockVector, aux = _b_orthonormalize( B, eigBlockVector )\n\n if verbosityLevel > 10:\n print(eigBlockVector)\n\n # Compute Ritz vectors.\n if B is not None:\n if iterationNumber > 0:\n eigBlockVectorX = eigBlockVector[:sizeX]\n eigBlockVectorR = eigBlockVector[sizeX:sizeX+currentBlockSize]\n eigBlockVectorP = eigBlockVector[sizeX+currentBlockSize:]\n\n pp = np.dot(activeBlockVectorR, eigBlockVectorR)\n pp += np.dot(activeBlockVectorP, eigBlockVectorP)\n\n app = np.dot(activeBlockVectorAR, eigBlockVectorR)\n app += np.dot(activeBlockVectorAP, eigBlockVectorP)\n\n bpp = np.dot(activeBlockVectorBR, eigBlockVectorR)\n bpp += np.dot(activeBlockVectorBP, eigBlockVectorP)\n else:\n eigBlockVectorX = eigBlockVector[:sizeX]\n eigBlockVectorR = eigBlockVector[sizeX:]\n\n pp = np.dot(activeBlockVectorR, eigBlockVectorR)\n app = np.dot(activeBlockVectorAR, eigBlockVectorR)\n bpp = np.dot(activeBlockVectorBR, eigBlockVectorR)\n\n if verbosityLevel > 10:\n print(pp)\n print(app)\n print(bpp)\n\n blockVectorX = np.dot(blockVectorX, eigBlockVectorX) + pp\n blockVectorAX = np.dot(blockVectorAX, eigBlockVectorX) + app\n blockVectorBX = np.dot(blockVectorBX, eigBlockVectorX) + bpp\n\n blockVectorP, blockVectorAP, blockVectorBP = pp, app, bpp\n\n else:\n if iterationNumber > 0:\n eigBlockVectorX = eigBlockVector[:sizeX]\n eigBlockVectorR = eigBlockVector[sizeX:sizeX+currentBlockSize]\n eigBlockVectorP = eigBlockVector[sizeX+currentBlockSize:]\n\n pp = np.dot(activeBlockVectorR, eigBlockVectorR)\n pp += np.dot(activeBlockVectorP, eigBlockVectorP)\n\n app = np.dot(activeBlockVectorAR, eigBlockVectorR)\n app += np.dot(activeBlockVectorAP, eigBlockVectorP)\n else:\n eigBlockVectorX = eigBlockVector[:sizeX]\n eigBlockVectorR = eigBlockVector[sizeX:]\n\n pp = np.dot(activeBlockVectorR, eigBlockVectorR)\n app = np.dot(activeBlockVectorAR, eigBlockVectorR)\n\n if verbosityLevel > 10:\n print(pp)\n print(app)\n\n blockVectorX = np.dot(blockVectorX, eigBlockVectorX) + pp\n blockVectorAX = np.dot(blockVectorAX, eigBlockVectorX) + app\n\n blockVectorP, blockVectorAP = pp, app\n\n if B is not None:\n aux = blockVectorBX * _lambda[np.newaxis, :]\n\n else:\n aux = blockVectorX * _lambda[np.newaxis, :]\n\n blockVectorR = blockVectorAX - aux\n\n aux = np.sum(blockVectorR.conjugate() * blockVectorR, 0)\n residualNorms = np.sqrt(aux)\n\n if verbosityLevel > 0:\n print('final eigenvalue:', _lambda)\n print('final residual norms:', 
residualNorms)\n\n if retLambdaHistory:\n if retResidualNormsHistory:\n return _lambda, blockVectorX, lambdaHistory, residualNormsHistory\n else:\n return _lambda, blockVectorX, lambdaHistory\n else:\n if retResidualNormsHistory:\n return _lambda, blockVectorX, residualNormsHistory\n else:\n return _lambda, blockVectorX\n",
"\"\"\"UnitDblFormatter module containing class UnitDblFormatter.\"\"\"\n\nimport matplotlib.ticker as ticker\n\n__all__ = ['UnitDblFormatter']\n\n\nclass UnitDblFormatter(ticker.ScalarFormatter):\n \"\"\"The formatter for UnitDbl data types. This allows for formatting\n with the unit string.\n \"\"\"\n def __init__(self, *args, **kwargs):\n 'The arguments are identical to matplotlib.ticker.ScalarFormatter.'\n ticker.ScalarFormatter.__init__(self, *args, **kwargs)\n\n def __call__(self, x, pos=None):\n 'Return the format for tick val x at position pos'\n if len(self.locs) == 0:\n return ''\n else:\n return '{:.12}'.format(x)\n\n def format_data_short(self, value):\n \"Return the value formatted in 'short' format.\"\n return '{:.12}'.format(value)\n\n def format_data(self, value):\n \"Return the value formatted into a string.\"\n return '{:.12}'.format(value)\n",
"# Author: Tom Dupre la Tour\n# Joan Massich <[email protected]>\n#\n# License: BSD 3 clause\n\nimport numpy as np\nimport pytest\nimport scipy.sparse as sp\nfrom numpy.testing import assert_array_equal\nfrom sklearn.utils.seq_dataset import (\n ArrayDataset32, ArrayDataset64, CSRDataset32, CSRDataset64)\n\nfrom sklearn.datasets import load_iris\nfrom sklearn.utils.testing import assert_allclose\n\niris = load_iris()\nX64 = iris.data.astype(np.float64)\ny64 = iris.target.astype(np.float64)\nX_csr64 = sp.csr_matrix(X64)\nsample_weight64 = np.arange(y64.size, dtype=np.float64)\n\nX32 = iris.data.astype(np.float32)\ny32 = iris.target.astype(np.float32)\nX_csr32 = sp.csr_matrix(X32)\nsample_weight32 = np.arange(y32.size, dtype=np.float32)\n\n\ndef assert_csr_equal_values(current, expected):\n current.eliminate_zeros()\n expected.eliminate_zeros()\n expected = expected.astype(current.dtype)\n assert current.shape[0] == expected.shape[0]\n assert current.shape[1] == expected.shape[1]\n assert_array_equal(current.data, expected.data)\n assert_array_equal(current.indices, expected.indices)\n assert_array_equal(current.indptr, expected.indptr)\n\n\ndef make_dense_dataset_32():\n return ArrayDataset32(X32, y32, sample_weight32, seed=42)\n\n\ndef make_dense_dataset_64():\n return ArrayDataset64(X64, y64, sample_weight64, seed=42)\n\n\ndef make_sparse_dataset_32():\n return CSRDataset32(X_csr32.data, X_csr32.indptr, X_csr32.indices, y32,\n sample_weight32, seed=42)\n\n\ndef make_sparse_dataset_64():\n return CSRDataset64(X_csr64.data, X_csr64.indptr, X_csr64.indices, y64,\n sample_weight64, seed=42)\n\n\[email protected]('dataset_constructor', [\n make_dense_dataset_32,\n make_dense_dataset_64,\n make_sparse_dataset_32,\n make_sparse_dataset_64,\n])\ndef test_seq_dataset_basic_iteration(dataset_constructor):\n NUMBER_OF_RUNS = 5\n dataset = dataset_constructor()\n for _ in range(NUMBER_OF_RUNS):\n # next sample\n xi_, yi, swi, idx = dataset._next_py()\n xi = sp.csr_matrix((xi_), shape=(1, X64.shape[1]))\n\n assert_csr_equal_values(xi, X_csr64[idx])\n assert yi == y64[idx]\n assert swi == sample_weight64[idx]\n\n # random sample\n xi_, yi, swi, idx = dataset._random_py()\n xi = sp.csr_matrix((xi_), shape=(1, X64.shape[1]))\n\n assert_csr_equal_values(xi, X_csr64[idx])\n assert yi == y64[idx]\n assert swi == sample_weight64[idx]\n\n\[email protected]('make_dense_dataset,make_sparse_dataset', [\n (make_dense_dataset_32, make_sparse_dataset_32),\n (make_dense_dataset_64, make_sparse_dataset_64),\n])\ndef test_seq_dataset_shuffle(make_dense_dataset, make_sparse_dataset):\n dense_dataset, sparse_dataset = make_dense_dataset(), make_sparse_dataset()\n # not shuffled\n for i in range(5):\n _, _, _, idx1 = dense_dataset._next_py()\n _, _, _, idx2 = sparse_dataset._next_py()\n assert idx1 == i\n assert idx2 == i\n\n for i in [132, 50, 9, 18, 58]:\n _, _, _, idx1 = dense_dataset._random_py()\n _, _, _, idx2 = sparse_dataset._random_py()\n assert idx1 == i\n assert idx2 == i\n\n seed = 77\n dense_dataset._shuffle_py(seed)\n sparse_dataset._shuffle_py(seed)\n\n idx_next = [63, 91, 148, 87, 29]\n idx_shuffle = [137, 125, 56, 121, 127]\n for i, j in zip(idx_next, idx_shuffle):\n _, _, _, idx1 = dense_dataset._next_py()\n _, _, _, idx2 = sparse_dataset._next_py()\n assert idx1 == i\n assert idx2 == i\n\n _, _, _, idx1 = dense_dataset._random_py()\n _, _, _, idx2 = sparse_dataset._random_py()\n assert idx1 == j\n assert idx2 == j\n\n\[email protected]('make_dataset_32,make_dataset_64', [\n (make_dense_dataset_32, 
make_dense_dataset_64),\n (make_sparse_dataset_32, make_sparse_dataset_64),\n])\ndef test_fused_types_consistency(make_dataset_32, make_dataset_64):\n dataset_32, dataset_64 = make_dataset_32(), make_dataset_64()\n NUMBER_OF_RUNS = 5\n for _ in range(NUMBER_OF_RUNS):\n # next sample\n (xi_data32, _, _), yi32, _, _ = dataset_32._next_py()\n (xi_data64, _, _), yi64, _, _ = dataset_64._next_py()\n\n assert xi_data32.dtype == np.float32\n assert xi_data64.dtype == np.float64\n\n assert_allclose(xi_data64, xi_data32, rtol=1e-5)\n assert_allclose(yi64, yi32, rtol=1e-5)\n\n\ndef test_buffer_dtype_mismatch_error():\n with pytest.raises(ValueError, match='Buffer dtype mismatch'):\n ArrayDataset64(X32, y32, sample_weight32, seed=42),\n\n with pytest.raises(ValueError, match='Buffer dtype mismatch'):\n ArrayDataset32(X64, y64, sample_weight64, seed=42),\n\n with pytest.raises(ValueError, match='Buffer dtype mismatch'):\n CSRDataset64(X_csr32.data, X_csr32.indptr, X_csr32.indices, y32,\n sample_weight32, seed=42),\n\n with pytest.raises(ValueError, match='Buffer dtype mismatch'):\n CSRDataset32(X_csr64.data, X_csr64.indptr, X_csr64.indices, y64,\n sample_weight64, seed=42),\n",
"from __future__ import division, print_function, absolute_import\n\nimport datetime\nimport os\nimport sys\nfrom os.path import join as pjoin\nfrom scipy._lib.six import xrange\n\nif sys.version_info[0] >= 3:\n from io import StringIO\nelse:\n from cStringIO import StringIO\n\nimport numpy as np\n\nfrom numpy.testing import (assert_array_almost_equal,\n assert_array_equal, assert_equal, assert_)\nimport pytest\nfrom pytest import raises as assert_raises\n\nfrom scipy.io.arff.arffread import loadarff\nfrom scipy.io.arff.arffread import read_header, ParseArffError\n\n\ndata_path = pjoin(os.path.dirname(__file__), 'data')\n\ntest1 = pjoin(data_path, 'test1.arff')\ntest2 = pjoin(data_path, 'test2.arff')\ntest3 = pjoin(data_path, 'test3.arff')\n\ntest4 = pjoin(data_path, 'test4.arff')\ntest5 = pjoin(data_path, 'test5.arff')\ntest6 = pjoin(data_path, 'test6.arff')\ntest7 = pjoin(data_path, 'test7.arff')\ntest8 = pjoin(data_path, 'test8.arff')\ntest9 = pjoin(data_path, 'test9.arff')\ntest10 = pjoin(data_path, 'test10.arff')\ntest11 = pjoin(data_path, 'test11.arff')\ntest_quoted_nominal = pjoin(data_path, 'quoted_nominal.arff')\ntest_quoted_nominal_spaces = pjoin(data_path, 'quoted_nominal_spaces.arff')\n\nexpect4_data = [(0.1, 0.2, 0.3, 0.4, 'class1'),\n (-0.1, -0.2, -0.3, -0.4, 'class2'),\n (1, 2, 3, 4, 'class3')]\nexpected_types = ['numeric', 'numeric', 'numeric', 'numeric', 'nominal']\n\nmissing = pjoin(data_path, 'missing.arff')\nexpect_missing_raw = np.array([[1, 5], [2, 4], [np.nan, np.nan]])\nexpect_missing = np.empty(3, [('yop', float), ('yap', float)])\nexpect_missing['yop'] = expect_missing_raw[:, 0]\nexpect_missing['yap'] = expect_missing_raw[:, 1]\n\n\nclass TestData(object):\n def test1(self):\n # Parsing trivial file with nothing.\n self._test(test4)\n\n def test2(self):\n # Parsing trivial file with some comments in the data section.\n self._test(test5)\n\n def test3(self):\n # Parsing trivial file with nominal attribute of 1 character.\n self._test(test6)\n\n def _test(self, test_file):\n data, meta = loadarff(test_file)\n for i in range(len(data)):\n for j in range(4):\n assert_array_almost_equal(expect4_data[i][j], data[i][j])\n assert_equal(meta.types(), expected_types)\n\n def test_filelike(self):\n # Test reading from file-like object (StringIO)\n f1 = open(test1)\n data1, meta1 = loadarff(f1)\n f1.close()\n f2 = open(test1)\n data2, meta2 = loadarff(StringIO(f2.read()))\n f2.close()\n assert_(data1 == data2)\n assert_(repr(meta1) == repr(meta2))\n\n @pytest.mark.skipif(sys.version_info < (3, 6),\n reason='Passing path-like objects to IO functions requires Python >= 3.6')\n def test_path(self):\n # Test reading from `pathlib.Path` object\n from pathlib import Path\n\n with open(test1) as f1:\n data1, meta1 = loadarff(f1)\n\n data2, meta2 = loadarff(Path(test1))\n\n assert_(data1 == data2)\n assert_(repr(meta1) == repr(meta2))\n\n\nclass TestMissingData(object):\n def test_missing(self):\n data, meta = loadarff(missing)\n for i in ['yop', 'yap']:\n assert_array_almost_equal(data[i], expect_missing[i])\n\n\nclass TestNoData(object):\n def test_nodata(self):\n # The file nodata.arff has no data in the @DATA section.\n # Reading it should result in an array with length 0.\n nodata_filename = os.path.join(data_path, 'nodata.arff')\n data, meta = loadarff(nodata_filename)\n expected_dtype = np.dtype([('sepallength', '<f8'),\n ('sepalwidth', '<f8'),\n ('petallength', '<f8'),\n ('petalwidth', '<f8'),\n ('class', 'S15')])\n assert_equal(data.dtype, expected_dtype)\n 
assert_equal(data.size, 0)\n\n\nclass TestHeader(object):\n def test_type_parsing(self):\n # Test parsing type of attribute from their value.\n ofile = open(test2)\n rel, attrs = read_header(ofile)\n ofile.close()\n\n expected = ['numeric', 'numeric', 'numeric', 'numeric', 'numeric',\n 'numeric', 'string', 'string', 'nominal', 'nominal']\n\n for i in range(len(attrs)):\n assert_(attrs[i].type_name == expected[i])\n\n def test_badtype_parsing(self):\n # Test parsing wrong type of attribute from their value.\n def badtype_read():\n ofile = open(test3)\n rel, attrs = read_header(ofile)\n ofile.close()\n\n assert_raises(ParseArffError, badtype_read)\n\n def test_fullheader1(self):\n # Parsing trivial header with nothing.\n ofile = open(test1)\n rel, attrs = read_header(ofile)\n ofile.close()\n\n # Test relation\n assert_(rel == 'test1')\n\n # Test numerical attributes\n assert_(len(attrs) == 5)\n for i in range(4):\n assert_(attrs[i].name == 'attr%d' % i)\n assert_(attrs[i].type_name == 'numeric')\n\n # Test nominal attribute\n assert_(attrs[4].name == 'class')\n assert_(attrs[4].values == ('class0', 'class1', 'class2', 'class3'))\n\n def test_dateheader(self):\n ofile = open(test7)\n rel, attrs = read_header(ofile)\n ofile.close()\n\n assert_(rel == 'test7')\n\n assert_(len(attrs) == 5)\n\n assert_(attrs[0].name == 'attr_year')\n assert_(attrs[0].date_format == '%Y')\n\n assert_(attrs[1].name == 'attr_month')\n assert_(attrs[1].date_format == '%Y-%m')\n\n assert_(attrs[2].name == 'attr_date')\n assert_(attrs[2].date_format == '%Y-%m-%d')\n\n assert_(attrs[3].name == 'attr_datetime_local')\n assert_(attrs[3].date_format == '%Y-%m-%d %H:%M')\n\n assert_(attrs[4].name == 'attr_datetime_missing')\n assert_(attrs[4].date_format == '%Y-%m-%d %H:%M')\n\n def test_dateheader_unsupported(self):\n def read_dateheader_unsupported():\n ofile = open(test8)\n rel, attrs = read_header(ofile)\n ofile.close()\n\n assert_raises(ValueError, read_dateheader_unsupported)\n\n\nclass TestDateAttribute(object):\n def setup_method(self):\n self.data, self.meta = loadarff(test7)\n\n def test_year_attribute(self):\n expected = np.array([\n '1999',\n '2004',\n '1817',\n '2100',\n '2013',\n '1631'\n ], dtype='datetime64[Y]')\n\n assert_array_equal(self.data[\"attr_year\"], expected)\n\n def test_month_attribute(self):\n expected = np.array([\n '1999-01',\n '2004-12',\n '1817-04',\n '2100-09',\n '2013-11',\n '1631-10'\n ], dtype='datetime64[M]')\n\n assert_array_equal(self.data[\"attr_month\"], expected)\n\n def test_date_attribute(self):\n expected = np.array([\n '1999-01-31',\n '2004-12-01',\n '1817-04-28',\n '2100-09-10',\n '2013-11-30',\n '1631-10-15'\n ], dtype='datetime64[D]')\n\n assert_array_equal(self.data[\"attr_date\"], expected)\n\n def test_datetime_local_attribute(self):\n expected = np.array([\n datetime.datetime(year=1999, month=1, day=31, hour=0, minute=1),\n datetime.datetime(year=2004, month=12, day=1, hour=23, minute=59),\n datetime.datetime(year=1817, month=4, day=28, hour=13, minute=0),\n datetime.datetime(year=2100, month=9, day=10, hour=12, minute=0),\n datetime.datetime(year=2013, month=11, day=30, hour=4, minute=55),\n datetime.datetime(year=1631, month=10, day=15, hour=20, minute=4)\n ], dtype='datetime64[m]')\n\n assert_array_equal(self.data[\"attr_datetime_local\"], expected)\n\n def test_datetime_missing(self):\n expected = np.array([\n 'nat',\n '2004-12-01T23:59',\n 'nat',\n 'nat',\n '2013-11-30T04:55',\n '1631-10-15T20:04'\n ], dtype='datetime64[m]')\n\n 
assert_array_equal(self.data[\"attr_datetime_missing\"], expected)\n\n def test_datetime_timezone(self):\n assert_raises(ParseArffError, loadarff, test8)\n\n\nclass TestRelationalAttribute(object):\n def setup_method(self):\n self.data, self.meta = loadarff(test9)\n\n def test_attributes(self):\n assert_equal(len(self.meta._attributes), 1)\n\n relational = list(self.meta._attributes.values())[0]\n\n assert_equal(relational.name, 'attr_date_number')\n assert_equal(relational.type_name, 'relational')\n assert_equal(len(relational.attributes), 2)\n assert_equal(relational.attributes[0].name,\n 'attr_date')\n assert_equal(relational.attributes[0].type_name,\n 'date')\n assert_equal(relational.attributes[1].name,\n 'attr_number')\n assert_equal(relational.attributes[1].type_name,\n 'numeric')\n\n def test_data(self):\n dtype_instance = [('attr_date', 'datetime64[D]'),\n ('attr_number', np.float_)]\n\n expected = [\n np.array([('1999-01-31', 1), ('1935-11-27', 10)],\n dtype=dtype_instance),\n np.array([('2004-12-01', 2), ('1942-08-13', 20)],\n dtype=dtype_instance),\n np.array([('1817-04-28', 3)],\n dtype=dtype_instance),\n np.array([('2100-09-10', 4), ('1957-04-17', 40),\n ('1721-01-14', 400)],\n dtype=dtype_instance),\n np.array([('2013-11-30', 5)],\n dtype=dtype_instance),\n np.array([('1631-10-15', 6)],\n dtype=dtype_instance)\n ]\n\n for i in range(len(self.data[\"attr_date_number\"])):\n assert_array_equal(self.data[\"attr_date_number\"][i],\n expected[i])\n\n\nclass TestRelationalAttributeLong(object):\n def setup_method(self):\n self.data, self.meta = loadarff(test10)\n\n def test_attributes(self):\n assert_equal(len(self.meta._attributes), 1)\n\n relational = list(self.meta._attributes.values())[0]\n\n assert_equal(relational.name, 'attr_relational')\n assert_equal(relational.type_name, 'relational')\n assert_equal(len(relational.attributes), 1)\n assert_equal(relational.attributes[0].name,\n 'attr_number')\n assert_equal(relational.attributes[0].type_name, 'numeric')\n\n def test_data(self):\n dtype_instance = [('attr_number', np.float_)]\n\n expected = np.array([(n,) for n in xrange(30000)],\n dtype=dtype_instance)\n\n assert_array_equal(self.data[\"attr_relational\"][0],\n expected)\n\n \nclass TestQuotedNominal(object):\n \"\"\"\n Regression test for issue #10232 : Exception in loadarff with quoted nominal attributes.\n \"\"\"\n \n def setup_method(self):\n self.data, self.meta = loadarff(test_quoted_nominal)\n\n def test_attributes(self):\n assert_equal(len(self.meta._attributes), 2)\n\n age, smoker = self.meta._attributes.values()\n\n assert_equal(age.name, 'age')\n assert_equal(age.type_name, 'numeric')\n assert_equal(smoker.name, 'smoker')\n assert_equal(smoker.type_name, 'nominal')\n assert_equal(smoker.values, ['yes', 'no'])\n\n def test_data(self):\n \n age_dtype_instance = np.float_\n smoker_dtype_instance = '<S3'\n\n age_expected = np.array([\n 18,\n 24,\n 44,\n 56,\n 89,\n 11,\n ], dtype=age_dtype_instance)\n \n smoker_expected = np.array([\n 'no',\n 'yes',\n 'no',\n 'no',\n 'yes',\n 'no',\n ], dtype=smoker_dtype_instance)\n\n assert_array_equal(self.data[\"age\"], age_expected)\n assert_array_equal(self.data[\"smoker\"], smoker_expected)\n\n\nclass TestQuotedNominalSpaces(object):\n \"\"\"\n Regression test for issue #10232 : Exception in loadarff with quoted nominal attributes.\n \"\"\"\n \n def setup_method(self):\n self.data, self.meta = loadarff(test_quoted_nominal_spaces)\n\n def test_attributes(self):\n assert_equal(len(self.meta._attributes), 2)\n\n age, smoker = 
self.meta._attributes.values()\n\n assert_equal(age.name, 'age')\n assert_equal(age.type_name, 'numeric')\n assert_equal(smoker.name, 'smoker')\n assert_equal(smoker.type_name, 'nominal')\n assert_equal(smoker.values, [' yes', 'no '])\n\n def test_data(self):\n \n age_dtype_instance = np.float_\n smoker_dtype_instance = '<S5'\n\n age_expected = np.array([\n 18,\n 24,\n 44,\n 56,\n 89,\n 11,\n ], dtype=age_dtype_instance)\n \n smoker_expected = np.array([\n 'no ',\n ' yes',\n 'no ',\n 'no ',\n ' yes',\n 'no ',\n ], dtype=smoker_dtype_instance)\n\n assert_array_equal(self.data[\"age\"], age_expected)\n assert_array_equal(self.data[\"smoker\"], smoker_expected)\n",
"\"\"\"\nCore functions and attributes for the matplotlib style library:\n\n``use``\n Select style sheet to override the current matplotlib settings.\n``context``\n Context manager to use a style sheet temporarily.\n``available``\n List available style sheets.\n``library``\n A dictionary of style names and matplotlib settings.\n\"\"\"\n\nimport contextlib\nimport logging\nimport os\nimport re\nimport warnings\n\nimport matplotlib as mpl\nfrom matplotlib import cbook, rc_params_from_file, rcParamsDefault\n\n_log = logging.getLogger(__name__)\n\n__all__ = ['use', 'context', 'available', 'library', 'reload_library']\n\n\nBASE_LIBRARY_PATH = os.path.join(mpl.get_data_path(), 'stylelib')\n# Users may want multiple library paths, so store a list of paths.\nUSER_LIBRARY_PATHS = [os.path.join(mpl.get_configdir(), 'stylelib')]\nSTYLE_EXTENSION = 'mplstyle'\nSTYLE_FILE_PATTERN = re.compile(r'([\\S]+).%s$' % STYLE_EXTENSION)\n\n\n# A list of rcParams that should not be applied from styles\nSTYLE_BLACKLIST = {\n 'interactive', 'backend', 'backend.qt4', 'webagg.port', 'webagg.address',\n 'webagg.port_retries', 'webagg.open_in_browser', 'backend_fallback',\n 'toolbar', 'timezone', 'datapath', 'figure.max_open_warning',\n 'savefig.directory', 'tk.window_focus', 'docstring.hardcopy'}\n\n\ndef _remove_blacklisted_style_params(d, warn=True):\n o = {}\n for key, val in d.items():\n if key in STYLE_BLACKLIST:\n if warn:\n cbook._warn_external(\n \"Style includes a parameter, '{0}', that is not related \"\n \"to style. Ignoring\".format(key))\n else:\n o[key] = val\n return o\n\n\ndef is_style_file(filename):\n \"\"\"Return True if the filename looks like a style file.\"\"\"\n return STYLE_FILE_PATTERN.match(filename) is not None\n\n\ndef _apply_style(d, warn=True):\n mpl.rcParams.update(_remove_blacklisted_style_params(d, warn=warn))\n\n\ndef use(style):\n \"\"\"Use matplotlib style settings from a style specification.\n\n The style name of 'default' is reserved for reverting back to\n the default style settings.\n\n Parameters\n ----------\n style : str, dict, or list\n A style specification. Valid options are:\n\n +------+-------------------------------------------------------------+\n | str | The name of a style or a path/URL to a style file. For a |\n | | list of available style names, see `style.available`. |\n +------+-------------------------------------------------------------+\n | dict | Dictionary with valid key/value pairs for |\n | | `matplotlib.rcParams`. |\n +------+-------------------------------------------------------------+\n | list | A list of style specifiers (str or dict) applied from first |\n | | to last in the list. 
|\n +------+-------------------------------------------------------------+\n\n\n \"\"\"\n style_alias = {'mpl20': 'default',\n 'mpl15': 'classic'}\n if isinstance(style, str) or hasattr(style, 'keys'):\n # If name is a single str or dict, make it a single element list.\n styles = [style]\n else:\n styles = style\n\n styles = (style_alias.get(s, s) if isinstance(s, str) else s\n for s in styles)\n for style in styles:\n if not isinstance(style, str):\n _apply_style(style)\n elif style == 'default':\n # Deprecation warnings were already handled when creating\n # rcParamsDefault, no need to reemit them here.\n with cbook._suppress_matplotlib_deprecation_warning():\n _apply_style(rcParamsDefault, warn=False)\n elif style in library:\n _apply_style(library[style])\n else:\n try:\n rc = rc_params_from_file(style, use_default_template=False)\n _apply_style(rc)\n except IOError:\n raise IOError(\n \"{!r} not found in the style library and input is not a \"\n \"valid URL or path; see `style.available` for list of \"\n \"available styles\".format(style))\n\n\[email protected]\ndef context(style, after_reset=False):\n \"\"\"Context manager for using style settings temporarily.\n\n Parameters\n ----------\n style : str, dict, or list\n A style specification. Valid options are:\n\n +------+-------------------------------------------------------------+\n | str | The name of a style or a path/URL to a style file. For a |\n | | list of available style names, see `style.available`. |\n +------+-------------------------------------------------------------+\n | dict | Dictionary with valid key/value pairs for |\n | | `matplotlib.rcParams`. |\n +------+-------------------------------------------------------------+\n | list | A list of style specifiers (str or dict) applied from first |\n | | to last in the list. 
|\n +------+-------------------------------------------------------------+\n\n after_reset : bool\n If True, apply style after resetting settings to their defaults;\n otherwise, apply style on top of the current settings.\n \"\"\"\n with mpl.rc_context():\n if after_reset:\n mpl.rcdefaults()\n use(style)\n yield\n\n\ndef load_base_library():\n \"\"\"Load style library defined in this package.\"\"\"\n library = read_style_directory(BASE_LIBRARY_PATH)\n return library\n\n\ndef iter_user_libraries():\n for stylelib_path in USER_LIBRARY_PATHS:\n stylelib_path = os.path.expanduser(stylelib_path)\n if os.path.exists(stylelib_path) and os.path.isdir(stylelib_path):\n yield stylelib_path\n\n\ndef update_user_library(library):\n \"\"\"Update style library with user-defined rc files\"\"\"\n for stylelib_path in iter_user_libraries():\n styles = read_style_directory(stylelib_path)\n update_nested_dict(library, styles)\n return library\n\n\ndef iter_style_files(style_dir):\n \"\"\"Yield file path and name of styles in the given directory.\"\"\"\n for path in os.listdir(style_dir):\n filename = os.path.basename(path)\n if is_style_file(filename):\n match = STYLE_FILE_PATTERN.match(filename)\n path = os.path.abspath(os.path.join(style_dir, path))\n yield path, match.group(1)\n\n\ndef read_style_directory(style_dir):\n \"\"\"Return dictionary of styles defined in `style_dir`.\"\"\"\n styles = dict()\n for path, name in iter_style_files(style_dir):\n with warnings.catch_warnings(record=True) as warns:\n styles[name] = rc_params_from_file(path,\n use_default_template=False)\n\n for w in warns:\n message = 'In %s: %s' % (path, w.message)\n _log.warning(message)\n\n return styles\n\n\ndef update_nested_dict(main_dict, new_dict):\n \"\"\"Update nested dict (only level of nesting) with new values.\n\n Unlike dict.update, this assumes that the values of the parent dict are\n dicts (or dict-like), so you shouldn't replace the nested dict if it\n already exists. Instead you should update the sub-dict.\n \"\"\"\n # update named styles specified by user\n for name, rc_dict in new_dict.items():\n main_dict.setdefault(name, {}).update(rc_dict)\n return main_dict\n\n\n# Load style library\n# ==================\n_base_library = load_base_library()\n\nlibrary = None\navailable = []\n\n\ndef reload_library():\n \"\"\"Reload style library.\"\"\"\n global library\n available[:] = library = update_user_library(_base_library)\nreload_library()\n",
"import functools\nimport logging\nimport os\nfrom pathlib import Path\nimport sys\n\nimport matplotlib\nfrom matplotlib import backend_tools, cbook, rcParams\nfrom matplotlib._pylab_helpers import Gcf\nfrom matplotlib.backend_bases import (\n _Backend, FigureCanvasBase, FigureManagerBase, NavigationToolbar2,\n StatusbarBase, TimerBase, ToolContainerBase, cursors)\nfrom matplotlib.backend_managers import ToolManager\nfrom matplotlib.figure import Figure\nfrom matplotlib.widgets import SubplotTool\n\ntry:\n import gi\nexcept ImportError:\n raise ImportError(\"The GTK3 backends require PyGObject\")\n\ntry:\n # :raises ValueError: If module/version is already loaded, already\n # required, or unavailable.\n gi.require_version(\"Gtk\", \"3.0\")\nexcept ValueError as e:\n # in this case we want to re-raise as ImportError so the\n # auto-backend selection logic correctly skips.\n raise ImportError from e\n\nfrom gi.repository import GLib, GObject, Gtk, Gdk\n\n\n_log = logging.getLogger(__name__)\n\nbackend_version = \"%s.%s.%s\" % (\n Gtk.get_major_version(), Gtk.get_micro_version(), Gtk.get_minor_version())\n\n# the true dots per inch on the screen; should be display dependent\n# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi\nPIXELS_PER_INCH = 96\n\ntry:\n cursord = {\n cursors.MOVE : Gdk.Cursor.new(Gdk.CursorType.FLEUR),\n cursors.HAND : Gdk.Cursor.new(Gdk.CursorType.HAND2),\n cursors.POINTER : Gdk.Cursor.new(Gdk.CursorType.LEFT_PTR),\n cursors.SELECT_REGION : Gdk.Cursor.new(Gdk.CursorType.TCROSS),\n cursors.WAIT : Gdk.Cursor.new(Gdk.CursorType.WATCH),\n }\nexcept TypeError as exc:\n # Happens when running headless. Convert to ImportError to cooperate with\n # backend switching.\n raise ImportError(exc)\n\n\nclass TimerGTK3(TimerBase):\n '''\n Subclass of :class:`backend_bases.TimerBase` using GTK3 for timer events.\n\n Attributes\n ----------\n interval : int\n The time between timer events in milliseconds. Default is 1000 ms.\n single_shot : bool\n Boolean flag indicating whether this timer should operate as single\n shot (run once and then stop). Defaults to False.\n callbacks : list\n Stores list of (func, args) tuples that will be called upon timer\n events. 
This list can be manipulated directly, or the functions\n `add_callback` and `remove_callback` can be used.\n\n '''\n def _timer_start(self):\n # Need to stop it, otherwise we potentially leak a timer id that will\n # never be stopped.\n self._timer_stop()\n self._timer = GLib.timeout_add(self._interval, self._on_timer)\n\n def _timer_stop(self):\n if self._timer is not None:\n GLib.source_remove(self._timer)\n self._timer = None\n\n def _timer_set_interval(self):\n # Only stop and restart it if the timer has already been started\n if self._timer is not None:\n self._timer_stop()\n self._timer_start()\n\n def _on_timer(self):\n TimerBase._on_timer(self)\n\n # Gtk timeout_add() requires that the callback returns True if it\n # is to be called again.\n if self.callbacks and not self._single:\n return True\n else:\n self._timer = None\n return False\n\n\nclass FigureCanvasGTK3(Gtk.DrawingArea, FigureCanvasBase):\n keyvald = {65507: 'control',\n 65505: 'shift',\n 65513: 'alt',\n 65508: 'control',\n 65506: 'shift',\n 65514: 'alt',\n 65361: 'left',\n 65362: 'up',\n 65363: 'right',\n 65364: 'down',\n 65307: 'escape',\n 65470: 'f1',\n 65471: 'f2',\n 65472: 'f3',\n 65473: 'f4',\n 65474: 'f5',\n 65475: 'f6',\n 65476: 'f7',\n 65477: 'f8',\n 65478: 'f9',\n 65479: 'f10',\n 65480: 'f11',\n 65481: 'f12',\n 65300: 'scroll_lock',\n 65299: 'break',\n 65288: 'backspace',\n 65293: 'enter',\n 65379: 'insert',\n 65535: 'delete',\n 65360: 'home',\n 65367: 'end',\n 65365: 'pageup',\n 65366: 'pagedown',\n 65438: '0',\n 65436: '1',\n 65433: '2',\n 65435: '3',\n 65430: '4',\n 65437: '5',\n 65432: '6',\n 65429: '7',\n 65431: '8',\n 65434: '9',\n 65451: '+',\n 65453: '-',\n 65450: '*',\n 65455: '/',\n 65439: 'dec',\n 65421: 'enter',\n }\n\n # Setting this as a static constant prevents\n # this resulting expression from leaking\n event_mask = (Gdk.EventMask.BUTTON_PRESS_MASK |\n Gdk.EventMask.BUTTON_RELEASE_MASK |\n Gdk.EventMask.EXPOSURE_MASK |\n Gdk.EventMask.KEY_PRESS_MASK |\n Gdk.EventMask.KEY_RELEASE_MASK |\n Gdk.EventMask.ENTER_NOTIFY_MASK |\n Gdk.EventMask.LEAVE_NOTIFY_MASK |\n Gdk.EventMask.POINTER_MOTION_MASK |\n Gdk.EventMask.POINTER_MOTION_HINT_MASK|\n Gdk.EventMask.SCROLL_MASK)\n\n def __init__(self, figure):\n FigureCanvasBase.__init__(self, figure)\n GObject.GObject.__init__(self)\n\n self._idle_draw_id = 0\n self._lastCursor = None\n\n self.connect('scroll_event', self.scroll_event)\n self.connect('button_press_event', self.button_press_event)\n self.connect('button_release_event', self.button_release_event)\n self.connect('configure_event', self.configure_event)\n self.connect('draw', self.on_draw_event)\n self.connect('key_press_event', self.key_press_event)\n self.connect('key_release_event', self.key_release_event)\n self.connect('motion_notify_event', self.motion_notify_event)\n self.connect('leave_notify_event', self.leave_notify_event)\n self.connect('enter_notify_event', self.enter_notify_event)\n self.connect('size_allocate', self.size_allocate)\n\n self.set_events(self.__class__.event_mask)\n\n self.set_double_buffered(True)\n self.set_can_focus(True)\n self._renderer_init()\n\n def destroy(self):\n #Gtk.DrawingArea.destroy(self)\n self.close_event()\n if self._idle_draw_id != 0:\n GLib.source_remove(self._idle_draw_id)\n\n def scroll_event(self, widget, event):\n x = event.x\n # flipy so y=0 is bottom of canvas\n y = self.get_allocation().height - event.y\n if event.direction==Gdk.ScrollDirection.UP:\n step = 1\n else:\n step = -1\n FigureCanvasBase.scroll_event(self, x, y, step, 
guiEvent=event)\n return False # finish event propagation?\n\n def button_press_event(self, widget, event):\n x = event.x\n # flipy so y=0 is bottom of canvas\n y = self.get_allocation().height - event.y\n FigureCanvasBase.button_press_event(self, x, y, event.button, guiEvent=event)\n return False # finish event propagation?\n\n def button_release_event(self, widget, event):\n x = event.x\n # flipy so y=0 is bottom of canvas\n y = self.get_allocation().height - event.y\n FigureCanvasBase.button_release_event(self, x, y, event.button, guiEvent=event)\n return False # finish event propagation?\n\n def key_press_event(self, widget, event):\n key = self._get_key(event)\n FigureCanvasBase.key_press_event(self, key, guiEvent=event)\n return True # stop event propagation\n\n def key_release_event(self, widget, event):\n key = self._get_key(event)\n FigureCanvasBase.key_release_event(self, key, guiEvent=event)\n return True # stop event propagation\n\n def motion_notify_event(self, widget, event):\n if event.is_hint:\n t, x, y, state = event.window.get_pointer()\n else:\n x, y, state = event.x, event.y, event.get_state()\n\n # flipy so y=0 is bottom of canvas\n y = self.get_allocation().height - y\n FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)\n return False # finish event propagation?\n\n def leave_notify_event(self, widget, event):\n FigureCanvasBase.leave_notify_event(self, event)\n\n def enter_notify_event(self, widget, event):\n x = event.x\n # flipy so y=0 is bottom of canvas\n y = self.get_allocation().height - event.y\n FigureCanvasBase.enter_notify_event(self, guiEvent=event, xy=(x, y))\n\n def size_allocate(self, widget, allocation):\n dpival = self.figure.dpi\n winch = allocation.width / dpival\n hinch = allocation.height / dpival\n self.figure.set_size_inches(winch, hinch, forward=False)\n FigureCanvasBase.resize_event(self)\n self.draw_idle()\n\n def _get_key(self, event):\n if event.keyval in self.keyvald:\n key = self.keyvald[event.keyval]\n elif event.keyval < 256:\n key = chr(event.keyval)\n else:\n key = None\n\n modifiers = [\n (Gdk.ModifierType.MOD4_MASK, 'super'),\n (Gdk.ModifierType.MOD1_MASK, 'alt'),\n (Gdk.ModifierType.CONTROL_MASK, 'ctrl'),\n ]\n for key_mask, prefix in modifiers:\n if event.state & key_mask:\n key = '{0}+{1}'.format(prefix, key)\n\n return key\n\n def configure_event(self, widget, event):\n if widget.get_property(\"window\") is None:\n return\n w, h = event.width, event.height\n if w < 3 or h < 3:\n return # empty fig\n # resize the figure (in inches)\n dpi = self.figure.dpi\n self.figure.set_size_inches(w / dpi, h / dpi, forward=False)\n return False # finish event propagation?\n\n def on_draw_event(self, widget, ctx):\n # to be overwritten by GTK3Agg or GTK3Cairo\n pass\n\n def draw(self):\n # docstring inherited\n if self.is_drawable():\n self.queue_draw()\n\n def draw_idle(self):\n # docstring inherited\n if self._idle_draw_id != 0:\n return\n def idle_draw(*args):\n try:\n self.draw()\n finally:\n self._idle_draw_id = 0\n return False\n self._idle_draw_id = GLib.idle_add(idle_draw)\n\n def new_timer(self, *args, **kwargs):\n # docstring inherited\n return TimerGTK3(*args, **kwargs)\n\n def flush_events(self):\n # docstring inherited\n Gdk.threads_enter()\n while Gtk.events_pending():\n Gtk.main_iteration()\n Gdk.flush()\n Gdk.threads_leave()\n\n\nclass FigureManagerGTK3(FigureManagerBase):\n \"\"\"\n Attributes\n ----------\n canvas : `FigureCanvas`\n The FigureCanvas instance\n num : int or str\n The Figure number\n toolbar : 
Gtk.Toolbar\n The Gtk.Toolbar\n vbox : Gtk.VBox\n The Gtk.VBox containing the canvas and toolbar\n window : Gtk.Window\n The Gtk.Window\n\n \"\"\"\n def __init__(self, canvas, num):\n FigureManagerBase.__init__(self, canvas, num)\n\n self.window = Gtk.Window()\n self.window.set_wmclass(\"matplotlib\", \"Matplotlib\")\n self.set_window_title(\"Figure %d\" % num)\n try:\n self.window.set_icon_from_file(window_icon)\n except Exception:\n # Some versions of gtk throw a glib.GError but not all, so I am not\n # sure how to catch it. I am unhappy doing a blanket catch here,\n # but am not sure what a better way is - JDH\n _log.info('Could not load matplotlib icon: %s', sys.exc_info()[1])\n\n self.vbox = Gtk.Box()\n self.vbox.set_property(\"orientation\", Gtk.Orientation.VERTICAL)\n self.window.add(self.vbox)\n self.vbox.show()\n\n self.canvas.show()\n\n self.vbox.pack_start(self.canvas, True, True, 0)\n # calculate size for window\n w = int(self.canvas.figure.bbox.width)\n h = int(self.canvas.figure.bbox.height)\n\n self.toolmanager = self._get_toolmanager()\n self.toolbar = self._get_toolbar()\n self.statusbar = None\n\n def add_widget(child, expand, fill, padding):\n child.show()\n self.vbox.pack_end(child, False, False, 0)\n size_request = child.size_request()\n return size_request.height\n\n if self.toolmanager:\n backend_tools.add_tools_to_manager(self.toolmanager)\n if self.toolbar:\n backend_tools.add_tools_to_container(self.toolbar)\n self.statusbar = StatusbarGTK3(self.toolmanager)\n h += add_widget(self.statusbar, False, False, 0)\n h += add_widget(Gtk.HSeparator(), False, False, 0)\n\n if self.toolbar is not None:\n self.toolbar.show()\n h += add_widget(self.toolbar, False, False, 0)\n\n self.window.set_default_size(w, h)\n\n def destroy(*args):\n Gcf.destroy(num)\n self.window.connect(\"destroy\", destroy)\n self.window.connect(\"delete_event\", destroy)\n if matplotlib.is_interactive():\n self.window.show()\n self.canvas.draw_idle()\n\n self.canvas.grab_focus()\n\n def destroy(self, *args):\n self.vbox.destroy()\n self.window.destroy()\n self.canvas.destroy()\n if self.toolbar:\n self.toolbar.destroy()\n\n if (Gcf.get_num_fig_managers() == 0 and\n not matplotlib.is_interactive() and\n Gtk.main_level() >= 1):\n Gtk.main_quit()\n\n def show(self):\n # show the figure window\n self.window.show()\n self.window.present()\n\n def full_screen_toggle(self):\n self._full_screen_flag = not self._full_screen_flag\n if self._full_screen_flag:\n self.window.fullscreen()\n else:\n self.window.unfullscreen()\n _full_screen_flag = False\n\n def _get_toolbar(self):\n # must be inited after the window, drawingArea and figure\n # attrs are set\n if rcParams['toolbar'] == 'toolbar2':\n toolbar = NavigationToolbar2GTK3(self.canvas, self.window)\n elif rcParams['toolbar'] == 'toolmanager':\n toolbar = ToolbarGTK3(self.toolmanager)\n else:\n toolbar = None\n return toolbar\n\n def _get_toolmanager(self):\n # must be initialised after toolbar has been set\n if rcParams['toolbar'] == 'toolmanager':\n toolmanager = ToolManager(self.canvas.figure)\n else:\n toolmanager = None\n return toolmanager\n\n def get_window_title(self):\n return self.window.get_title()\n\n def set_window_title(self, title):\n self.window.set_title(title)\n\n def resize(self, width, height):\n 'set the canvas size in pixels'\n #_, _, cw, ch = self.canvas.allocation\n #_, _, ww, wh = self.window.allocation\n #self.window.resize (width-cw+ww, height-ch+wh)\n self.window.resize(width, height)\n\n\nclass 
NavigationToolbar2GTK3(NavigationToolbar2, Gtk.Toolbar):\n def __init__(self, canvas, window):\n self.win = window\n GObject.GObject.__init__(self)\n NavigationToolbar2.__init__(self, canvas)\n self.ctx = None\n\n def set_message(self, s):\n self.message.set_label(s)\n\n def set_cursor(self, cursor):\n self.canvas.get_property(\"window\").set_cursor(cursord[cursor])\n Gtk.main_iteration()\n\n def draw_rubberband(self, event, x0, y0, x1, y1):\n 'adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189744'\n self.ctx = self.canvas.get_property(\"window\").cairo_create()\n\n # todo: instead of redrawing the entire figure, copy the part of\n # the figure that was covered by the previous rubberband rectangle\n self.canvas.draw()\n\n height = self.canvas.figure.bbox.height\n y1 = height - y1\n y0 = height - y0\n w = abs(x1 - x0)\n h = abs(y1 - y0)\n rect = [int(val) for val in (min(x0, x1), min(y0, y1), w, h)]\n\n self.ctx.new_path()\n self.ctx.set_line_width(0.5)\n self.ctx.rectangle(rect[0], rect[1], rect[2], rect[3])\n self.ctx.set_source_rgb(0, 0, 0)\n self.ctx.stroke()\n\n def _init_toolbar(self):\n self.set_style(Gtk.ToolbarStyle.ICONS)\n basedir = os.path.join(rcParams['datapath'], 'images')\n\n self._gtk_ids = {}\n for text, tooltip_text, image_file, callback in self.toolitems:\n if text is None:\n self.insert(Gtk.SeparatorToolItem(), -1)\n continue\n fname = os.path.join(basedir, image_file + '.png')\n image = Gtk.Image()\n image.set_from_file(fname)\n self._gtk_ids[text] = tbutton = Gtk.ToolButton()\n tbutton.set_label(text)\n tbutton.set_icon_widget(image)\n self.insert(tbutton, -1)\n tbutton.connect('clicked', getattr(self, callback))\n tbutton.set_tooltip_text(tooltip_text)\n\n toolitem = Gtk.SeparatorToolItem()\n self.insert(toolitem, -1)\n toolitem.set_draw(False)\n toolitem.set_expand(True)\n\n toolitem = Gtk.ToolItem()\n self.insert(toolitem, -1)\n self.message = Gtk.Label()\n toolitem.add(self.message)\n\n self.show_all()\n\n @cbook.deprecated(\"3.1\")\n def get_filechooser(self):\n fc = FileChooserDialog(\n title='Save the figure',\n parent=self.win,\n path=os.path.expanduser(rcParams['savefig.directory']),\n filetypes=self.canvas.get_supported_filetypes(),\n default_filetype=self.canvas.get_default_filetype())\n fc.set_current_name(self.canvas.get_default_filename())\n return fc\n\n def save_figure(self, *args):\n dialog = Gtk.FileChooserDialog(\n title=\"Save the figure\",\n parent=self.canvas.get_toplevel(),\n action=Gtk.FileChooserAction.SAVE,\n buttons=(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,\n Gtk.STOCK_SAVE, Gtk.ResponseType.OK),\n )\n for name, fmts \\\n in self.canvas.get_supported_filetypes_grouped().items():\n ff = Gtk.FileFilter()\n ff.set_name(name)\n for fmt in fmts:\n ff.add_pattern(\"*.\" + fmt)\n dialog.add_filter(ff)\n if self.canvas.get_default_filetype() in fmts:\n dialog.set_filter(ff)\n\n @functools.partial(dialog.connect, \"notify::filter\")\n def on_notify_filter(*args):\n name = dialog.get_filter().get_name()\n fmt = self.canvas.get_supported_filetypes_grouped()[name][0]\n dialog.set_current_name(\n str(Path(dialog.get_current_name()).with_suffix(\".\" + fmt)))\n\n dialog.set_current_folder(rcParams[\"savefig.directory\"])\n dialog.set_current_name(self.canvas.get_default_filename())\n dialog.set_do_overwrite_confirmation(True)\n\n response = dialog.run()\n fname = dialog.get_filename()\n ff = dialog.get_filter() # Doesn't autoadjust to filename :/\n fmt = self.canvas.get_supported_filetypes_grouped()[ff.get_name()][0]\n 
dialog.destroy()\n if response == Gtk.ResponseType.CANCEL:\n return\n # Save dir for next time, unless empty str (which means use cwd).\n if rcParams['savefig.directory']:\n rcParams['savefig.directory'] = os.path.dirname(fname)\n try:\n self.canvas.figure.savefig(fname, format=fmt)\n except Exception as e:\n error_msg_gtk(str(e), parent=self)\n\n def configure_subplots(self, button):\n toolfig = Figure(figsize=(6, 3))\n canvas = self._get_canvas(toolfig)\n toolfig.subplots_adjust(top=0.9)\n tool = SubplotTool(self.canvas.figure, toolfig)\n\n w = int(toolfig.bbox.width)\n h = int(toolfig.bbox.height)\n\n window = Gtk.Window()\n try:\n window.set_icon_from_file(window_icon)\n except Exception:\n # we presumably already logged a message on the\n # failure of the main plot, don't keep reporting\n pass\n window.set_title(\"Subplot Configuration Tool\")\n window.set_default_size(w, h)\n vbox = Gtk.Box()\n vbox.set_property(\"orientation\", Gtk.Orientation.VERTICAL)\n window.add(vbox)\n vbox.show()\n\n canvas.show()\n vbox.pack_start(canvas, True, True, 0)\n window.show()\n\n def _get_canvas(self, fig):\n return self.canvas.__class__(fig)\n\n def set_history_buttons(self):\n can_backward = self._nav_stack._pos > 0\n can_forward = self._nav_stack._pos < len(self._nav_stack._elements) - 1\n if 'Back' in self._gtk_ids:\n self._gtk_ids['Back'].set_sensitive(can_backward)\n if 'Forward' in self._gtk_ids:\n self._gtk_ids['Forward'].set_sensitive(can_forward)\n\n\[email protected](\"3.1\")\nclass FileChooserDialog(Gtk.FileChooserDialog):\n \"\"\"GTK+ file selector which remembers the last file/directory\n selected and presents the user with a menu of supported image formats\n \"\"\"\n def __init__(self,\n title = 'Save file',\n parent = None,\n action = Gtk.FileChooserAction.SAVE,\n buttons = (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,\n Gtk.STOCK_SAVE, Gtk.ResponseType.OK),\n path = None,\n filetypes = [],\n default_filetype = None\n ):\n super().__init__(title, parent, action, buttons)\n self.set_default_response(Gtk.ResponseType.OK)\n self.set_do_overwrite_confirmation(True)\n\n if not path:\n path = os.getcwd()\n\n # create an extra widget to list supported image formats\n self.set_current_folder(path)\n self.set_current_name('image.' + default_filetype)\n\n hbox = Gtk.Box(spacing=10)\n hbox.pack_start(Gtk.Label(label=\"File Format:\"), False, False, 0)\n\n liststore = Gtk.ListStore(GObject.TYPE_STRING)\n cbox = Gtk.ComboBox()\n cbox.set_model(liststore)\n cell = Gtk.CellRendererText()\n cbox.pack_start(cell, True)\n cbox.add_attribute(cell, 'text', 0)\n hbox.pack_start(cbox, False, False, 0)\n\n self.filetypes = filetypes\n sorted_filetypes = sorted(filetypes.items())\n default = 0\n for i, (ext, name) in enumerate(sorted_filetypes):\n liststore.append([\"%s (*.%s)\" % (name, ext)])\n if ext == default_filetype:\n default = i\n cbox.set_active(default)\n self.ext = default_filetype\n\n def cb_cbox_changed(cbox, data=None):\n \"\"\"File extension changed\"\"\"\n head, filename = os.path.split(self.get_filename())\n root, ext = os.path.splitext(filename)\n ext = ext[1:]\n new_ext = sorted_filetypes[cbox.get_active()][0]\n self.ext = new_ext\n\n if ext in self.filetypes:\n filename = root + '.' + new_ext\n elif ext == '':\n filename = filename.rstrip('.') + '.' 
+ new_ext\n\n self.set_current_name(filename)\n cbox.connect(\"changed\", cb_cbox_changed)\n\n hbox.show_all()\n self.set_extra_widget(hbox)\n\n @cbook.deprecated(\"3.0\", alternative=\"sorted(self.filetypes.items())\")\n def sorted_filetypes(self):\n return sorted(self.filetypes.items())\n\n def get_filename_from_user(self):\n if self.run() == int(Gtk.ResponseType.OK):\n return self.get_filename(), self.ext\n else:\n return None, self.ext\n\n\nclass ToolbarGTK3(ToolContainerBase, Gtk.Box):\n _icon_extension = '.png'\n\n def __init__(self, toolmanager):\n ToolContainerBase.__init__(self, toolmanager)\n Gtk.Box.__init__(self)\n self.set_property(\"orientation\", Gtk.Orientation.VERTICAL)\n\n self._toolarea = Gtk.Box()\n self._toolarea.set_property('orientation', Gtk.Orientation.HORIZONTAL)\n self.pack_start(self._toolarea, False, False, 0)\n self._toolarea.show_all()\n self._groups = {}\n self._toolitems = {}\n\n def add_toolitem(self, name, group, position, image_file, description,\n toggle):\n if toggle:\n tbutton = Gtk.ToggleToolButton()\n else:\n tbutton = Gtk.ToolButton()\n tbutton.set_label(name)\n\n if image_file is not None:\n image = Gtk.Image()\n image.set_from_file(image_file)\n tbutton.set_icon_widget(image)\n\n if position is None:\n position = -1\n\n self._add_button(tbutton, group, position)\n signal = tbutton.connect('clicked', self._call_tool, name)\n tbutton.set_tooltip_text(description)\n tbutton.show_all()\n self._toolitems.setdefault(name, [])\n self._toolitems[name].append((tbutton, signal))\n\n def _add_button(self, button, group, position):\n if group not in self._groups:\n if self._groups:\n self._add_separator()\n toolbar = Gtk.Toolbar()\n toolbar.set_style(Gtk.ToolbarStyle.ICONS)\n self._toolarea.pack_start(toolbar, False, False, 0)\n toolbar.show_all()\n self._groups[group] = toolbar\n self._groups[group].insert(button, position)\n\n def _call_tool(self, btn, name):\n self.trigger_tool(name)\n\n def toggle_toolitem(self, name, toggled):\n if name not in self._toolitems:\n return\n for toolitem, signal in self._toolitems[name]:\n toolitem.handler_block(signal)\n toolitem.set_active(toggled)\n toolitem.handler_unblock(signal)\n\n def remove_toolitem(self, name):\n if name not in self._toolitems:\n self.toolmanager.message_event('%s Not in toolbar' % name, self)\n return\n\n for group in self._groups:\n for toolitem, _signal in self._toolitems[name]:\n if toolitem in self._groups[group]:\n self._groups[group].remove(toolitem)\n del self._toolitems[name]\n\n def _add_separator(self):\n sep = Gtk.Separator()\n sep.set_property(\"orientation\", Gtk.Orientation.VERTICAL)\n self._toolarea.pack_start(sep, False, True, 0)\n sep.show_all()\n\n\nclass StatusbarGTK3(StatusbarBase, Gtk.Statusbar):\n def __init__(self, *args, **kwargs):\n StatusbarBase.__init__(self, *args, **kwargs)\n Gtk.Statusbar.__init__(self)\n self._context = self.get_context_id('message')\n\n def set_message(self, s):\n self.pop(self._context)\n self.push(self._context, s)\n\n\nclass RubberbandGTK3(backend_tools.RubberbandBase):\n def draw_rubberband(self, x0, y0, x1, y1):\n NavigationToolbar2GTK3.draw_rubberband(\n self._make_classic_style_pseudo_toolbar(), None, x0, y0, x1, y1)\n\n\nclass SaveFigureGTK3(backend_tools.SaveFigureBase):\n\n @cbook.deprecated(\"3.1\")\n def get_filechooser(self):\n fc = FileChooserDialog(\n title='Save the figure',\n parent=self.figure.canvas.manager.window,\n path=os.path.expanduser(rcParams['savefig.directory']),\n 
filetypes=self.figure.canvas.get_supported_filetypes(),\n default_filetype=self.figure.canvas.get_default_filetype())\n fc.set_current_name(self.figure.canvas.get_default_filename())\n return fc\n\n def trigger(self, *args, **kwargs):\n\n class PseudoToolbar:\n canvas = self.figure.canvas\n\n return NavigationToolbar2GTK3.save_figure(PseudoToolbar())\n\n\nclass SetCursorGTK3(backend_tools.SetCursorBase):\n def set_cursor(self, cursor):\n NavigationToolbar2GTK3.set_cursor(\n self._make_classic_style_pseudo_toolbar(), cursor)\n\n\nclass ConfigureSubplotsGTK3(backend_tools.ConfigureSubplotsBase, Gtk.Window):\n def __init__(self, *args, **kwargs):\n backend_tools.ConfigureSubplotsBase.__init__(self, *args, **kwargs)\n self.window = None\n\n def init_window(self):\n if self.window:\n return\n self.window = Gtk.Window(title=\"Subplot Configuration Tool\")\n\n try:\n self.window.window.set_icon_from_file(window_icon)\n except Exception:\n # we presumably already logged a message on the\n # failure of the main plot, don't keep reporting\n pass\n\n self.vbox = Gtk.Box()\n self.vbox.set_property(\"orientation\", Gtk.Orientation.VERTICAL)\n self.window.add(self.vbox)\n self.vbox.show()\n self.window.connect('destroy', self.destroy)\n\n toolfig = Figure(figsize=(6, 3))\n canvas = self.figure.canvas.__class__(toolfig)\n\n toolfig.subplots_adjust(top=0.9)\n SubplotTool(self.figure, toolfig)\n\n w = int(toolfig.bbox.width)\n h = int(toolfig.bbox.height)\n\n self.window.set_default_size(w, h)\n\n canvas.show()\n self.vbox.pack_start(canvas, True, True, 0)\n self.window.show()\n\n def destroy(self, *args):\n self.window.destroy()\n self.window = None\n\n def _get_canvas(self, fig):\n return self.canvas.__class__(fig)\n\n def trigger(self, sender, event, data=None):\n self.init_window()\n self.window.present()\n\n\nclass HelpGTK3(backend_tools.ToolHelpBase):\n def _normalize_shortcut(self, key):\n \"\"\"\n Convert Matplotlib key presses to GTK+ accelerator identifiers.\n\n Related to `FigureCanvasGTK3._get_key`.\n \"\"\"\n special = {\n 'backspace': 'BackSpace',\n 'pagedown': 'Page_Down',\n 'pageup': 'Page_Up',\n 'scroll_lock': 'Scroll_Lock',\n }\n\n parts = key.split('+')\n mods = ['<' + mod + '>' for mod in parts[:-1]]\n key = parts[-1]\n\n if key in special:\n key = special[key]\n elif len(key) > 1:\n key = key.capitalize()\n elif key.isupper():\n mods += ['<shift>']\n\n return ''.join(mods) + key\n\n def _show_shortcuts_window(self):\n section = Gtk.ShortcutsSection()\n\n for name, tool in sorted(self.toolmanager.tools.items()):\n if not tool.description:\n continue\n\n # Putting everything in a separate group allows GTK to\n # automatically split them into separate columns/pages, which is\n # useful because we have lots of shortcuts, some with many keys\n # that are very wide.\n group = Gtk.ShortcutsGroup()\n section.add(group)\n # A hack to remove the title since we have no group naming.\n group.forall(lambda widget, data: widget.set_visible(False), None)\n\n shortcut = Gtk.ShortcutsShortcut(\n accelerator=' '.join(\n self._normalize_shortcut(key)\n for key in self.toolmanager.get_tool_keymap(name)\n # Will never be sent:\n if 'cmd+' not in key),\n title=tool.name,\n subtitle=tool.description)\n group.add(shortcut)\n\n window = Gtk.ShortcutsWindow(\n title='Help',\n modal=True,\n transient_for=self._figure.canvas.get_toplevel())\n section.show() # Must be done explicitly before add!\n window.add(section)\n\n window.show_all()\n\n def _show_shortcuts_dialog(self):\n dialog = Gtk.MessageDialog(\n 
self._figure.canvas.get_toplevel(),\n 0, Gtk.MessageType.INFO, Gtk.ButtonsType.OK, self._get_help_text(),\n title=\"Help\")\n dialog.run()\n dialog.destroy()\n\n def trigger(self, *args):\n if Gtk.check_version(3, 20, 0) is None:\n self._show_shortcuts_window()\n else:\n self._show_shortcuts_dialog()\n\n\nclass ToolCopyToClipboardGTK3(backend_tools.ToolCopyToClipboardBase):\n def trigger(self, *args, **kwargs):\n clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)\n window = self.canvas.get_window()\n x, y, width, height = window.get_geometry()\n pb = Gdk.pixbuf_get_from_window(window, x, y, width, height)\n clipboard.set_image(pb)\n\n\n# Define the file to use as the GTk icon\nif sys.platform == 'win32':\n icon_filename = 'matplotlib.png'\nelse:\n icon_filename = 'matplotlib.svg'\nwindow_icon = os.path.join(\n matplotlib.rcParams['datapath'], 'images', icon_filename)\n\n\ndef error_msg_gtk(msg, parent=None):\n if parent is not None: # find the toplevel Gtk.Window\n parent = parent.get_toplevel()\n if not parent.is_toplevel():\n parent = None\n\n if not isinstance(msg, str):\n msg = ','.join(map(str, msg))\n\n dialog = Gtk.MessageDialog(\n parent = parent,\n type = Gtk.MessageType.ERROR,\n buttons = Gtk.ButtonsType.OK,\n message_format = msg)\n dialog.run()\n dialog.destroy()\n\n\nbackend_tools.ToolSaveFigure = SaveFigureGTK3\nbackend_tools.ToolConfigureSubplots = ConfigureSubplotsGTK3\nbackend_tools.ToolSetCursor = SetCursorGTK3\nbackend_tools.ToolRubberband = RubberbandGTK3\nbackend_tools.ToolHelp = HelpGTK3\nbackend_tools.ToolCopyToClipboard = ToolCopyToClipboardGTK3\n\nToolbar = ToolbarGTK3\n\n\n@_Backend.export\nclass _BackendGTK3(_Backend):\n required_interactive_framework = \"gtk3\"\n FigureCanvas = FigureCanvasGTK3\n FigureManager = FigureManagerGTK3\n\n @staticmethod\n def trigger_manager_draw(manager):\n manager.canvas.draw_idle()\n\n @staticmethod\n def mainloop():\n if Gtk.main_level() == 0:\n Gtk.main()\n",
"from collections import OrderedDict\nfrom datetime import date, datetime, timedelta\nfrom itertools import product\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n Categorical,\n DataFrame,\n Grouper,\n Index,\n MultiIndex,\n Series,\n concat,\n date_range,\n)\nfrom pandas.api.types import CategoricalDtype as CDT\nfrom pandas.core.reshape.pivot import crosstab, pivot_table\nimport pandas.util.testing as tm\n\n\[email protected](params=[True, False])\ndef dropna(request):\n return request.param\n\n\[email protected](params=[([0] * 4, [1] * 4), (range(0, 3), range(1, 4))])\ndef interval_values(request, closed):\n left, right = request.param\n return Categorical(pd.IntervalIndex.from_arrays(left, right, closed))\n\n\nclass TestPivotTable:\n def setup_method(self, method):\n self.data = DataFrame(\n {\n \"A\": [\n \"foo\",\n \"foo\",\n \"foo\",\n \"foo\",\n \"bar\",\n \"bar\",\n \"bar\",\n \"bar\",\n \"foo\",\n \"foo\",\n \"foo\",\n ],\n \"B\": [\n \"one\",\n \"one\",\n \"one\",\n \"two\",\n \"one\",\n \"one\",\n \"one\",\n \"two\",\n \"two\",\n \"two\",\n \"one\",\n ],\n \"C\": [\n \"dull\",\n \"dull\",\n \"shiny\",\n \"dull\",\n \"dull\",\n \"shiny\",\n \"shiny\",\n \"dull\",\n \"shiny\",\n \"shiny\",\n \"shiny\",\n ],\n \"D\": np.random.randn(11),\n \"E\": np.random.randn(11),\n \"F\": np.random.randn(11),\n }\n )\n\n def test_pivot_table(self, observed):\n index = [\"A\", \"B\"]\n columns = \"C\"\n table = pivot_table(\n self.data, values=\"D\", index=index, columns=columns, observed=observed\n )\n\n table2 = self.data.pivot_table(\n values=\"D\", index=index, columns=columns, observed=observed\n )\n tm.assert_frame_equal(table, table2)\n\n # this works\n pivot_table(self.data, values=\"D\", index=index, observed=observed)\n\n if len(index) > 1:\n assert table.index.names == tuple(index)\n else:\n assert table.index.name == index[0]\n\n if len(columns) > 1:\n assert table.columns.names == columns\n else:\n assert table.columns.name == columns[0]\n\n expected = self.data.groupby(index + [columns])[\"D\"].agg(np.mean).unstack()\n tm.assert_frame_equal(table, expected)\n\n def test_pivot_table_categorical_observed_equal(self, observed):\n # issue #24923\n df = pd.DataFrame(\n {\"col1\": list(\"abcde\"), \"col2\": list(\"fghij\"), \"col3\": [1, 2, 3, 4, 5]}\n )\n\n expected = df.pivot_table(\n index=\"col1\", values=\"col3\", columns=\"col2\", aggfunc=np.sum, fill_value=0\n )\n\n expected.index = expected.index.astype(\"category\")\n expected.columns = expected.columns.astype(\"category\")\n\n df.col1 = df.col1.astype(\"category\")\n df.col2 = df.col2.astype(\"category\")\n\n result = df.pivot_table(\n index=\"col1\",\n values=\"col3\",\n columns=\"col2\",\n aggfunc=np.sum,\n fill_value=0,\n observed=observed,\n )\n\n tm.assert_frame_equal(result, expected)\n\n def test_pivot_table_nocols(self):\n df = DataFrame(\n {\"rows\": [\"a\", \"b\", \"c\"], \"cols\": [\"x\", \"y\", \"z\"], \"values\": [1, 2, 3]}\n )\n rs = df.pivot_table(columns=\"cols\", aggfunc=np.sum)\n xp = df.pivot_table(index=\"cols\", aggfunc=np.sum).T\n tm.assert_frame_equal(rs, xp)\n\n rs = df.pivot_table(columns=\"cols\", aggfunc={\"values\": \"mean\"})\n xp = df.pivot_table(index=\"cols\", aggfunc={\"values\": \"mean\"}).T\n tm.assert_frame_equal(rs, xp)\n\n def test_pivot_table_dropna(self):\n df = DataFrame(\n {\n \"amount\": {0: 60000, 1: 100000, 2: 50000, 3: 30000},\n \"customer\": {0: \"A\", 1: \"A\", 2: \"B\", 3: \"C\"},\n \"month\": {0: 201307, 1: 201309, 2: 201308, 3: 201310},\n 
\"product\": {0: \"a\", 1: \"b\", 2: \"c\", 3: \"d\"},\n \"quantity\": {0: 2000000, 1: 500000, 2: 1000000, 3: 1000000},\n }\n )\n pv_col = df.pivot_table(\n \"quantity\", \"month\", [\"customer\", \"product\"], dropna=False\n )\n pv_ind = df.pivot_table(\n \"quantity\", [\"customer\", \"product\"], \"month\", dropna=False\n )\n\n m = MultiIndex.from_tuples(\n [\n (\"A\", \"a\"),\n (\"A\", \"b\"),\n (\"A\", \"c\"),\n (\"A\", \"d\"),\n (\"B\", \"a\"),\n (\"B\", \"b\"),\n (\"B\", \"c\"),\n (\"B\", \"d\"),\n (\"C\", \"a\"),\n (\"C\", \"b\"),\n (\"C\", \"c\"),\n (\"C\", \"d\"),\n ],\n names=[\"customer\", \"product\"],\n )\n tm.assert_index_equal(pv_col.columns, m)\n tm.assert_index_equal(pv_ind.index, m)\n\n def test_pivot_table_categorical(self):\n\n cat1 = Categorical(\n [\"a\", \"a\", \"b\", \"b\"], categories=[\"a\", \"b\", \"z\"], ordered=True\n )\n cat2 = Categorical(\n [\"c\", \"d\", \"c\", \"d\"], categories=[\"c\", \"d\", \"y\"], ordered=True\n )\n df = DataFrame({\"A\": cat1, \"B\": cat2, \"values\": [1, 2, 3, 4]})\n result = pd.pivot_table(df, values=\"values\", index=[\"A\", \"B\"], dropna=True)\n\n exp_index = pd.MultiIndex.from_arrays([cat1, cat2], names=[\"A\", \"B\"])\n expected = DataFrame({\"values\": [1, 2, 3, 4]}, index=exp_index)\n tm.assert_frame_equal(result, expected)\n\n def test_pivot_table_dropna_categoricals(self, dropna):\n # GH 15193\n categories = [\"a\", \"b\", \"c\", \"d\"]\n\n df = DataFrame(\n {\n \"A\": [\"a\", \"a\", \"a\", \"b\", \"b\", \"b\", \"c\", \"c\", \"c\"],\n \"B\": [1, 2, 3, 1, 2, 3, 1, 2, 3],\n \"C\": range(0, 9),\n }\n )\n\n df[\"A\"] = df[\"A\"].astype(CDT(categories, ordered=False))\n result = df.pivot_table(index=\"B\", columns=\"A\", values=\"C\", dropna=dropna)\n expected_columns = Series([\"a\", \"b\", \"c\"], name=\"A\")\n expected_columns = expected_columns.astype(CDT(categories, ordered=False))\n expected_index = Series([1, 2, 3], name=\"B\")\n expected = DataFrame(\n [[0, 3, 6], [1, 4, 7], [2, 5, 8]],\n index=expected_index,\n columns=expected_columns,\n )\n if not dropna:\n # add back the non observed to compare\n expected = expected.reindex(columns=Categorical(categories)).astype(\"float\")\n\n tm.assert_frame_equal(result, expected)\n\n def test_pivot_with_non_observable_dropna(self, dropna):\n # gh-21133\n df = pd.DataFrame(\n {\n \"A\": pd.Categorical(\n [np.nan, \"low\", \"high\", \"low\", \"high\"],\n categories=[\"low\", \"high\"],\n ordered=True,\n ),\n \"B\": range(5),\n }\n )\n\n result = df.pivot_table(index=\"A\", values=\"B\", dropna=dropna)\n expected = pd.DataFrame(\n {\"B\": [2, 3]},\n index=pd.Index(\n pd.Categorical.from_codes(\n [0, 1], categories=[\"low\", \"high\"], ordered=True\n ),\n name=\"A\",\n ),\n )\n\n tm.assert_frame_equal(result, expected)\n\n # gh-21378\n df = pd.DataFrame(\n {\n \"A\": pd.Categorical(\n [\"left\", \"low\", \"high\", \"low\", \"high\"],\n categories=[\"low\", \"high\", \"left\"],\n ordered=True,\n ),\n \"B\": range(5),\n }\n )\n\n result = df.pivot_table(index=\"A\", values=\"B\", dropna=dropna)\n expected = pd.DataFrame(\n {\"B\": [2, 3, 0]},\n index=pd.Index(\n pd.Categorical.from_codes(\n [0, 1, 2], categories=[\"low\", \"high\", \"left\"], ordered=True\n ),\n name=\"A\",\n ),\n )\n\n tm.assert_frame_equal(result, expected)\n\n def test_pivot_with_interval_index(self, interval_values, dropna):\n # GH 25814\n df = DataFrame({\"A\": interval_values, \"B\": 1})\n result = df.pivot_table(index=\"A\", values=\"B\", dropna=dropna)\n expected = DataFrame({\"B\": 1}, 
index=Index(interval_values.unique(), name=\"A\"))\n tm.assert_frame_equal(result, expected)\n\n def test_pivot_with_interval_index_margins(self):\n # GH 25815\n ordered_cat = pd.IntervalIndex.from_arrays([0, 0, 1, 1], [1, 1, 2, 2])\n df = DataFrame(\n {\n \"A\": np.arange(4, 0, -1, dtype=np.intp),\n \"B\": [\"a\", \"b\", \"a\", \"b\"],\n \"C\": pd.Categorical(ordered_cat, ordered=True).sort_values(\n ascending=False\n ),\n }\n )\n\n pivot_tab = pd.pivot_table(\n df, index=\"C\", columns=\"B\", values=\"A\", aggfunc=\"sum\", margins=True\n )\n\n result = pivot_tab[\"All\"]\n expected = Series(\n [3, 7, 10],\n index=Index([pd.Interval(0, 1), pd.Interval(1, 2), \"All\"], name=\"C\"),\n name=\"All\",\n dtype=np.intp,\n )\n tm.assert_series_equal(result, expected)\n\n def test_pass_array(self):\n result = self.data.pivot_table(\"D\", index=self.data.A, columns=self.data.C)\n expected = self.data.pivot_table(\"D\", index=\"A\", columns=\"C\")\n tm.assert_frame_equal(result, expected)\n\n def test_pass_function(self):\n result = self.data.pivot_table(\"D\", index=lambda x: x // 5, columns=self.data.C)\n expected = self.data.pivot_table(\"D\", index=self.data.index // 5, columns=\"C\")\n tm.assert_frame_equal(result, expected)\n\n def test_pivot_table_multiple(self):\n index = [\"A\", \"B\"]\n columns = \"C\"\n table = pivot_table(self.data, index=index, columns=columns)\n expected = self.data.groupby(index + [columns]).agg(np.mean).unstack()\n tm.assert_frame_equal(table, expected)\n\n def test_pivot_dtypes(self):\n\n # can convert dtypes\n f = DataFrame(\n {\n \"a\": [\"cat\", \"bat\", \"cat\", \"bat\"],\n \"v\": [1, 2, 3, 4],\n \"i\": [\"a\", \"b\", \"a\", \"b\"],\n }\n )\n assert f.dtypes[\"v\"] == \"int64\"\n\n z = pivot_table(\n f, values=\"v\", index=[\"a\"], columns=[\"i\"], fill_value=0, aggfunc=np.sum\n )\n result = z.dtypes\n expected = Series([np.dtype(\"int64\")] * 2, index=Index(list(\"ab\"), name=\"i\"))\n tm.assert_series_equal(result, expected)\n\n # cannot convert dtypes\n f = DataFrame(\n {\n \"a\": [\"cat\", \"bat\", \"cat\", \"bat\"],\n \"v\": [1.5, 2.5, 3.5, 4.5],\n \"i\": [\"a\", \"b\", \"a\", \"b\"],\n }\n )\n assert f.dtypes[\"v\"] == \"float64\"\n\n z = pivot_table(\n f, values=\"v\", index=[\"a\"], columns=[\"i\"], fill_value=0, aggfunc=np.mean\n )\n result = z.dtypes\n expected = Series([np.dtype(\"float64\")] * 2, index=Index(list(\"ab\"), name=\"i\"))\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"columns,values\",\n [\n (\"bool1\", [\"float1\", \"float2\"]),\n (\"bool1\", [\"float1\", \"float2\", \"bool1\"]),\n (\"bool2\", [\"float1\", \"float2\", \"bool1\"]),\n ],\n )\n def test_pivot_preserve_dtypes(self, columns, values):\n # GH 7142 regression test\n v = np.arange(5, dtype=np.float64)\n df = DataFrame(\n {\"float1\": v, \"float2\": v + 2.0, \"bool1\": v <= 2, \"bool2\": v <= 3}\n )\n\n df_res = df.reset_index().pivot_table(\n index=\"index\", columns=columns, values=values\n )\n\n result = dict(df_res.dtypes)\n expected = {\n col: np.dtype(\"O\") if col[0].startswith(\"b\") else np.dtype(\"float64\")\n for col in df_res\n }\n assert result == expected\n\n def test_pivot_no_values(self):\n # GH 14380\n idx = pd.DatetimeIndex(\n [\"2011-01-01\", \"2011-02-01\", \"2011-01-02\", \"2011-01-01\", \"2011-01-02\"]\n )\n df = pd.DataFrame({\"A\": [1, 2, 3, 4, 5]}, index=idx)\n res = df.pivot_table(index=df.index.month, columns=df.index.day)\n\n exp_columns = pd.MultiIndex.from_tuples([(\"A\", 1), (\"A\", 2)])\n exp = pd.DataFrame(\n [[2.5, 
4.0], [2.0, np.nan]], index=[1, 2], columns=exp_columns\n )\n tm.assert_frame_equal(res, exp)\n\n df = pd.DataFrame(\n {\n \"A\": [1, 2, 3, 4, 5],\n \"dt\": pd.date_range(\"2011-01-01\", freq=\"D\", periods=5),\n },\n index=idx,\n )\n res = df.pivot_table(\n index=df.index.month, columns=pd.Grouper(key=\"dt\", freq=\"M\")\n )\n exp_columns = pd.MultiIndex.from_tuples([(\"A\", pd.Timestamp(\"2011-01-31\"))])\n exp_columns.names = [None, \"dt\"]\n exp = pd.DataFrame([3.25, 2.0], index=[1, 2], columns=exp_columns)\n tm.assert_frame_equal(res, exp)\n\n res = df.pivot_table(\n index=pd.Grouper(freq=\"A\"), columns=pd.Grouper(key=\"dt\", freq=\"M\")\n )\n exp = pd.DataFrame(\n [3], index=pd.DatetimeIndex([\"2011-12-31\"]), columns=exp_columns\n )\n tm.assert_frame_equal(res, exp)\n\n def test_pivot_multi_values(self):\n result = pivot_table(\n self.data, values=[\"D\", \"E\"], index=\"A\", columns=[\"B\", \"C\"], fill_value=0\n )\n expected = pivot_table(\n self.data.drop([\"F\"], axis=1), index=\"A\", columns=[\"B\", \"C\"], fill_value=0\n )\n tm.assert_frame_equal(result, expected)\n\n def test_pivot_multi_functions(self):\n f = lambda func: pivot_table(\n self.data, values=[\"D\", \"E\"], index=[\"A\", \"B\"], columns=\"C\", aggfunc=func\n )\n result = f([np.mean, np.std])\n means = f(np.mean)\n stds = f(np.std)\n expected = concat([means, stds], keys=[\"mean\", \"std\"], axis=1)\n tm.assert_frame_equal(result, expected)\n\n # margins not supported??\n f = lambda func: pivot_table(\n self.data,\n values=[\"D\", \"E\"],\n index=[\"A\", \"B\"],\n columns=\"C\",\n aggfunc=func,\n margins=True,\n )\n result = f([np.mean, np.std])\n means = f(np.mean)\n stds = f(np.std)\n expected = concat([means, stds], keys=[\"mean\", \"std\"], axis=1)\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"method\", [True, False])\n def test_pivot_index_with_nan(self, method):\n # GH 3588\n nan = np.nan\n df = DataFrame(\n {\n \"a\": [\"R1\", \"R2\", nan, \"R4\"],\n \"b\": [\"C1\", \"C2\", \"C3\", \"C4\"],\n \"c\": [10, 15, 17, 20],\n }\n )\n if method:\n result = df.pivot(\"a\", \"b\", \"c\")\n else:\n result = pd.pivot(df, \"a\", \"b\", \"c\")\n expected = DataFrame(\n [\n [nan, nan, 17, nan],\n [10, nan, nan, nan],\n [nan, 15, nan, nan],\n [nan, nan, nan, 20],\n ],\n index=Index([nan, \"R1\", \"R2\", \"R4\"], name=\"a\"),\n columns=Index([\"C1\", \"C2\", \"C3\", \"C4\"], name=\"b\"),\n )\n tm.assert_frame_equal(result, expected)\n tm.assert_frame_equal(df.pivot(\"b\", \"a\", \"c\"), expected.T)\n\n # GH9491\n df = DataFrame(\n {\n \"a\": pd.date_range(\"2014-02-01\", periods=6, freq=\"D\"),\n \"c\": 100 + np.arange(6),\n }\n )\n df[\"b\"] = df[\"a\"] - pd.Timestamp(\"2014-02-02\")\n df.loc[1, \"a\"] = df.loc[3, \"a\"] = nan\n df.loc[1, \"b\"] = df.loc[4, \"b\"] = nan\n\n if method:\n pv = df.pivot(\"a\", \"b\", \"c\")\n else:\n pv = pd.pivot(df, \"a\", \"b\", \"c\")\n assert pv.notna().values.sum() == len(df)\n\n for _, row in df.iterrows():\n assert pv.loc[row[\"a\"], row[\"b\"]] == row[\"c\"]\n\n if method:\n result = df.pivot(\"b\", \"a\", \"c\")\n else:\n result = pd.pivot(df, \"b\", \"a\", \"c\")\n tm.assert_frame_equal(result, pv.T)\n\n @pytest.mark.parametrize(\"method\", [True, False])\n def test_pivot_with_tz(self, method):\n # GH 5878\n df = DataFrame(\n {\n \"dt1\": [\n datetime(2013, 1, 1, 9, 0),\n datetime(2013, 1, 2, 9, 0),\n datetime(2013, 1, 1, 9, 0),\n datetime(2013, 1, 2, 9, 0),\n ],\n \"dt2\": [\n datetime(2014, 1, 1, 9, 0),\n datetime(2014, 1, 1, 9, 0),\n datetime(2014, 
1, 2, 9, 0),\n datetime(2014, 1, 2, 9, 0),\n ],\n \"data1\": np.arange(4, dtype=\"int64\"),\n \"data2\": np.arange(4, dtype=\"int64\"),\n }\n )\n\n df[\"dt1\"] = df[\"dt1\"].apply(lambda d: pd.Timestamp(d, tz=\"US/Pacific\"))\n df[\"dt2\"] = df[\"dt2\"].apply(lambda d: pd.Timestamp(d, tz=\"Asia/Tokyo\"))\n\n exp_col1 = Index([\"data1\", \"data1\", \"data2\", \"data2\"])\n exp_col2 = pd.DatetimeIndex(\n [\"2014/01/01 09:00\", \"2014/01/02 09:00\"] * 2, name=\"dt2\", tz=\"Asia/Tokyo\"\n )\n exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])\n expected = DataFrame(\n [[0, 2, 0, 2], [1, 3, 1, 3]],\n index=pd.DatetimeIndex(\n [\"2013/01/01 09:00\", \"2013/01/02 09:00\"], name=\"dt1\", tz=\"US/Pacific\"\n ),\n columns=exp_col,\n )\n\n if method:\n pv = df.pivot(index=\"dt1\", columns=\"dt2\")\n else:\n pv = pd.pivot(df, index=\"dt1\", columns=\"dt2\")\n tm.assert_frame_equal(pv, expected)\n\n expected = DataFrame(\n [[0, 2], [1, 3]],\n index=pd.DatetimeIndex(\n [\"2013/01/01 09:00\", \"2013/01/02 09:00\"], name=\"dt1\", tz=\"US/Pacific\"\n ),\n columns=pd.DatetimeIndex(\n [\"2014/01/01 09:00\", \"2014/01/02 09:00\"], name=\"dt2\", tz=\"Asia/Tokyo\"\n ),\n )\n\n if method:\n pv = df.pivot(index=\"dt1\", columns=\"dt2\", values=\"data1\")\n else:\n pv = pd.pivot(df, index=\"dt1\", columns=\"dt2\", values=\"data1\")\n tm.assert_frame_equal(pv, expected)\n\n def test_pivot_tz_in_values(self):\n # GH 14948\n df = pd.DataFrame(\n [\n {\n \"uid\": u\"aa\",\n \"ts\": pd.Timestamp(\"2016-08-12 13:00:00-0700\", tz=\"US/Pacific\"),\n },\n {\n \"uid\": u\"aa\",\n \"ts\": pd.Timestamp(\"2016-08-12 08:00:00-0700\", tz=\"US/Pacific\"),\n },\n {\n \"uid\": u\"aa\",\n \"ts\": pd.Timestamp(\"2016-08-12 14:00:00-0700\", tz=\"US/Pacific\"),\n },\n {\n \"uid\": u\"aa\",\n \"ts\": pd.Timestamp(\"2016-08-25 11:00:00-0700\", tz=\"US/Pacific\"),\n },\n {\n \"uid\": u\"aa\",\n \"ts\": pd.Timestamp(\"2016-08-25 13:00:00-0700\", tz=\"US/Pacific\"),\n },\n ]\n )\n\n df = df.set_index(\"ts\").reset_index()\n mins = df.ts.map(lambda x: x.replace(hour=0, minute=0, second=0, microsecond=0))\n\n result = pd.pivot_table(\n df.set_index(\"ts\").reset_index(),\n values=\"ts\",\n index=[\"uid\"],\n columns=[mins],\n aggfunc=np.min,\n )\n expected = pd.DataFrame(\n [\n [\n pd.Timestamp(\"2016-08-12 08:00:00-0700\", tz=\"US/Pacific\"),\n pd.Timestamp(\"2016-08-25 11:00:00-0700\", tz=\"US/Pacific\"),\n ]\n ],\n index=pd.Index([\"aa\"], name=\"uid\"),\n columns=pd.DatetimeIndex(\n [\n pd.Timestamp(\"2016-08-12 00:00:00\", tz=\"US/Pacific\"),\n pd.Timestamp(\"2016-08-25 00:00:00\", tz=\"US/Pacific\"),\n ],\n name=\"ts\",\n ),\n )\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"method\", [True, False])\n def test_pivot_periods(self, method):\n df = DataFrame(\n {\n \"p1\": [\n pd.Period(\"2013-01-01\", \"D\"),\n pd.Period(\"2013-01-02\", \"D\"),\n pd.Period(\"2013-01-01\", \"D\"),\n pd.Period(\"2013-01-02\", \"D\"),\n ],\n \"p2\": [\n pd.Period(\"2013-01\", \"M\"),\n pd.Period(\"2013-01\", \"M\"),\n pd.Period(\"2013-02\", \"M\"),\n pd.Period(\"2013-02\", \"M\"),\n ],\n \"data1\": np.arange(4, dtype=\"int64\"),\n \"data2\": np.arange(4, dtype=\"int64\"),\n }\n )\n\n exp_col1 = Index([\"data1\", \"data1\", \"data2\", \"data2\"])\n exp_col2 = pd.PeriodIndex([\"2013-01\", \"2013-02\"] * 2, name=\"p2\", freq=\"M\")\n exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])\n expected = DataFrame(\n [[0, 2, 0, 2], [1, 3, 1, 3]],\n index=pd.PeriodIndex([\"2013-01-01\", \"2013-01-02\"], name=\"p1\", freq=\"D\"),\n 
columns=exp_col,\n )\n if method:\n pv = df.pivot(index=\"p1\", columns=\"p2\")\n else:\n pv = pd.pivot(df, index=\"p1\", columns=\"p2\")\n tm.assert_frame_equal(pv, expected)\n\n expected = DataFrame(\n [[0, 2], [1, 3]],\n index=pd.PeriodIndex([\"2013-01-01\", \"2013-01-02\"], name=\"p1\", freq=\"D\"),\n columns=pd.PeriodIndex([\"2013-01\", \"2013-02\"], name=\"p2\", freq=\"M\"),\n )\n if method:\n pv = df.pivot(index=\"p1\", columns=\"p2\", values=\"data1\")\n else:\n pv = pd.pivot(df, index=\"p1\", columns=\"p2\", values=\"data1\")\n tm.assert_frame_equal(pv, expected)\n\n @pytest.mark.parametrize(\n \"values\",\n [\n [\"baz\", \"zoo\"],\n np.array([\"baz\", \"zoo\"]),\n pd.Series([\"baz\", \"zoo\"]),\n pd.Index([\"baz\", \"zoo\"]),\n ],\n )\n @pytest.mark.parametrize(\"method\", [True, False])\n def test_pivot_with_list_like_values(self, values, method):\n # issue #17160\n df = pd.DataFrame(\n {\n \"foo\": [\"one\", \"one\", \"one\", \"two\", \"two\", \"two\"],\n \"bar\": [\"A\", \"B\", \"C\", \"A\", \"B\", \"C\"],\n \"baz\": [1, 2, 3, 4, 5, 6],\n \"zoo\": [\"x\", \"y\", \"z\", \"q\", \"w\", \"t\"],\n }\n )\n\n if method:\n result = df.pivot(index=\"foo\", columns=\"bar\", values=values)\n else:\n result = pd.pivot(df, index=\"foo\", columns=\"bar\", values=values)\n\n data = [[1, 2, 3, \"x\", \"y\", \"z\"], [4, 5, 6, \"q\", \"w\", \"t\"]]\n index = Index(data=[\"one\", \"two\"], name=\"foo\")\n columns = MultiIndex(\n levels=[[\"baz\", \"zoo\"], [\"A\", \"B\", \"C\"]],\n codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],\n names=[None, \"bar\"],\n )\n expected = DataFrame(data=data, index=index, columns=columns, dtype=\"object\")\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"values\",\n [\n [\"bar\", \"baz\"],\n np.array([\"bar\", \"baz\"]),\n pd.Series([\"bar\", \"baz\"]),\n pd.Index([\"bar\", \"baz\"]),\n ],\n )\n @pytest.mark.parametrize(\"method\", [True, False])\n def test_pivot_with_list_like_values_nans(self, values, method):\n # issue #17160\n df = pd.DataFrame(\n {\n \"foo\": [\"one\", \"one\", \"one\", \"two\", \"two\", \"two\"],\n \"bar\": [\"A\", \"B\", \"C\", \"A\", \"B\", \"C\"],\n \"baz\": [1, 2, 3, 4, 5, 6],\n \"zoo\": [\"x\", \"y\", \"z\", \"q\", \"w\", \"t\"],\n }\n )\n\n if method:\n result = df.pivot(index=\"zoo\", columns=\"foo\", values=values)\n else:\n result = pd.pivot(df, index=\"zoo\", columns=\"foo\", values=values)\n\n data = [\n [np.nan, \"A\", np.nan, 4],\n [np.nan, \"C\", np.nan, 6],\n [np.nan, \"B\", np.nan, 5],\n [\"A\", np.nan, 1, np.nan],\n [\"B\", np.nan, 2, np.nan],\n [\"C\", np.nan, 3, np.nan],\n ]\n index = Index(data=[\"q\", \"t\", \"w\", \"x\", \"y\", \"z\"], name=\"zoo\")\n columns = MultiIndex(\n levels=[[\"bar\", \"baz\"], [\"one\", \"two\"]],\n codes=[[0, 0, 1, 1], [0, 1, 0, 1]],\n names=[None, \"foo\"],\n )\n expected = DataFrame(data=data, index=index, columns=columns, dtype=\"object\")\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.xfail(\n reason=\"MultiIndexed unstack with tuple names fails with KeyError GH#19966\"\n )\n @pytest.mark.parametrize(\"method\", [True, False])\n def test_pivot_with_multiindex(self, method):\n # issue #17160\n index = Index(data=[0, 1, 2, 3, 4, 5])\n data = [\n [\"one\", \"A\", 1, \"x\"],\n [\"one\", \"B\", 2, \"y\"],\n [\"one\", \"C\", 3, \"z\"],\n [\"two\", \"A\", 4, \"q\"],\n [\"two\", \"B\", 5, \"w\"],\n [\"two\", \"C\", 6, \"t\"],\n ]\n columns = MultiIndex(\n levels=[[\"bar\", \"baz\"], [\"first\", \"second\"]],\n codes=[[0, 0, 1, 1], [0, 1, 0, 1]],\n )\n df = 
DataFrame(data=data, index=index, columns=columns, dtype=\"object\")\n if method:\n result = df.pivot(\n index=(\"bar\", \"first\"),\n columns=(\"bar\", \"second\"),\n values=(\"baz\", \"first\"),\n )\n else:\n result = pd.pivot(\n df,\n index=(\"bar\", \"first\"),\n columns=(\"bar\", \"second\"),\n values=(\"baz\", \"first\"),\n )\n\n data = {\n \"A\": Series([1, 4], index=[\"one\", \"two\"]),\n \"B\": Series([2, 5], index=[\"one\", \"two\"]),\n \"C\": Series([3, 6], index=[\"one\", \"two\"]),\n }\n expected = DataFrame(data)\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"method\", [True, False])\n def test_pivot_with_tuple_of_values(self, method):\n # issue #17160\n df = pd.DataFrame(\n {\n \"foo\": [\"one\", \"one\", \"one\", \"two\", \"two\", \"two\"],\n \"bar\": [\"A\", \"B\", \"C\", \"A\", \"B\", \"C\"],\n \"baz\": [1, 2, 3, 4, 5, 6],\n \"zoo\": [\"x\", \"y\", \"z\", \"q\", \"w\", \"t\"],\n }\n )\n with pytest.raises(KeyError, match=r\"^\\('bar', 'baz'\\)$\"):\n # tuple is seen as a single column name\n if method:\n df.pivot(index=\"zoo\", columns=\"foo\", values=(\"bar\", \"baz\"))\n else:\n pd.pivot(df, index=\"zoo\", columns=\"foo\", values=(\"bar\", \"baz\"))\n\n def test_margins(self):\n def _check_output(\n result, values_col, index=[\"A\", \"B\"], columns=[\"C\"], margins_col=\"All\"\n ):\n col_margins = result.loc[result.index[:-1], margins_col]\n expected_col_margins = self.data.groupby(index)[values_col].mean()\n tm.assert_series_equal(col_margins, expected_col_margins, check_names=False)\n assert col_margins.name == margins_col\n\n result = result.sort_index()\n index_margins = result.loc[(margins_col, \"\")].iloc[:-1]\n\n expected_ix_margins = self.data.groupby(columns)[values_col].mean()\n tm.assert_series_equal(\n index_margins, expected_ix_margins, check_names=False\n )\n assert index_margins.name == (margins_col, \"\")\n\n grand_total_margins = result.loc[(margins_col, \"\"), margins_col]\n expected_total_margins = self.data[values_col].mean()\n assert grand_total_margins == expected_total_margins\n\n # column specified\n result = self.data.pivot_table(\n values=\"D\", index=[\"A\", \"B\"], columns=\"C\", margins=True, aggfunc=np.mean\n )\n _check_output(result, \"D\")\n\n # Set a different margins_name (not 'All')\n result = self.data.pivot_table(\n values=\"D\",\n index=[\"A\", \"B\"],\n columns=\"C\",\n margins=True,\n aggfunc=np.mean,\n margins_name=\"Totals\",\n )\n _check_output(result, \"D\", margins_col=\"Totals\")\n\n # no column specified\n table = self.data.pivot_table(\n index=[\"A\", \"B\"], columns=\"C\", margins=True, aggfunc=np.mean\n )\n for value_col in table.columns.levels[0]:\n _check_output(table[value_col], value_col)\n\n # no col\n\n # to help with a buglet\n self.data.columns = [k * 2 for k in self.data.columns]\n table = self.data.pivot_table(index=[\"AA\", \"BB\"], margins=True, aggfunc=np.mean)\n for value_col in table.columns:\n totals = table.loc[(\"All\", \"\"), value_col]\n assert totals == self.data[value_col].mean()\n\n # no rows\n rtable = self.data.pivot_table(\n columns=[\"AA\", \"BB\"], margins=True, aggfunc=np.mean\n )\n assert isinstance(rtable, Series)\n\n table = self.data.pivot_table(index=[\"AA\", \"BB\"], margins=True, aggfunc=\"mean\")\n for item in [\"DD\", \"EE\", \"FF\"]:\n totals = table.loc[(\"All\", \"\"), item]\n assert totals == self.data[item].mean()\n\n def test_margins_dtype(self):\n # GH 17013\n\n df = self.data.copy()\n df[[\"D\", \"E\", \"F\"]] = np.arange(len(df) * 
3).reshape(len(df), 3)\n\n mi_val = list(product([\"bar\", \"foo\"], [\"one\", \"two\"])) + [(\"All\", \"\")]\n mi = MultiIndex.from_tuples(mi_val, names=(\"A\", \"B\"))\n expected = DataFrame(\n {\"dull\": [12, 21, 3, 9, 45], \"shiny\": [33, 0, 36, 51, 120]}, index=mi\n ).rename_axis(\"C\", axis=1)\n expected[\"All\"] = expected[\"dull\"] + expected[\"shiny\"]\n\n result = df.pivot_table(\n values=\"D\",\n index=[\"A\", \"B\"],\n columns=\"C\",\n margins=True,\n aggfunc=np.sum,\n fill_value=0,\n )\n\n tm.assert_frame_equal(expected, result)\n\n @pytest.mark.xfail(reason=\"GH#17035 (len of floats is casted back to floats)\")\n def test_margins_dtype_len(self):\n mi_val = list(product([\"bar\", \"foo\"], [\"one\", \"two\"])) + [(\"All\", \"\")]\n mi = MultiIndex.from_tuples(mi_val, names=(\"A\", \"B\"))\n expected = DataFrame(\n {\"dull\": [1, 1, 2, 1, 5], \"shiny\": [2, 0, 2, 2, 6]}, index=mi\n ).rename_axis(\"C\", axis=1)\n expected[\"All\"] = expected[\"dull\"] + expected[\"shiny\"]\n\n result = self.data.pivot_table(\n values=\"D\",\n index=[\"A\", \"B\"],\n columns=\"C\",\n margins=True,\n aggfunc=len,\n fill_value=0,\n )\n\n tm.assert_frame_equal(expected, result)\n\n def test_pivot_integer_columns(self):\n # caused by upstream bug in unstack\n\n d = date.min\n data = list(\n product(\n [\"foo\", \"bar\"],\n [\"A\", \"B\", \"C\"],\n [\"x1\", \"x2\"],\n [d + timedelta(i) for i in range(20)],\n [1.0],\n )\n )\n df = DataFrame(data)\n table = df.pivot_table(values=4, index=[0, 1, 3], columns=[2])\n\n df2 = df.rename(columns=str)\n table2 = df2.pivot_table(values=\"4\", index=[\"0\", \"1\", \"3\"], columns=[\"2\"])\n\n tm.assert_frame_equal(table, table2, check_names=False)\n\n def test_pivot_no_level_overlap(self):\n # GH #1181\n\n data = DataFrame(\n {\n \"a\": [\"a\", \"a\", \"a\", \"a\", \"b\", \"b\", \"b\", \"b\"] * 2,\n \"b\": [0, 0, 0, 0, 1, 1, 1, 1] * 2,\n \"c\": ([\"foo\"] * 4 + [\"bar\"] * 4) * 2,\n \"value\": np.random.randn(16),\n }\n )\n\n table = data.pivot_table(\"value\", index=\"a\", columns=[\"b\", \"c\"])\n\n grouped = data.groupby([\"a\", \"b\", \"c\"])[\"value\"].mean()\n expected = grouped.unstack(\"b\").unstack(\"c\").dropna(axis=1, how=\"all\")\n tm.assert_frame_equal(table, expected)\n\n def test_pivot_columns_lexsorted(self):\n\n n = 10000\n\n dtype = np.dtype(\n [\n (\"Index\", object),\n (\"Symbol\", object),\n (\"Year\", int),\n (\"Month\", int),\n (\"Day\", int),\n (\"Quantity\", int),\n (\"Price\", float),\n ]\n )\n\n products = np.array(\n [\n (\"SP500\", \"ADBE\"),\n (\"SP500\", \"NVDA\"),\n (\"SP500\", \"ORCL\"),\n (\"NDQ100\", \"AAPL\"),\n (\"NDQ100\", \"MSFT\"),\n (\"NDQ100\", \"GOOG\"),\n (\"FTSE\", \"DGE.L\"),\n (\"FTSE\", \"TSCO.L\"),\n (\"FTSE\", \"GSK.L\"),\n ],\n dtype=[(\"Index\", object), (\"Symbol\", object)],\n )\n items = np.empty(n, dtype=dtype)\n iproduct = np.random.randint(0, len(products), n)\n items[\"Index\"] = products[\"Index\"][iproduct]\n items[\"Symbol\"] = products[\"Symbol\"][iproduct]\n dr = pd.date_range(date(2000, 1, 1), date(2010, 12, 31))\n dates = dr[np.random.randint(0, len(dr), n)]\n items[\"Year\"] = dates.year\n items[\"Month\"] = dates.month\n items[\"Day\"] = dates.day\n items[\"Price\"] = np.random.lognormal(4.0, 2.0, n)\n\n df = DataFrame(items)\n\n pivoted = df.pivot_table(\n \"Price\",\n index=[\"Month\", \"Day\"],\n columns=[\"Index\", \"Symbol\", \"Year\"],\n aggfunc=\"mean\",\n )\n\n assert pivoted.columns.is_monotonic\n\n def test_pivot_complex_aggfunc(self):\n f = OrderedDict([(\"D\", [\"std\"]), (\"E\", 
[\"sum\"])])\n expected = self.data.groupby([\"A\", \"B\"]).agg(f).unstack(\"B\")\n result = self.data.pivot_table(index=\"A\", columns=\"B\", aggfunc=f)\n\n tm.assert_frame_equal(result, expected)\n\n def test_margins_no_values_no_cols(self):\n # Regression test on pivot table: no values or cols passed.\n result = self.data[[\"A\", \"B\"]].pivot_table(\n index=[\"A\", \"B\"], aggfunc=len, margins=True\n )\n result_list = result.tolist()\n assert sum(result_list[:-1]) == result_list[-1]\n\n def test_margins_no_values_two_rows(self):\n # Regression test on pivot table: no values passed but rows are a\n # multi-index\n result = self.data[[\"A\", \"B\", \"C\"]].pivot_table(\n index=[\"A\", \"B\"], columns=\"C\", aggfunc=len, margins=True\n )\n assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0]\n\n def test_margins_no_values_one_row_one_col(self):\n # Regression test on pivot table: no values passed but row and col\n # defined\n result = self.data[[\"A\", \"B\"]].pivot_table(\n index=\"A\", columns=\"B\", aggfunc=len, margins=True\n )\n assert result.All.tolist() == [4.0, 7.0, 11.0]\n\n def test_margins_no_values_two_row_two_cols(self):\n # Regression test on pivot table: no values passed but rows and cols\n # are multi-indexed\n self.data[\"D\"] = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\", \"k\"]\n result = self.data[[\"A\", \"B\", \"C\", \"D\"]].pivot_table(\n index=[\"A\", \"B\"], columns=[\"C\", \"D\"], aggfunc=len, margins=True\n )\n assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0]\n\n @pytest.mark.parametrize(\"margin_name\", [\"foo\", \"one\", 666, None, [\"a\", \"b\"]])\n def test_pivot_table_with_margins_set_margin_name(self, margin_name):\n # see gh-3335\n msg = (\n r'Conflicting name \"{}\" in margins|'\n \"margins_name argument must be a string\"\n ).format(margin_name)\n with pytest.raises(ValueError, match=msg):\n # multi-index index\n pivot_table(\n self.data,\n values=\"D\",\n index=[\"A\", \"B\"],\n columns=[\"C\"],\n margins=True,\n margins_name=margin_name,\n )\n with pytest.raises(ValueError, match=msg):\n # multi-index column\n pivot_table(\n self.data,\n values=\"D\",\n index=[\"C\"],\n columns=[\"A\", \"B\"],\n margins=True,\n margins_name=margin_name,\n )\n with pytest.raises(ValueError, match=msg):\n # non-multi-index index/column\n pivot_table(\n self.data,\n values=\"D\",\n index=[\"A\"],\n columns=[\"B\"],\n margins=True,\n margins_name=margin_name,\n )\n\n def test_pivot_timegrouper(self):\n df = DataFrame(\n {\n \"Branch\": \"A A A A A A A B\".split(),\n \"Buyer\": \"Carl Mark Carl Carl Joe Joe Joe Carl\".split(),\n \"Quantity\": [1, 3, 5, 1, 8, 1, 9, 3],\n \"Date\": [\n datetime(2013, 1, 1),\n datetime(2013, 1, 1),\n datetime(2013, 10, 1),\n datetime(2013, 10, 2),\n datetime(2013, 10, 1),\n datetime(2013, 10, 2),\n datetime(2013, 12, 2),\n datetime(2013, 12, 2),\n ],\n }\n ).set_index(\"Date\")\n\n expected = DataFrame(\n np.array([10, 18, 3], dtype=\"int64\").reshape(1, 3),\n index=[datetime(2013, 12, 31)],\n columns=\"Carl Joe Mark\".split(),\n )\n expected.index.name = \"Date\"\n expected.columns.name = \"Buyer\"\n\n result = pivot_table(\n df,\n index=Grouper(freq=\"A\"),\n columns=\"Buyer\",\n values=\"Quantity\",\n aggfunc=np.sum,\n )\n tm.assert_frame_equal(result, expected)\n\n result = pivot_table(\n df,\n index=\"Buyer\",\n columns=Grouper(freq=\"A\"),\n values=\"Quantity\",\n aggfunc=np.sum,\n )\n tm.assert_frame_equal(result, expected.T)\n\n expected = DataFrame(\n np.array([1, np.nan, 3, 9, 18, 
np.nan]).reshape(2, 3),\n index=[datetime(2013, 1, 1), datetime(2013, 7, 1)],\n columns=\"Carl Joe Mark\".split(),\n )\n expected.index.name = \"Date\"\n expected.columns.name = \"Buyer\"\n\n result = pivot_table(\n df,\n index=Grouper(freq=\"6MS\"),\n columns=\"Buyer\",\n values=\"Quantity\",\n aggfunc=np.sum,\n )\n tm.assert_frame_equal(result, expected)\n\n result = pivot_table(\n df,\n index=\"Buyer\",\n columns=Grouper(freq=\"6MS\"),\n values=\"Quantity\",\n aggfunc=np.sum,\n )\n tm.assert_frame_equal(result, expected.T)\n\n # passing the name\n df = df.reset_index()\n result = pivot_table(\n df,\n index=Grouper(freq=\"6MS\", key=\"Date\"),\n columns=\"Buyer\",\n values=\"Quantity\",\n aggfunc=np.sum,\n )\n tm.assert_frame_equal(result, expected)\n\n result = pivot_table(\n df,\n index=\"Buyer\",\n columns=Grouper(freq=\"6MS\", key=\"Date\"),\n values=\"Quantity\",\n aggfunc=np.sum,\n )\n tm.assert_frame_equal(result, expected.T)\n\n msg = \"'The grouper name foo is not found'\"\n with pytest.raises(KeyError, match=msg):\n pivot_table(\n df,\n index=Grouper(freq=\"6MS\", key=\"foo\"),\n columns=\"Buyer\",\n values=\"Quantity\",\n aggfunc=np.sum,\n )\n with pytest.raises(KeyError, match=msg):\n pivot_table(\n df,\n index=\"Buyer\",\n columns=Grouper(freq=\"6MS\", key=\"foo\"),\n values=\"Quantity\",\n aggfunc=np.sum,\n )\n\n # passing the level\n df = df.set_index(\"Date\")\n result = pivot_table(\n df,\n index=Grouper(freq=\"6MS\", level=\"Date\"),\n columns=\"Buyer\",\n values=\"Quantity\",\n aggfunc=np.sum,\n )\n tm.assert_frame_equal(result, expected)\n\n result = pivot_table(\n df,\n index=\"Buyer\",\n columns=Grouper(freq=\"6MS\", level=\"Date\"),\n values=\"Quantity\",\n aggfunc=np.sum,\n )\n tm.assert_frame_equal(result, expected.T)\n\n msg = \"The level foo is not valid\"\n with pytest.raises(ValueError, match=msg):\n pivot_table(\n df,\n index=Grouper(freq=\"6MS\", level=\"foo\"),\n columns=\"Buyer\",\n values=\"Quantity\",\n aggfunc=np.sum,\n )\n with pytest.raises(ValueError, match=msg):\n pivot_table(\n df,\n index=\"Buyer\",\n columns=Grouper(freq=\"6MS\", level=\"foo\"),\n values=\"Quantity\",\n aggfunc=np.sum,\n )\n\n # double grouper\n df = DataFrame(\n {\n \"Branch\": \"A A A A A A A B\".split(),\n \"Buyer\": \"Carl Mark Carl Carl Joe Joe Joe Carl\".split(),\n \"Quantity\": [1, 3, 5, 1, 8, 1, 9, 3],\n \"Date\": [\n datetime(2013, 11, 1, 13, 0),\n datetime(2013, 9, 1, 13, 5),\n datetime(2013, 10, 1, 20, 0),\n datetime(2013, 10, 2, 10, 0),\n datetime(2013, 11, 1, 20, 0),\n datetime(2013, 10, 2, 10, 0),\n datetime(2013, 10, 2, 12, 0),\n datetime(2013, 12, 5, 14, 0),\n ],\n \"PayDay\": [\n datetime(2013, 10, 4, 0, 0),\n datetime(2013, 10, 15, 13, 5),\n datetime(2013, 9, 5, 20, 0),\n datetime(2013, 11, 2, 10, 0),\n datetime(2013, 10, 7, 20, 0),\n datetime(2013, 9, 5, 10, 0),\n datetime(2013, 12, 30, 12, 0),\n datetime(2013, 11, 20, 14, 0),\n ],\n }\n )\n\n result = pivot_table(\n df,\n index=Grouper(freq=\"M\", key=\"Date\"),\n columns=Grouper(freq=\"M\", key=\"PayDay\"),\n values=\"Quantity\",\n aggfunc=np.sum,\n )\n expected = DataFrame(\n np.array(\n [\n np.nan,\n 3,\n np.nan,\n np.nan,\n 6,\n np.nan,\n 1,\n 9,\n np.nan,\n 9,\n np.nan,\n np.nan,\n np.nan,\n np.nan,\n 3,\n np.nan,\n ]\n ).reshape(4, 4),\n index=[\n datetime(2013, 9, 30),\n datetime(2013, 10, 31),\n datetime(2013, 11, 30),\n datetime(2013, 12, 31),\n ],\n columns=[\n datetime(2013, 9, 30),\n datetime(2013, 10, 31),\n datetime(2013, 11, 30),\n datetime(2013, 12, 31),\n ],\n )\n expected.index.name = 
\"Date\"\n expected.columns.name = \"PayDay\"\n\n tm.assert_frame_equal(result, expected)\n\n result = pivot_table(\n df,\n index=Grouper(freq=\"M\", key=\"PayDay\"),\n columns=Grouper(freq=\"M\", key=\"Date\"),\n values=\"Quantity\",\n aggfunc=np.sum,\n )\n tm.assert_frame_equal(result, expected.T)\n\n tuples = [\n (datetime(2013, 9, 30), datetime(2013, 10, 31)),\n (datetime(2013, 10, 31), datetime(2013, 9, 30)),\n (datetime(2013, 10, 31), datetime(2013, 11, 30)),\n (datetime(2013, 10, 31), datetime(2013, 12, 31)),\n (datetime(2013, 11, 30), datetime(2013, 10, 31)),\n (datetime(2013, 12, 31), datetime(2013, 11, 30)),\n ]\n idx = MultiIndex.from_tuples(tuples, names=[\"Date\", \"PayDay\"])\n expected = DataFrame(\n np.array(\n [3, np.nan, 6, np.nan, 1, np.nan, 9, np.nan, 9, np.nan, np.nan, 3]\n ).reshape(6, 2),\n index=idx,\n columns=[\"A\", \"B\"],\n )\n expected.columns.name = \"Branch\"\n\n result = pivot_table(\n df,\n index=[Grouper(freq=\"M\", key=\"Date\"), Grouper(freq=\"M\", key=\"PayDay\")],\n columns=[\"Branch\"],\n values=\"Quantity\",\n aggfunc=np.sum,\n )\n tm.assert_frame_equal(result, expected)\n\n result = pivot_table(\n df,\n index=[\"Branch\"],\n columns=[Grouper(freq=\"M\", key=\"Date\"), Grouper(freq=\"M\", key=\"PayDay\")],\n values=\"Quantity\",\n aggfunc=np.sum,\n )\n tm.assert_frame_equal(result, expected.T)\n\n def test_pivot_datetime_tz(self):\n dates1 = [\n \"2011-07-19 07:00:00\",\n \"2011-07-19 08:00:00\",\n \"2011-07-19 09:00:00\",\n \"2011-07-19 07:00:00\",\n \"2011-07-19 08:00:00\",\n \"2011-07-19 09:00:00\",\n ]\n dates2 = [\n \"2013-01-01 15:00:00\",\n \"2013-01-01 15:00:00\",\n \"2013-01-01 15:00:00\",\n \"2013-02-01 15:00:00\",\n \"2013-02-01 15:00:00\",\n \"2013-02-01 15:00:00\",\n ]\n df = DataFrame(\n {\n \"label\": [\"a\", \"a\", \"a\", \"b\", \"b\", \"b\"],\n \"dt1\": dates1,\n \"dt2\": dates2,\n \"value1\": np.arange(6, dtype=\"int64\"),\n \"value2\": [1, 2] * 3,\n }\n )\n df[\"dt1\"] = df[\"dt1\"].apply(lambda d: pd.Timestamp(d, tz=\"US/Pacific\"))\n df[\"dt2\"] = df[\"dt2\"].apply(lambda d: pd.Timestamp(d, tz=\"Asia/Tokyo\"))\n\n exp_idx = pd.DatetimeIndex(\n [\"2011-07-19 07:00:00\", \"2011-07-19 08:00:00\", \"2011-07-19 09:00:00\"],\n tz=\"US/Pacific\",\n name=\"dt1\",\n )\n exp_col1 = Index([\"value1\", \"value1\"])\n exp_col2 = Index([\"a\", \"b\"], name=\"label\")\n exp_col = MultiIndex.from_arrays([exp_col1, exp_col2])\n expected = DataFrame([[0, 3], [1, 4], [2, 5]], index=exp_idx, columns=exp_col)\n result = pivot_table(df, index=[\"dt1\"], columns=[\"label\"], values=[\"value1\"])\n tm.assert_frame_equal(result, expected)\n\n exp_col1 = Index([\"sum\", \"sum\", \"sum\", \"sum\", \"mean\", \"mean\", \"mean\", \"mean\"])\n exp_col2 = Index([\"value1\", \"value1\", \"value2\", \"value2\"] * 2)\n exp_col3 = pd.DatetimeIndex(\n [\"2013-01-01 15:00:00\", \"2013-02-01 15:00:00\"] * 4,\n tz=\"Asia/Tokyo\",\n name=\"dt2\",\n )\n exp_col = MultiIndex.from_arrays([exp_col1, exp_col2, exp_col3])\n expected = DataFrame(\n np.array(\n [\n [0, 3, 1, 2, 0, 3, 1, 2],\n [1, 4, 2, 1, 1, 4, 2, 1],\n [2, 5, 1, 2, 2, 5, 1, 2],\n ],\n dtype=\"int64\",\n ),\n index=exp_idx,\n columns=exp_col,\n )\n\n result = pivot_table(\n df,\n index=[\"dt1\"],\n columns=[\"dt2\"],\n values=[\"value1\", \"value2\"],\n aggfunc=[np.sum, np.mean],\n )\n tm.assert_frame_equal(result, expected)\n\n def test_pivot_dtaccessor(self):\n # GH 8103\n dates1 = [\n \"2011-07-19 07:00:00\",\n \"2011-07-19 08:00:00\",\n \"2011-07-19 09:00:00\",\n \"2011-07-19 07:00:00\",\n \"2011-07-19 
08:00:00\",\n \"2011-07-19 09:00:00\",\n ]\n dates2 = [\n \"2013-01-01 15:00:00\",\n \"2013-01-01 15:00:00\",\n \"2013-01-01 15:00:00\",\n \"2013-02-01 15:00:00\",\n \"2013-02-01 15:00:00\",\n \"2013-02-01 15:00:00\",\n ]\n df = DataFrame(\n {\n \"label\": [\"a\", \"a\", \"a\", \"b\", \"b\", \"b\"],\n \"dt1\": dates1,\n \"dt2\": dates2,\n \"value1\": np.arange(6, dtype=\"int64\"),\n \"value2\": [1, 2] * 3,\n }\n )\n df[\"dt1\"] = df[\"dt1\"].apply(lambda d: pd.Timestamp(d))\n df[\"dt2\"] = df[\"dt2\"].apply(lambda d: pd.Timestamp(d))\n\n result = pivot_table(\n df, index=\"label\", columns=df[\"dt1\"].dt.hour, values=\"value1\"\n )\n\n exp_idx = Index([\"a\", \"b\"], name=\"label\")\n expected = DataFrame(\n {7: [0, 3], 8: [1, 4], 9: [2, 5]},\n index=exp_idx,\n columns=Index([7, 8, 9], name=\"dt1\"),\n )\n tm.assert_frame_equal(result, expected)\n\n result = pivot_table(\n df, index=df[\"dt2\"].dt.month, columns=df[\"dt1\"].dt.hour, values=\"value1\"\n )\n\n expected = DataFrame(\n {7: [0, 3], 8: [1, 4], 9: [2, 5]},\n index=Index([1, 2], name=\"dt2\"),\n columns=Index([7, 8, 9], name=\"dt1\"),\n )\n tm.assert_frame_equal(result, expected)\n\n result = pivot_table(\n df,\n index=df[\"dt2\"].dt.year.values,\n columns=[df[\"dt1\"].dt.hour, df[\"dt2\"].dt.month],\n values=\"value1\",\n )\n\n exp_col = MultiIndex.from_arrays(\n [[7, 7, 8, 8, 9, 9], [1, 2] * 3], names=[\"dt1\", \"dt2\"]\n )\n expected = DataFrame(\n np.array([[0, 3, 1, 4, 2, 5]], dtype=\"int64\"), index=[2013], columns=exp_col\n )\n tm.assert_frame_equal(result, expected)\n\n result = pivot_table(\n df,\n index=np.array([\"X\", \"X\", \"X\", \"X\", \"Y\", \"Y\"]),\n columns=[df[\"dt1\"].dt.hour, df[\"dt2\"].dt.month],\n values=\"value1\",\n )\n expected = DataFrame(\n np.array(\n [[0, 3, 1, np.nan, 2, np.nan], [np.nan, np.nan, np.nan, 4, np.nan, 5]]\n ),\n index=[\"X\", \"Y\"],\n columns=exp_col,\n )\n tm.assert_frame_equal(result, expected)\n\n def test_daily(self):\n rng = date_range(\"1/1/2000\", \"12/31/2004\", freq=\"D\")\n ts = Series(np.random.randn(len(rng)), index=rng)\n\n annual = pivot_table(\n DataFrame(ts), index=ts.index.year, columns=ts.index.dayofyear\n )\n annual.columns = annual.columns.droplevel(0)\n\n doy = np.asarray(ts.index.dayofyear)\n\n for i in range(1, 367):\n subset = ts[doy == i]\n subset.index = subset.index.year\n\n result = annual[i].dropna()\n tm.assert_series_equal(result, subset, check_names=False)\n assert result.name == i\n\n def test_monthly(self):\n rng = date_range(\"1/1/2000\", \"12/31/2004\", freq=\"M\")\n ts = Series(np.random.randn(len(rng)), index=rng)\n\n annual = pivot_table(\n pd.DataFrame(ts), index=ts.index.year, columns=ts.index.month\n )\n annual.columns = annual.columns.droplevel(0)\n\n month = ts.index.month\n for i in range(1, 13):\n subset = ts[month == i]\n subset.index = subset.index.year\n result = annual[i].dropna()\n tm.assert_series_equal(result, subset, check_names=False)\n assert result.name == i\n\n def test_pivot_table_with_iterator_values(self):\n # GH 12017\n aggs = {\"D\": \"sum\", \"E\": \"mean\"}\n\n pivot_values_list = pd.pivot_table(\n self.data, index=[\"A\"], values=list(aggs.keys()), aggfunc=aggs\n )\n\n pivot_values_keys = pd.pivot_table(\n self.data, index=[\"A\"], values=aggs.keys(), aggfunc=aggs\n )\n tm.assert_frame_equal(pivot_values_keys, pivot_values_list)\n\n agg_values_gen = (value for value in aggs.keys())\n pivot_values_gen = pd.pivot_table(\n self.data, index=[\"A\"], values=agg_values_gen, aggfunc=aggs\n )\n 
tm.assert_frame_equal(pivot_values_gen, pivot_values_list)\n\n def test_pivot_table_margins_name_with_aggfunc_list(self):\n # GH 13354\n margins_name = \"Weekly\"\n costs = pd.DataFrame(\n {\n \"item\": [\"bacon\", \"cheese\", \"bacon\", \"cheese\"],\n \"cost\": [2.5, 4.5, 3.2, 3.3],\n \"day\": [\"M\", \"M\", \"T\", \"T\"],\n }\n )\n table = costs.pivot_table(\n index=\"item\",\n columns=\"day\",\n margins=True,\n margins_name=margins_name,\n aggfunc=[np.mean, max],\n )\n ix = pd.Index([\"bacon\", \"cheese\", margins_name], dtype=\"object\", name=\"item\")\n tups = [\n (\"mean\", \"cost\", \"M\"),\n (\"mean\", \"cost\", \"T\"),\n (\"mean\", \"cost\", margins_name),\n (\"max\", \"cost\", \"M\"),\n (\"max\", \"cost\", \"T\"),\n (\"max\", \"cost\", margins_name),\n ]\n cols = pd.MultiIndex.from_tuples(tups, names=[None, None, \"day\"])\n expected = pd.DataFrame(table.values, index=ix, columns=cols)\n tm.assert_frame_equal(table, expected)\n\n @pytest.mark.xfail(reason=\"GH#17035 (np.mean of ints is casted back to ints)\")\n def test_categorical_margins(self, observed):\n # GH 10989\n df = pd.DataFrame(\n {\"x\": np.arange(8), \"y\": np.arange(8) // 4, \"z\": np.arange(8) % 2}\n )\n\n expected = pd.DataFrame([[1.0, 2.0, 1.5], [5, 6, 5.5], [3, 4, 3.5]])\n expected.index = Index([0, 1, \"All\"], name=\"y\")\n expected.columns = Index([0, 1, \"All\"], name=\"z\")\n\n table = df.pivot_table(\"x\", \"y\", \"z\", dropna=observed, margins=True)\n tm.assert_frame_equal(table, expected)\n\n @pytest.mark.xfail(reason=\"GH#17035 (np.mean of ints is casted back to ints)\")\n def test_categorical_margins_category(self, observed):\n df = pd.DataFrame(\n {\"x\": np.arange(8), \"y\": np.arange(8) // 4, \"z\": np.arange(8) % 2}\n )\n\n expected = pd.DataFrame([[1.0, 2.0, 1.5], [5, 6, 5.5], [3, 4, 3.5]])\n expected.index = Index([0, 1, \"All\"], name=\"y\")\n expected.columns = Index([0, 1, \"All\"], name=\"z\")\n\n df.y = df.y.astype(\"category\")\n df.z = df.z.astype(\"category\")\n table = df.pivot_table(\"x\", \"y\", \"z\", dropna=observed, margins=True)\n tm.assert_frame_equal(table, expected)\n\n def test_categorical_aggfunc(self, observed):\n # GH 9534\n df = pd.DataFrame(\n {\"C1\": [\"A\", \"B\", \"C\", \"C\"], \"C2\": [\"a\", \"a\", \"b\", \"b\"], \"V\": [1, 2, 3, 4]}\n )\n df[\"C1\"] = df[\"C1\"].astype(\"category\")\n result = df.pivot_table(\n \"V\", index=\"C1\", columns=\"C2\", dropna=observed, aggfunc=\"count\"\n )\n\n expected_index = pd.CategoricalIndex(\n [\"A\", \"B\", \"C\"], categories=[\"A\", \"B\", \"C\"], ordered=False, name=\"C1\"\n )\n expected_columns = pd.Index([\"a\", \"b\"], name=\"C2\")\n expected_data = np.array([[1.0, np.nan], [1.0, np.nan], [np.nan, 2.0]])\n expected = pd.DataFrame(\n expected_data, index=expected_index, columns=expected_columns\n )\n tm.assert_frame_equal(result, expected)\n\n def test_categorical_pivot_index_ordering(self, observed):\n # GH 8731\n df = pd.DataFrame(\n {\n \"Sales\": [100, 120, 220],\n \"Month\": [\"January\", \"January\", \"January\"],\n \"Year\": [2013, 2014, 2013],\n }\n )\n months = [\n \"January\",\n \"February\",\n \"March\",\n \"April\",\n \"May\",\n \"June\",\n \"July\",\n \"August\",\n \"September\",\n \"October\",\n \"November\",\n \"December\",\n ]\n df[\"Month\"] = df[\"Month\"].astype(\"category\").cat.set_categories(months)\n result = df.pivot_table(\n values=\"Sales\",\n index=\"Month\",\n columns=\"Year\",\n dropna=observed,\n aggfunc=\"sum\",\n )\n expected_columns = pd.Int64Index([2013, 2014], name=\"Year\")\n expected_index 
= pd.CategoricalIndex(\n [\"January\"], categories=months, ordered=False, name=\"Month\"\n )\n expected = pd.DataFrame(\n [[320, 120]], index=expected_index, columns=expected_columns\n )\n if not observed:\n result = result.dropna().astype(np.int64)\n\n tm.assert_frame_equal(result, expected)\n\n def test_pivot_table_not_series(self):\n # GH 4386\n # pivot_table always returns a DataFrame\n # when values is not list like and columns is None\n # and aggfunc is not instance of list\n df = DataFrame({\"col1\": [3, 4, 5], \"col2\": [\"C\", \"D\", \"E\"], \"col3\": [1, 3, 9]})\n\n result = df.pivot_table(\"col1\", index=[\"col3\", \"col2\"], aggfunc=np.sum)\n m = MultiIndex.from_arrays([[1, 3, 9], [\"C\", \"D\", \"E\"]], names=[\"col3\", \"col2\"])\n expected = DataFrame([3, 4, 5], index=m, columns=[\"col1\"])\n\n tm.assert_frame_equal(result, expected)\n\n result = df.pivot_table(\"col1\", index=\"col3\", columns=\"col2\", aggfunc=np.sum)\n expected = DataFrame(\n [[3, np.NaN, np.NaN], [np.NaN, 4, np.NaN], [np.NaN, np.NaN, 5]],\n index=Index([1, 3, 9], name=\"col3\"),\n columns=Index([\"C\", \"D\", \"E\"], name=\"col2\"),\n )\n\n tm.assert_frame_equal(result, expected)\n\n result = df.pivot_table(\"col1\", index=\"col3\", aggfunc=[np.sum])\n m = MultiIndex.from_arrays([[\"sum\"], [\"col1\"]])\n expected = DataFrame([3, 4, 5], index=Index([1, 3, 9], name=\"col3\"), columns=m)\n\n tm.assert_frame_equal(result, expected)\n\n def test_pivot_margins_name_unicode(self):\n # issue #13292\n greek = \"\\u0394\\u03bf\\u03ba\\u03b9\\u03bc\\u03ae\"\n frame = pd.DataFrame({\"foo\": [1, 2, 3]})\n table = pd.pivot_table(\n frame, index=[\"foo\"], aggfunc=len, margins=True, margins_name=greek\n )\n index = pd.Index([1, 2, 3, greek], dtype=\"object\", name=\"foo\")\n expected = pd.DataFrame(index=index)\n tm.assert_frame_equal(table, expected)\n\n def test_pivot_string_as_func(self):\n # GH #18713\n # for correctness purposes\n data = DataFrame(\n {\n \"A\": [\n \"foo\",\n \"foo\",\n \"foo\",\n \"foo\",\n \"bar\",\n \"bar\",\n \"bar\",\n \"bar\",\n \"foo\",\n \"foo\",\n \"foo\",\n ],\n \"B\": [\n \"one\",\n \"one\",\n \"one\",\n \"two\",\n \"one\",\n \"one\",\n \"one\",\n \"two\",\n \"two\",\n \"two\",\n \"one\",\n ],\n \"C\": range(11),\n }\n )\n\n result = pivot_table(data, index=\"A\", columns=\"B\", aggfunc=\"sum\")\n mi = MultiIndex(\n levels=[[\"C\"], [\"one\", \"two\"]], codes=[[0, 0], [0, 1]], names=[None, \"B\"]\n )\n expected = DataFrame(\n {(\"C\", \"one\"): {\"bar\": 15, \"foo\": 13}, (\"C\", \"two\"): {\"bar\": 7, \"foo\": 20}},\n columns=mi,\n ).rename_axis(\"A\")\n tm.assert_frame_equal(result, expected)\n\n result = pivot_table(data, index=\"A\", columns=\"B\", aggfunc=[\"sum\", \"mean\"])\n mi = MultiIndex(\n levels=[[\"sum\", \"mean\"], [\"C\"], [\"one\", \"two\"]],\n codes=[[0, 0, 1, 1], [0, 0, 0, 0], [0, 1, 0, 1]],\n names=[None, None, \"B\"],\n )\n expected = DataFrame(\n {\n (\"mean\", \"C\", \"one\"): {\"bar\": 5.0, \"foo\": 3.25},\n (\"mean\", \"C\", \"two\"): {\"bar\": 7.0, \"foo\": 6.666666666666667},\n (\"sum\", \"C\", \"one\"): {\"bar\": 15, \"foo\": 13},\n (\"sum\", \"C\", \"two\"): {\"bar\": 7, \"foo\": 20},\n },\n columns=mi,\n ).rename_axis(\"A\")\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"f, f_numpy\",\n [\n (\"sum\", np.sum),\n (\"mean\", np.mean),\n (\"std\", np.std),\n ([\"sum\", \"mean\"], [np.sum, np.mean]),\n ([\"sum\", \"std\"], [np.sum, np.std]),\n ([\"std\", \"mean\"], [np.std, np.mean]),\n ],\n )\n def 
test_pivot_string_func_vs_func(self, f, f_numpy):\n # GH #18713\n # for consistency purposes\n result = pivot_table(self.data, index=\"A\", columns=\"B\", aggfunc=f)\n expected = pivot_table(self.data, index=\"A\", columns=\"B\", aggfunc=f_numpy)\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.slow\n def test_pivot_number_of_levels_larger_than_int32(self):\n # GH 20601\n df = DataFrame(\n {\"ind1\": np.arange(2 ** 16), \"ind2\": np.arange(2 ** 16), \"count\": 0}\n )\n\n msg = \"Unstacked DataFrame is too big, causing int32 overflow\"\n with pytest.raises(ValueError, match=msg):\n df.pivot_table(\n index=\"ind1\", columns=\"ind2\", values=\"count\", aggfunc=\"count\"\n )\n\n def test_pivot_table_aggfunc_dropna(self, dropna):\n # GH 22159\n df = pd.DataFrame(\n {\n \"fruit\": [\"apple\", \"peach\", \"apple\"],\n \"size\": [1, 1, 2],\n \"taste\": [7, 6, 6],\n }\n )\n\n def ret_one(x):\n return 1\n\n def ret_sum(x):\n return sum(x)\n\n def ret_none(x):\n return np.nan\n\n result = pd.pivot_table(\n df, columns=\"fruit\", aggfunc=[ret_sum, ret_none, ret_one], dropna=dropna\n )\n\n data = [[3, 1, np.nan, np.nan, 1, 1], [13, 6, np.nan, np.nan, 1, 1]]\n col = pd.MultiIndex.from_product(\n [[\"ret_sum\", \"ret_none\", \"ret_one\"], [\"apple\", \"peach\"]],\n names=[None, \"fruit\"],\n )\n expected = pd.DataFrame(data, index=[\"size\", \"taste\"], columns=col)\n\n if dropna:\n expected = expected.dropna(axis=\"columns\")\n\n tm.assert_frame_equal(result, expected)\n\n def test_pivot_table_aggfunc_scalar_dropna(self, dropna):\n # GH 22159\n df = pd.DataFrame(\n {\"A\": [\"one\", \"two\", \"one\"], \"x\": [3, np.nan, 2], \"y\": [1, np.nan, np.nan]}\n )\n\n result = pd.pivot_table(df, columns=\"A\", aggfunc=np.mean, dropna=dropna)\n\n data = [[2.5, np.nan], [1, np.nan]]\n col = pd.Index([\"one\", \"two\"], name=\"A\")\n expected = pd.DataFrame(data, index=[\"x\", \"y\"], columns=col)\n\n if dropna:\n expected = expected.dropna(axis=\"columns\")\n\n tm.assert_frame_equal(result, expected)\n\n\nclass TestCrosstab:\n def setup_method(self, method):\n df = DataFrame(\n {\n \"A\": [\n \"foo\",\n \"foo\",\n \"foo\",\n \"foo\",\n \"bar\",\n \"bar\",\n \"bar\",\n \"bar\",\n \"foo\",\n \"foo\",\n \"foo\",\n ],\n \"B\": [\n \"one\",\n \"one\",\n \"one\",\n \"two\",\n \"one\",\n \"one\",\n \"one\",\n \"two\",\n \"two\",\n \"two\",\n \"one\",\n ],\n \"C\": [\n \"dull\",\n \"dull\",\n \"shiny\",\n \"dull\",\n \"dull\",\n \"shiny\",\n \"shiny\",\n \"dull\",\n \"shiny\",\n \"shiny\",\n \"shiny\",\n ],\n \"D\": np.random.randn(11),\n \"E\": np.random.randn(11),\n \"F\": np.random.randn(11),\n }\n )\n\n self.df = df.append(df, ignore_index=True)\n\n def test_crosstab_single(self):\n df = self.df\n result = crosstab(df[\"A\"], df[\"C\"])\n expected = df.groupby([\"A\", \"C\"]).size().unstack()\n tm.assert_frame_equal(result, expected.fillna(0).astype(np.int64))\n\n def test_crosstab_multiple(self):\n df = self.df\n\n result = crosstab(df[\"A\"], [df[\"B\"], df[\"C\"]])\n expected = df.groupby([\"A\", \"B\", \"C\"]).size()\n expected = expected.unstack(\"B\").unstack(\"C\").fillna(0).astype(np.int64)\n tm.assert_frame_equal(result, expected)\n\n result = crosstab([df[\"B\"], df[\"C\"]], df[\"A\"])\n expected = df.groupby([\"B\", \"C\", \"A\"]).size()\n expected = expected.unstack(\"A\").fillna(0).astype(np.int64)\n tm.assert_frame_equal(result, expected)\n\n def test_crosstab_ndarray(self):\n a = np.random.randint(0, 5, size=100)\n b = np.random.randint(0, 3, size=100)\n c = np.random.randint(0, 10, 
size=100)\n\n df = DataFrame({\"a\": a, \"b\": b, \"c\": c})\n\n result = crosstab(a, [b, c], rownames=[\"a\"], colnames=(\"b\", \"c\"))\n expected = crosstab(df[\"a\"], [df[\"b\"], df[\"c\"]])\n tm.assert_frame_equal(result, expected)\n\n result = crosstab([b, c], a, colnames=[\"a\"], rownames=(\"b\", \"c\"))\n expected = crosstab([df[\"b\"], df[\"c\"]], df[\"a\"])\n tm.assert_frame_equal(result, expected)\n\n # assign arbitrary names\n result = crosstab(self.df[\"A\"].values, self.df[\"C\"].values)\n assert result.index.name == \"row_0\"\n assert result.columns.name == \"col_0\"\n\n def test_crosstab_non_aligned(self):\n # GH 17005\n a = pd.Series([0, 1, 1], index=[\"a\", \"b\", \"c\"])\n b = pd.Series([3, 4, 3, 4, 3], index=[\"a\", \"b\", \"c\", \"d\", \"f\"])\n c = np.array([3, 4, 3])\n\n expected = pd.DataFrame(\n [[1, 0], [1, 1]],\n index=Index([0, 1], name=\"row_0\"),\n columns=Index([3, 4], name=\"col_0\"),\n )\n\n result = crosstab(a, b)\n tm.assert_frame_equal(result, expected)\n\n result = crosstab(a, c)\n tm.assert_frame_equal(result, expected)\n\n def test_crosstab_margins(self):\n a = np.random.randint(0, 7, size=100)\n b = np.random.randint(0, 3, size=100)\n c = np.random.randint(0, 5, size=100)\n\n df = DataFrame({\"a\": a, \"b\": b, \"c\": c})\n\n result = crosstab(a, [b, c], rownames=[\"a\"], colnames=(\"b\", \"c\"), margins=True)\n\n assert result.index.names == (\"a\",)\n assert result.columns.names == [\"b\", \"c\"]\n\n all_cols = result[\"All\", \"\"]\n exp_cols = df.groupby([\"a\"]).size().astype(\"i8\")\n # to keep index.name\n exp_margin = Series([len(df)], index=Index([\"All\"], name=\"a\"))\n exp_cols = exp_cols.append(exp_margin)\n exp_cols.name = (\"All\", \"\")\n\n tm.assert_series_equal(all_cols, exp_cols)\n\n all_rows = result.loc[\"All\"]\n exp_rows = df.groupby([\"b\", \"c\"]).size().astype(\"i8\")\n exp_rows = exp_rows.append(Series([len(df)], index=[(\"All\", \"\")]))\n exp_rows.name = \"All\"\n\n exp_rows = exp_rows.reindex(all_rows.index)\n exp_rows = exp_rows.fillna(0).astype(np.int64)\n tm.assert_series_equal(all_rows, exp_rows)\n\n def test_crosstab_margins_set_margin_name(self):\n # GH 15972\n a = np.random.randint(0, 7, size=100)\n b = np.random.randint(0, 3, size=100)\n c = np.random.randint(0, 5, size=100)\n\n df = DataFrame({\"a\": a, \"b\": b, \"c\": c})\n\n result = crosstab(\n a,\n [b, c],\n rownames=[\"a\"],\n colnames=(\"b\", \"c\"),\n margins=True,\n margins_name=\"TOTAL\",\n )\n\n assert result.index.names == (\"a\",)\n assert result.columns.names == [\"b\", \"c\"]\n\n all_cols = result[\"TOTAL\", \"\"]\n exp_cols = df.groupby([\"a\"]).size().astype(\"i8\")\n # to keep index.name\n exp_margin = Series([len(df)], index=Index([\"TOTAL\"], name=\"a\"))\n exp_cols = exp_cols.append(exp_margin)\n exp_cols.name = (\"TOTAL\", \"\")\n\n tm.assert_series_equal(all_cols, exp_cols)\n\n all_rows = result.loc[\"TOTAL\"]\n exp_rows = df.groupby([\"b\", \"c\"]).size().astype(\"i8\")\n exp_rows = exp_rows.append(Series([len(df)], index=[(\"TOTAL\", \"\")]))\n exp_rows.name = \"TOTAL\"\n\n exp_rows = exp_rows.reindex(all_rows.index)\n exp_rows = exp_rows.fillna(0).astype(np.int64)\n tm.assert_series_equal(all_rows, exp_rows)\n\n msg = \"margins_name argument must be a string\"\n for margins_name in [666, None, [\"a\", \"b\"]]:\n with pytest.raises(ValueError, match=msg):\n crosstab(\n a,\n [b, c],\n rownames=[\"a\"],\n colnames=(\"b\", \"c\"),\n margins=True,\n margins_name=margins_name,\n )\n\n def test_crosstab_pass_values(self):\n a = 
np.random.randint(0, 7, size=100)\n b = np.random.randint(0, 3, size=100)\n c = np.random.randint(0, 5, size=100)\n values = np.random.randn(100)\n\n table = crosstab(\n [a, b], c, values, aggfunc=np.sum, rownames=[\"foo\", \"bar\"], colnames=[\"baz\"]\n )\n\n df = DataFrame({\"foo\": a, \"bar\": b, \"baz\": c, \"values\": values})\n\n expected = df.pivot_table(\n \"values\", index=[\"foo\", \"bar\"], columns=\"baz\", aggfunc=np.sum\n )\n tm.assert_frame_equal(table, expected)\n\n def test_crosstab_dropna(self):\n # GH 3820\n a = np.array([\"foo\", \"foo\", \"foo\", \"bar\", \"bar\", \"foo\", \"foo\"], dtype=object)\n b = np.array([\"one\", \"one\", \"two\", \"one\", \"two\", \"two\", \"two\"], dtype=object)\n c = np.array(\n [\"dull\", \"dull\", \"dull\", \"dull\", \"dull\", \"shiny\", \"shiny\"], dtype=object\n )\n res = pd.crosstab(a, [b, c], rownames=[\"a\"], colnames=[\"b\", \"c\"], dropna=False)\n m = MultiIndex.from_tuples(\n [(\"one\", \"dull\"), (\"one\", \"shiny\"), (\"two\", \"dull\"), (\"two\", \"shiny\")],\n names=[\"b\", \"c\"],\n )\n tm.assert_index_equal(res.columns, m)\n\n def test_crosstab_no_overlap(self):\n # GS 10291\n\n s1 = pd.Series([1, 2, 3], index=[1, 2, 3])\n s2 = pd.Series([4, 5, 6], index=[4, 5, 6])\n\n actual = crosstab(s1, s2)\n expected = pd.DataFrame()\n\n tm.assert_frame_equal(actual, expected)\n\n def test_margin_dropna(self):\n # GH 12577\n # pivot_table counts null into margin ('All')\n # when margins=true and dropna=true\n\n df = pd.DataFrame({\"a\": [1, 2, 2, 2, 2, np.nan], \"b\": [3, 3, 4, 4, 4, 4]})\n actual = pd.crosstab(df.a, df.b, margins=True, dropna=True)\n expected = pd.DataFrame([[1, 0, 1], [1, 3, 4], [2, 3, 5]])\n expected.index = Index([1.0, 2.0, \"All\"], name=\"a\")\n expected.columns = Index([3, 4, \"All\"], name=\"b\")\n tm.assert_frame_equal(actual, expected)\n\n df = DataFrame(\n {\"a\": [1, np.nan, np.nan, np.nan, 2, np.nan], \"b\": [3, np.nan, 4, 4, 4, 4]}\n )\n actual = pd.crosstab(df.a, df.b, margins=True, dropna=True)\n expected = pd.DataFrame([[1, 0, 1], [0, 1, 1], [1, 1, 2]])\n expected.index = Index([1.0, 2.0, \"All\"], name=\"a\")\n expected.columns = Index([3.0, 4.0, \"All\"], name=\"b\")\n tm.assert_frame_equal(actual, expected)\n\n df = DataFrame(\n {\"a\": [1, np.nan, np.nan, np.nan, np.nan, 2], \"b\": [3, 3, 4, 4, 4, 4]}\n )\n actual = pd.crosstab(df.a, df.b, margins=True, dropna=True)\n expected = pd.DataFrame([[1, 0, 1], [0, 1, 1], [1, 1, 2]])\n expected.index = Index([1.0, 2.0, \"All\"], name=\"a\")\n expected.columns = Index([3, 4, \"All\"], name=\"b\")\n tm.assert_frame_equal(actual, expected)\n\n # GH 12642\n # _add_margins raises KeyError: Level None not found\n # when margins=True and dropna=False\n df = pd.DataFrame({\"a\": [1, 2, 2, 2, 2, np.nan], \"b\": [3, 3, 4, 4, 4, 4]})\n actual = pd.crosstab(df.a, df.b, margins=True, dropna=False)\n expected = pd.DataFrame([[1, 0, 1], [1, 3, 4], [2, 4, 6]])\n expected.index = Index([1.0, 2.0, \"All\"], name=\"a\")\n expected.columns = Index([3, 4, \"All\"], name=\"b\")\n tm.assert_frame_equal(actual, expected)\n\n df = DataFrame(\n {\"a\": [1, np.nan, np.nan, np.nan, 2, np.nan], \"b\": [3, np.nan, 4, 4, 4, 4]}\n )\n actual = pd.crosstab(df.a, df.b, margins=True, dropna=False)\n expected = pd.DataFrame([[1, 0, 1], [0, 1, 1], [1, 4, 6]])\n expected.index = Index([1.0, 2.0, \"All\"], name=\"a\")\n expected.columns = Index([3.0, 4.0, \"All\"], name=\"b\")\n tm.assert_frame_equal(actual, expected)\n\n a = np.array([\"foo\", \"foo\", \"foo\", \"bar\", \"bar\", \"foo\", 
\"foo\"], dtype=object)\n b = np.array([\"one\", \"one\", \"two\", \"one\", \"two\", np.nan, \"two\"], dtype=object)\n c = np.array(\n [\"dull\", \"dull\", \"dull\", \"dull\", \"dull\", \"shiny\", \"shiny\"], dtype=object\n )\n\n actual = pd.crosstab(\n a, [b, c], rownames=[\"a\"], colnames=[\"b\", \"c\"], margins=True, dropna=False\n )\n m = MultiIndex.from_arrays(\n [\n [\"one\", \"one\", \"two\", \"two\", \"All\"],\n [\"dull\", \"shiny\", \"dull\", \"shiny\", \"\"],\n ],\n names=[\"b\", \"c\"],\n )\n expected = DataFrame(\n [[1, 0, 1, 0, 2], [2, 0, 1, 1, 5], [3, 0, 2, 1, 7]], columns=m\n )\n expected.index = Index([\"bar\", \"foo\", \"All\"], name=\"a\")\n tm.assert_frame_equal(actual, expected)\n\n actual = pd.crosstab(\n [a, b], c, rownames=[\"a\", \"b\"], colnames=[\"c\"], margins=True, dropna=False\n )\n m = MultiIndex.from_arrays(\n [[\"bar\", \"bar\", \"foo\", \"foo\", \"All\"], [\"one\", \"two\", \"one\", \"two\", \"\"]],\n names=[\"a\", \"b\"],\n )\n expected = DataFrame(\n [[1, 0, 1], [1, 0, 1], [2, 0, 2], [1, 1, 2], [5, 2, 7]], index=m\n )\n expected.columns = Index([\"dull\", \"shiny\", \"All\"], name=\"c\")\n tm.assert_frame_equal(actual, expected)\n\n actual = pd.crosstab(\n [a, b], c, rownames=[\"a\", \"b\"], colnames=[\"c\"], margins=True, dropna=True\n )\n m = MultiIndex.from_arrays(\n [[\"bar\", \"bar\", \"foo\", \"foo\", \"All\"], [\"one\", \"two\", \"one\", \"two\", \"\"]],\n names=[\"a\", \"b\"],\n )\n expected = DataFrame(\n [[1, 0, 1], [1, 0, 1], [2, 0, 2], [1, 1, 2], [5, 1, 6]], index=m\n )\n expected.columns = Index([\"dull\", \"shiny\", \"All\"], name=\"c\")\n tm.assert_frame_equal(actual, expected)\n\n def test_crosstab_normalize(self):\n # Issue 12578\n df = pd.DataFrame(\n {\"a\": [1, 2, 2, 2, 2], \"b\": [3, 3, 4, 4, 4], \"c\": [1, 1, np.nan, 1, 1]}\n )\n\n rindex = pd.Index([1, 2], name=\"a\")\n cindex = pd.Index([3, 4], name=\"b\")\n full_normal = pd.DataFrame([[0.2, 0], [0.2, 0.6]], index=rindex, columns=cindex)\n row_normal = pd.DataFrame(\n [[1.0, 0], [0.25, 0.75]], index=rindex, columns=cindex\n )\n col_normal = pd.DataFrame([[0.5, 0], [0.5, 1.0]], index=rindex, columns=cindex)\n\n # Check all normalize args\n tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize=\"all\"), full_normal)\n tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize=True), full_normal)\n tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize=\"index\"), row_normal)\n tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize=\"columns\"), col_normal)\n tm.assert_frame_equal(\n pd.crosstab(df.a, df.b, normalize=1),\n pd.crosstab(df.a, df.b, normalize=\"columns\"),\n )\n tm.assert_frame_equal(\n pd.crosstab(df.a, df.b, normalize=0),\n pd.crosstab(df.a, df.b, normalize=\"index\"),\n )\n\n row_normal_margins = pd.DataFrame(\n [[1.0, 0], [0.25, 0.75], [0.4, 0.6]],\n index=pd.Index([1, 2, \"All\"], name=\"a\", dtype=\"object\"),\n columns=pd.Index([3, 4], name=\"b\", dtype=\"object\"),\n )\n col_normal_margins = pd.DataFrame(\n [[0.5, 0, 0.2], [0.5, 1.0, 0.8]],\n index=pd.Index([1, 2], name=\"a\", dtype=\"object\"),\n columns=pd.Index([3, 4, \"All\"], name=\"b\", dtype=\"object\"),\n )\n\n all_normal_margins = pd.DataFrame(\n [[0.2, 0, 0.2], [0.2, 0.6, 0.8], [0.4, 0.6, 1]],\n index=pd.Index([1, 2, \"All\"], name=\"a\", dtype=\"object\"),\n columns=pd.Index([3, 4, \"All\"], name=\"b\", dtype=\"object\"),\n )\n tm.assert_frame_equal(\n pd.crosstab(df.a, df.b, normalize=\"index\", margins=True), row_normal_margins\n )\n tm.assert_frame_equal(\n pd.crosstab(df.a, df.b, 
normalize=\"columns\", margins=True),\n col_normal_margins,\n )\n tm.assert_frame_equal(\n pd.crosstab(df.a, df.b, normalize=True, margins=True), all_normal_margins\n )\n\n # Test arrays\n pd.crosstab(\n [np.array([1, 1, 2, 2]), np.array([1, 2, 1, 2])], np.array([1, 2, 1, 2])\n )\n\n # Test with aggfunc\n norm_counts = pd.DataFrame(\n [[0.25, 0, 0.25], [0.25, 0.5, 0.75], [0.5, 0.5, 1]],\n index=pd.Index([1, 2, \"All\"], name=\"a\", dtype=\"object\"),\n columns=pd.Index([3, 4, \"All\"], name=\"b\"),\n )\n test_case = pd.crosstab(\n df.a, df.b, df.c, aggfunc=\"count\", normalize=\"all\", margins=True\n )\n tm.assert_frame_equal(test_case, norm_counts)\n\n df = pd.DataFrame(\n {\"a\": [1, 2, 2, 2, 2], \"b\": [3, 3, 4, 4, 4], \"c\": [0, 4, np.nan, 3, 3]}\n )\n\n norm_sum = pd.DataFrame(\n [[0, 0, 0.0], [0.4, 0.6, 1], [0.4, 0.6, 1]],\n index=pd.Index([1, 2, \"All\"], name=\"a\", dtype=\"object\"),\n columns=pd.Index([3, 4, \"All\"], name=\"b\", dtype=\"object\"),\n )\n test_case = pd.crosstab(\n df.a, df.b, df.c, aggfunc=np.sum, normalize=\"all\", margins=True\n )\n tm.assert_frame_equal(test_case, norm_sum)\n\n def test_crosstab_with_empties(self):\n # Check handling of empties\n df = pd.DataFrame(\n {\n \"a\": [1, 2, 2, 2, 2],\n \"b\": [3, 3, 4, 4, 4],\n \"c\": [np.nan, np.nan, np.nan, np.nan, np.nan],\n }\n )\n\n empty = pd.DataFrame(\n [[0.0, 0.0], [0.0, 0.0]],\n index=pd.Index([1, 2], name=\"a\", dtype=\"int64\"),\n columns=pd.Index([3, 4], name=\"b\"),\n )\n\n for i in [True, \"index\", \"columns\"]:\n calculated = pd.crosstab(\n df.a, df.b, values=df.c, aggfunc=\"count\", normalize=i\n )\n tm.assert_frame_equal(empty, calculated)\n\n nans = pd.DataFrame(\n [[0.0, np.nan], [0.0, 0.0]],\n index=pd.Index([1, 2], name=\"a\", dtype=\"int64\"),\n columns=pd.Index([3, 4], name=\"b\"),\n )\n\n calculated = pd.crosstab(\n df.a, df.b, values=df.c, aggfunc=\"count\", normalize=False\n )\n tm.assert_frame_equal(nans, calculated)\n\n def test_crosstab_errors(self):\n # Issue 12578\n\n df = pd.DataFrame(\n {\"a\": [1, 2, 2, 2, 2], \"b\": [3, 3, 4, 4, 4], \"c\": [1, 1, np.nan, 1, 1]}\n )\n\n error = \"values cannot be used without an aggfunc.\"\n with pytest.raises(ValueError, match=error):\n pd.crosstab(df.a, df.b, values=df.c)\n\n error = \"aggfunc cannot be used without values\"\n with pytest.raises(ValueError, match=error):\n pd.crosstab(df.a, df.b, aggfunc=np.mean)\n\n error = \"Not a valid normalize argument\"\n with pytest.raises(ValueError, match=error):\n pd.crosstab(df.a, df.b, normalize=\"42\")\n\n with pytest.raises(ValueError, match=error):\n pd.crosstab(df.a, df.b, normalize=42)\n\n error = \"Not a valid margins argument\"\n with pytest.raises(ValueError, match=error):\n pd.crosstab(df.a, df.b, normalize=\"all\", margins=42)\n\n def test_crosstab_with_categorial_columns(self):\n # GH 8860\n df = pd.DataFrame(\n {\n \"MAKE\": [\"Honda\", \"Acura\", \"Tesla\", \"Honda\", \"Honda\", \"Acura\"],\n \"MODEL\": [\"Sedan\", \"Sedan\", \"Electric\", \"Pickup\", \"Sedan\", \"Sedan\"],\n }\n )\n categories = [\"Sedan\", \"Electric\", \"Pickup\"]\n df[\"MODEL\"] = df[\"MODEL\"].astype(\"category\").cat.set_categories(categories)\n result = pd.crosstab(df[\"MAKE\"], df[\"MODEL\"])\n\n expected_index = pd.Index([\"Acura\", \"Honda\", \"Tesla\"], name=\"MAKE\")\n expected_columns = pd.CategoricalIndex(\n categories, categories=categories, ordered=False, name=\"MODEL\"\n )\n expected_data = [[2, 0, 0], [2, 0, 1], [0, 1, 0]]\n expected = pd.DataFrame(\n expected_data, index=expected_index, 
columns=expected_columns\n )\n tm.assert_frame_equal(result, expected)\n\n def test_crosstab_with_numpy_size(self):\n # GH 4003\n df = pd.DataFrame(\n {\n \"A\": [\"one\", \"one\", \"two\", \"three\"] * 6,\n \"B\": [\"A\", \"B\", \"C\"] * 8,\n \"C\": [\"foo\", \"foo\", \"foo\", \"bar\", \"bar\", \"bar\"] * 4,\n \"D\": np.random.randn(24),\n \"E\": np.random.randn(24),\n }\n )\n result = pd.crosstab(\n index=[df[\"A\"], df[\"B\"]],\n columns=[df[\"C\"]],\n margins=True,\n aggfunc=np.size,\n values=df[\"D\"],\n )\n expected_index = pd.MultiIndex(\n levels=[[\"All\", \"one\", \"three\", \"two\"], [\"\", \"A\", \"B\", \"C\"]],\n codes=[[1, 1, 1, 2, 2, 2, 3, 3, 3, 0], [1, 2, 3, 1, 2, 3, 1, 2, 3, 0]],\n names=[\"A\", \"B\"],\n )\n expected_column = pd.Index([\"bar\", \"foo\", \"All\"], dtype=\"object\", name=\"C\")\n expected_data = np.array(\n [\n [2.0, 2.0, 4.0],\n [2.0, 2.0, 4.0],\n [2.0, 2.0, 4.0],\n [2.0, np.nan, 2.0],\n [np.nan, 2.0, 2.0],\n [2.0, np.nan, 2.0],\n [np.nan, 2.0, 2.0],\n [2.0, np.nan, 2.0],\n [np.nan, 2.0, 2.0],\n [12.0, 12.0, 24.0],\n ]\n )\n expected = pd.DataFrame(\n expected_data, index=expected_index, columns=expected_column\n )\n tm.assert_frame_equal(result, expected)\n\n def test_crosstab_dup_index_names(self):\n # GH 13279\n s = pd.Series(range(3), name=\"foo\")\n\n result = pd.crosstab(s, s)\n expected_index = pd.Index(range(3), name=\"foo\")\n expected = pd.DataFrame(\n np.eye(3, dtype=np.int64), index=expected_index, columns=expected_index\n )\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"names\", [[\"a\", (\"b\", \"c\")], [(\"a\", \"b\"), \"c\"]])\n def test_crosstab_tuple_name(self, names):\n s1 = pd.Series(range(3), name=names[0])\n s2 = pd.Series(range(1, 4), name=names[1])\n\n mi = pd.MultiIndex.from_arrays([range(3), range(1, 4)], names=names)\n expected = pd.Series(1, index=mi).unstack(1, fill_value=0)\n\n result = pd.crosstab(s1, s2)\n tm.assert_frame_equal(result, expected)\n\n def test_crosstab_unsorted_order(self):\n df = pd.DataFrame({\"b\": [3, 1, 2], \"a\": [5, 4, 6]}, index=[\"C\", \"A\", \"B\"])\n result = pd.crosstab(df.index, [df.b, df.a])\n e_idx = pd.Index([\"A\", \"B\", \"C\"], name=\"row_0\")\n e_columns = pd.MultiIndex.from_tuples(\n [(1, 4), (2, 6), (3, 5)], names=[\"b\", \"a\"]\n )\n expected = pd.DataFrame(\n [[1, 0, 0], [0, 1, 0], [0, 0, 1]], index=e_idx, columns=e_columns\n )\n tm.assert_frame_equal(result, expected)\n\n def test_margin_normalize(self):\n # GH 27500\n df = pd.DataFrame(\n {\n \"A\": [\"foo\", \"foo\", \"foo\", \"foo\", \"foo\", \"bar\", \"bar\", \"bar\", \"bar\"],\n \"B\": [\"one\", \"one\", \"one\", \"two\", \"two\", \"one\", \"one\", \"two\", \"two\"],\n \"C\": [\n \"small\",\n \"large\",\n \"large\",\n \"small\",\n \"small\",\n \"large\",\n \"small\",\n \"small\",\n \"large\",\n ],\n \"D\": [1, 2, 2, 3, 3, 4, 5, 6, 7],\n \"E\": [2, 4, 5, 5, 6, 6, 8, 9, 9],\n }\n )\n # normalize on index\n result = pd.crosstab(\n [df.A, df.B], df.C, margins=True, margins_name=\"Sub-Total\", normalize=0\n )\n expected = pd.DataFrame(\n [[0.5, 0.5], [0.5, 0.5], [0.666667, 0.333333], [0, 1], [0.444444, 0.555556]]\n )\n expected.index = MultiIndex(\n levels=[[\"Sub-Total\", \"bar\", \"foo\"], [\"\", \"one\", \"two\"]],\n codes=[[1, 1, 2, 2, 0], [1, 2, 1, 2, 0]],\n names=[\"A\", \"B\"],\n )\n expected.columns = Index([\"large\", \"small\"], dtype=\"object\", name=\"C\")\n tm.assert_frame_equal(result, expected)\n\n # normalize on columns\n result = pd.crosstab(\n [df.A, df.B], df.C, margins=True, 
margins_name=\"Sub-Total\", normalize=1\n )\n expected = pd.DataFrame(\n [\n [0.25, 0.2, 0.222222],\n [0.25, 0.2, 0.222222],\n [0.5, 0.2, 0.333333],\n [0, 0.4, 0.222222],\n ]\n )\n expected.columns = Index(\n [\"large\", \"small\", \"Sub-Total\"], dtype=\"object\", name=\"C\"\n )\n expected.index = MultiIndex(\n levels=[[\"bar\", \"foo\"], [\"one\", \"two\"]],\n codes=[[0, 0, 1, 1], [0, 1, 0, 1]],\n names=[\"A\", \"B\"],\n )\n tm.assert_frame_equal(result, expected)\n\n # normalize on both index and column\n result = pd.crosstab(\n [df.A, df.B], df.C, margins=True, margins_name=\"Sub-Total\", normalize=True\n )\n expected = pd.DataFrame(\n [\n [0.111111, 0.111111, 0.222222],\n [0.111111, 0.111111, 0.222222],\n [0.222222, 0.111111, 0.333333],\n [0.000000, 0.222222, 0.222222],\n [0.444444, 0.555555, 1],\n ]\n )\n expected.columns = Index(\n [\"large\", \"small\", \"Sub-Total\"], dtype=\"object\", name=\"C\"\n )\n expected.index = MultiIndex(\n levels=[[\"Sub-Total\", \"bar\", \"foo\"], [\"\", \"one\", \"two\"]],\n codes=[[1, 1, 2, 2, 0], [1, 2, 1, 2, 0]],\n names=[\"A\", \"B\"],\n )\n tm.assert_frame_equal(result, expected)\n",
"\"\"\"\nAbstract base classes define the primitives that renderers and\ngraphics contexts must implement to serve as a matplotlib backend\n\n:class:`RendererBase`\n An abstract base class to handle drawing/rendering operations.\n\n:class:`FigureCanvasBase`\n The abstraction layer that separates the\n :class:`matplotlib.figure.Figure` from the backend specific\n details like a user interface drawing area\n\n:class:`GraphicsContextBase`\n An abstract base class that provides color, line styles, etc...\n\n:class:`Event`\n The base class for all of the matplotlib event\n handling. Derived classes such as :class:`KeyEvent` and\n :class:`MouseEvent` store the meta data like keys and buttons\n pressed, x and y locations in pixel and\n :class:`~matplotlib.axes.Axes` coordinates.\n\n:class:`ShowBase`\n The base class for the Show class of each interactive backend;\n the 'show' callable is then set to Show.__call__, inherited from\n ShowBase.\n\n:class:`ToolContainerBase`\n The base class for the Toolbar class of each interactive backend.\n\n:class:`StatusbarBase`\n The base class for the messaging area.\n\"\"\"\n\nfrom contextlib import contextmanager\nfrom enum import IntEnum\nimport functools\nimport importlib\nimport io\nimport logging\nimport os\nimport sys\nimport time\nfrom weakref import WeakKeyDictionary\n\nimport numpy as np\n\nimport matplotlib as mpl\nfrom matplotlib import (\n backend_tools as tools, cbook, colors, textpath, tight_bbox, transforms,\n widgets, get_backend, is_interactive, rcParams)\nfrom matplotlib._pylab_helpers import Gcf\nfrom matplotlib.transforms import Affine2D\nfrom matplotlib.path import Path\n\ntry:\n from PIL import PILLOW_VERSION\n from distutils.version import LooseVersion\n if LooseVersion(PILLOW_VERSION) >= \"3.4\":\n _has_pil = True\n else:\n _has_pil = False\n del PILLOW_VERSION\nexcept ImportError:\n _has_pil = False\n\n_log = logging.getLogger(__name__)\n\n_default_filetypes = {\n 'ps': 'Postscript',\n 'eps': 'Encapsulated Postscript',\n 'pdf': 'Portable Document Format',\n 'pgf': 'PGF code for LaTeX',\n 'png': 'Portable Network Graphics',\n 'raw': 'Raw RGBA bitmap',\n 'rgba': 'Raw RGBA bitmap',\n 'svg': 'Scalable Vector Graphics',\n 'svgz': 'Scalable Vector Graphics'\n}\n\n\n_default_backends = {\n 'ps': 'matplotlib.backends.backend_ps',\n 'eps': 'matplotlib.backends.backend_ps',\n 'pdf': 'matplotlib.backends.backend_pdf',\n 'pgf': 'matplotlib.backends.backend_pgf',\n 'png': 'matplotlib.backends.backend_agg',\n 'raw': 'matplotlib.backends.backend_agg',\n 'rgba': 'matplotlib.backends.backend_agg',\n 'svg': 'matplotlib.backends.backend_svg',\n 'svgz': 'matplotlib.backends.backend_svg',\n}\n\n\ndef register_backend(format, backend, description=None):\n \"\"\"\n Register a backend for saving to a given file format.\n\n Parameters\n ----------\n format : str\n File extension\n\n backend : module string or canvas class\n Backend for handling file output\n\n description : str, optional\n Description of the file type. 
Defaults to an empty string\n \"\"\"\n if description is None:\n description = ''\n _default_backends[format] = backend\n _default_filetypes[format] = description\n\n\ndef get_registered_canvas_class(format):\n \"\"\"\n Return the registered default canvas for given file format.\n Handles deferred import of required backend.\n \"\"\"\n if format not in _default_backends:\n return None\n backend_class = _default_backends[format]\n if isinstance(backend_class, str):\n backend_class = importlib.import_module(backend_class).FigureCanvas\n _default_backends[format] = backend_class\n return backend_class\n\n\nclass RendererBase(object):\n \"\"\"An abstract base class to handle drawing/rendering operations.\n\n The following methods must be implemented in the backend for full\n functionality (though just implementing :meth:`draw_path` alone would\n give a highly capable backend):\n\n * :meth:`draw_path`\n * :meth:`draw_image`\n * :meth:`draw_gouraud_triangle`\n\n The following methods *should* be implemented in the backend for\n optimization reasons:\n\n * :meth:`draw_text`\n * :meth:`draw_markers`\n * :meth:`draw_path_collection`\n * :meth:`draw_quad_mesh`\n \"\"\"\n\n def __init__(self):\n self._texmanager = None\n self._text2path = textpath.TextToPath()\n\n def open_group(self, s, gid=None):\n \"\"\"\n Open a grouping element with label *s* and *gid* (if set) as id.\n\n Only used by the SVG renderer.\n \"\"\"\n\n def close_group(self, s):\n \"\"\"\n Close a grouping element with label *s*\n\n Only used by the SVG renderer.\n \"\"\"\n\n def draw_path(self, gc, path, transform, rgbFace=None):\n \"\"\"\n Draws a :class:`~matplotlib.path.Path` instance using the\n given affine transform.\n \"\"\"\n raise NotImplementedError\n\n def draw_markers(self, gc, marker_path, marker_trans, path,\n trans, rgbFace=None):\n \"\"\"\n Draws a marker at each of the vertices in path. This includes\n all vertices, including control points on curves. To avoid\n that behavior, those vertices should be removed before calling\n this function.\n\n This provides a fallback implementation of draw_markers that\n makes multiple calls to :meth:`draw_path`. Some backends may\n want to override this method in order to draw the marker only\n once and reuse it multiple times.\n\n Parameters\n ----------\n gc : `GraphicsContextBase`\n The graphics context\n\n marker_trans : `matplotlib.transforms.Transform`\n An affine transform applied to the marker.\n\n trans : `matplotlib.transforms.Transform`\n An affine transform applied to the path.\n\n \"\"\"\n for vertices, codes in path.iter_segments(trans, simplify=False):\n if len(vertices):\n x, y = vertices[-2:]\n self.draw_path(gc, marker_path,\n marker_trans +\n transforms.Affine2D().translate(x, y),\n rgbFace)\n\n def draw_path_collection(self, gc, master_transform, paths, all_transforms,\n offsets, offsetTrans, facecolors, edgecolors,\n linewidths, linestyles, antialiaseds, urls,\n offset_position):\n \"\"\"\n Draws a collection of paths selecting drawing properties from\n the lists *facecolors*, *edgecolors*, *linewidths*,\n *linestyles* and *antialiaseds*. *offsets* is a list of\n offsets to apply to each of the paths. The offsets in\n *offsets* are first transformed by *offsetTrans* before being\n applied. *offset_position* may be either \"screen\" or \"data\"\n depending on the space that the offsets are in.\n\n This provides a fallback implementation of\n :meth:`draw_path_collection` that makes multiple calls to\n :meth:`draw_path`. 
Some backends may want to override this in\n order to render each set of path data only once, and then\n reference that path multiple times with the different offsets,\n colors, styles etc. The generator methods\n :meth:`_iter_collection_raw_paths` and\n :meth:`_iter_collection` are provided to help with (and\n standardize) the implementation across backends. It is highly\n recommended to use those generators, so that changes to the\n behavior of :meth:`draw_path_collection` can be made globally.\n \"\"\"\n path_ids = [\n (path, transforms.Affine2D(transform))\n for path, transform in self._iter_collection_raw_paths(\n master_transform, paths, all_transforms)]\n\n for xo, yo, path_id, gc0, rgbFace in self._iter_collection(\n gc, master_transform, all_transforms, path_ids, offsets,\n offsetTrans, facecolors, edgecolors, linewidths, linestyles,\n antialiaseds, urls, offset_position):\n path, transform = path_id\n transform = transforms.Affine2D(\n transform.get_matrix()).translate(xo, yo)\n self.draw_path(gc0, path, transform, rgbFace)\n\n def draw_quad_mesh(self, gc, master_transform, meshWidth, meshHeight,\n coordinates, offsets, offsetTrans, facecolors,\n antialiased, edgecolors):\n \"\"\"\n This provides a fallback implementation of\n :meth:`draw_quad_mesh` that generates paths and then calls\n :meth:`draw_path_collection`.\n \"\"\"\n\n from matplotlib.collections import QuadMesh\n paths = QuadMesh.convert_mesh_to_paths(\n meshWidth, meshHeight, coordinates)\n\n if edgecolors is None:\n edgecolors = facecolors\n linewidths = np.array([gc.get_linewidth()], float)\n\n return self.draw_path_collection(\n gc, master_transform, paths, [], offsets, offsetTrans, facecolors,\n edgecolors, linewidths, [], [antialiased], [None], 'screen')\n\n def draw_gouraud_triangle(self, gc, points, colors, transform):\n \"\"\"\n Draw a Gouraud-shaded triangle.\n\n Parameters\n ----------\n points : array_like, shape=(3, 2)\n Array of (x, y) points for the triangle.\n\n colors : array_like, shape=(3, 4)\n RGBA colors for each point of the triangle.\n\n transform : `matplotlib.transforms.Transform`\n An affine transform to apply to the points.\n\n \"\"\"\n raise NotImplementedError\n\n def draw_gouraud_triangles(self, gc, triangles_array, colors_array,\n transform):\n \"\"\"\n Draws a series of Gouraud triangles.\n\n Parameters\n ----------\n points : array_like, shape=(N, 3, 2)\n Array of *N* (x, y) points for the triangles.\n\n colors : array_like, shape=(N, 3, 4)\n Array of *N* RGBA colors for each point of the triangles.\n\n transform : `matplotlib.transforms.Transform`\n An affine transform to apply to the points.\n \"\"\"\n transform = transform.frozen()\n for tri, col in zip(triangles_array, colors_array):\n self.draw_gouraud_triangle(gc, tri, col, transform)\n\n def _iter_collection_raw_paths(self, master_transform, paths,\n all_transforms):\n \"\"\"\n This is a helper method (along with :meth:`_iter_collection`) to make\n it easier to write a space-efficient :meth:`draw_path_collection`\n implementation in a backend.\n\n This method yields all of the base path/transform\n combinations, given a master transform, a list of paths and\n list of transforms.\n\n The arguments should be exactly what is passed in to\n :meth:`draw_path_collection`.\n\n The backend should take each yielded path and transform and\n create an object that can be referenced (reused) later.\n \"\"\"\n Npaths = len(paths)\n Ntransforms = len(all_transforms)\n N = max(Npaths, Ntransforms)\n\n if Npaths == 0:\n return\n\n transform = 
transforms.IdentityTransform()\n for i in range(N):\n path = paths[i % Npaths]\n if Ntransforms:\n transform = Affine2D(all_transforms[i % Ntransforms])\n yield path, transform + master_transform\n\n def _iter_collection_uses_per_path(self, paths, all_transforms,\n offsets, facecolors, edgecolors):\n \"\"\"\n Compute how many times each raw path object returned by\n _iter_collection_raw_paths would be used when calling\n _iter_collection. This is intended for the backend to decide\n on the tradeoff between using the paths in-line and storing\n them once and reusing. Rounds up in case the number of uses\n is not the same for every path.\n \"\"\"\n Npaths = len(paths)\n if Npaths == 0 or len(facecolors) == len(edgecolors) == 0:\n return 0\n Npath_ids = max(Npaths, len(all_transforms))\n N = max(Npath_ids, len(offsets))\n return (N + Npath_ids - 1) // Npath_ids\n\n def _iter_collection(self, gc, master_transform, all_transforms,\n path_ids, offsets, offsetTrans, facecolors,\n edgecolors, linewidths, linestyles,\n antialiaseds, urls, offset_position):\n \"\"\"\n This is a helper method (along with\n :meth:`_iter_collection_raw_paths`) to make it easier to write\n a space-efficient :meth:`draw_path_collection` implementation in a\n backend.\n\n This method yields all of the path, offset and graphics\n context combinations to draw the path collection. The caller\n should already have looped over the results of\n :meth:`_iter_collection_raw_paths` to draw this collection.\n\n The arguments should be the same as that passed into\n :meth:`draw_path_collection`, with the exception of\n *path_ids*, which is a list of arbitrary objects that the\n backend will use to reference one of the paths created in the\n :meth:`_iter_collection_raw_paths` stage.\n\n Each yielded result is of the form::\n\n xo, yo, path_id, gc, rgbFace\n\n where *xo*, *yo* is an offset; *path_id* is one of the elements of\n *path_ids*; *gc* is a graphics context and *rgbFace* is a color to\n use for filling the path.\n \"\"\"\n Ntransforms = len(all_transforms)\n Npaths = len(path_ids)\n Noffsets = len(offsets)\n N = max(Npaths, Noffsets)\n Nfacecolors = len(facecolors)\n Nedgecolors = len(edgecolors)\n Nlinewidths = len(linewidths)\n Nlinestyles = len(linestyles)\n Naa = len(antialiaseds)\n Nurls = len(urls)\n\n if (Nfacecolors == 0 and Nedgecolors == 0) or Npaths == 0:\n return\n if Noffsets:\n toffsets = offsetTrans.transform(offsets)\n\n gc0 = self.new_gc()\n gc0.copy_properties(gc)\n\n if Nfacecolors == 0:\n rgbFace = None\n\n if Nedgecolors == 0:\n gc0.set_linewidth(0.0)\n\n xo, yo = 0, 0\n for i in range(N):\n path_id = path_ids[i % Npaths]\n if Noffsets:\n xo, yo = toffsets[i % Noffsets]\n if offset_position == 'data':\n if Ntransforms:\n transform = (\n Affine2D(all_transforms[i % Ntransforms]) +\n master_transform)\n else:\n transform = master_transform\n xo, yo = transform.transform_point((xo, yo))\n xp, yp = transform.transform_point((0, 0))\n xo = -(xp - xo)\n yo = -(yp - yo)\n if not (np.isfinite(xo) and np.isfinite(yo)):\n continue\n if Nfacecolors:\n rgbFace = facecolors[i % Nfacecolors]\n if Nedgecolors:\n if Nlinewidths:\n gc0.set_linewidth(linewidths[i % Nlinewidths])\n if Nlinestyles:\n gc0.set_dashes(*linestyles[i % Nlinestyles])\n fg = edgecolors[i % Nedgecolors]\n if len(fg) == 4:\n if fg[3] == 0.0:\n gc0.set_linewidth(0)\n else:\n gc0.set_foreground(fg)\n else:\n gc0.set_foreground(fg)\n if rgbFace is not None and len(rgbFace) == 4:\n if rgbFace[3] == 0:\n rgbFace = None\n gc0.set_antialiased(antialiaseds[i 
% Naa])\n if Nurls:\n gc0.set_url(urls[i % Nurls])\n\n yield xo, yo, path_id, gc0, rgbFace\n gc0.restore()\n\n def get_image_magnification(self):\n \"\"\"\n Get the factor by which to magnify images passed to :meth:`draw_image`.\n Allows a backend to have images at a different resolution to other\n artists.\n \"\"\"\n return 1.0\n\n def draw_image(self, gc, x, y, im, transform=None):\n \"\"\"\n Draw an RGBA image.\n\n Parameters\n ----------\n gc : `GraphicsContextBase`\n a graphics context with clipping information.\n\n x : scalar\n the distance in physical units (i.e., dots or pixels) from the left\n hand side of the canvas.\n\n y : scalar\n the distance in physical units (i.e., dots or pixels) from the\n bottom side of the canvas.\n\n im : array_like, shape=(N, M, 4), dtype=np.uint8\n An array of RGBA pixels.\n\n transform : `matplotlib.transforms.Affine2DBase`\n If and only if the concrete backend is written such that\n :meth:`option_scale_image` returns ``True``, an affine\n transformation *may* be passed to :meth:`draw_image`. It takes the\n form of a :class:`~matplotlib.transforms.Affine2DBase` instance.\n The translation vector of the transformation is given in physical\n units (i.e., dots or pixels). Note that the transformation does not\n override `x` and `y`, and has to be applied *before* translating\n the result by `x` and `y` (this can be accomplished by adding `x`\n and `y` to the translation vector defined by `transform`).\n \"\"\"\n raise NotImplementedError\n\n def option_image_nocomposite(self):\n \"\"\"\n Return whether image composition by Matplotlib should be skipped.\n\n Raster backends should usually return False (letting the C-level\n rasterizer take care of image composition); vector backends should\n usually return ``not rcParams[\"image.composite_image\"]``.\n \"\"\"\n return False\n\n def option_scale_image(self):\n \"\"\"\n Return whether arbitrary affine transformations in :meth:`draw_image`\n are supported (True for most vector backends).\n \"\"\"\n return False\n\n def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None):\n \"\"\"\n \"\"\"\n self._draw_text_as_path(gc, x, y, s, prop, angle, ismath=\"TeX\")\n\n def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):\n \"\"\"\n Draw the text instance.\n\n Parameters\n ----------\n gc : `GraphicsContextBase`\n The graphics context.\n x : scalar\n The x location of the text in display coords.\n y : scalar\n The y location of the text baseline in display coords.\n s : str\n The text string.\n prop : `matplotlib.font_manager.FontProperties`\n The font properties.\n angle : scalar\n The rotation angle in degrees.\n mtext : `matplotlib.text.Text`\n The original text object to be rendered.\n\n Notes\n -----\n **backend implementers note**\n\n When you are trying to determine if you have gotten your bounding box\n right (which is what enables the text layout/alignment to work\n properly), it helps to change the line in text.py::\n\n if 0: bbox_artist(self, renderer)\n\n to if 1, and then the actual bounding box will be plotted along with\n your text.\n \"\"\"\n\n self._draw_text_as_path(gc, x, y, s, prop, angle, ismath)\n\n def _get_text_path_transform(self, x, y, s, prop, angle, ismath):\n \"\"\"\n Return the text path and transform.\n\n Parameters\n ----------\n prop : `matplotlib.font_manager.FontProperties`\n The font property.\n s : str\n The text to be converted.\n ismath : bool or \"TeX\"\n If True, use mathtext parser. 
If \"TeX\", use *usetex* mode.\n \"\"\"\n\n text2path = self._text2path\n fontsize = self.points_to_pixels(prop.get_size_in_points())\n verts, codes = text2path.get_text_path(prop, s, ismath=ismath)\n\n path = Path(verts, codes)\n angle = np.deg2rad(angle)\n if self.flipy():\n transform = Affine2D().scale(fontsize / text2path.FONT_SCALE,\n fontsize / text2path.FONT_SCALE)\n transform = transform.rotate(angle).translate(x, self.height - y)\n else:\n transform = Affine2D().scale(fontsize / text2path.FONT_SCALE,\n fontsize / text2path.FONT_SCALE)\n transform = transform.rotate(angle).translate(x, y)\n\n return path, transform\n\n def _draw_text_as_path(self, gc, x, y, s, prop, angle, ismath):\n \"\"\"\n Draw the text by converting them to paths using textpath module.\n\n Parameters\n ----------\n prop : `matplotlib.font_manager.FontProperties`\n The font property.\n s : str\n The text to be converted.\n usetex : bool\n Whether to use matplotlib usetex mode.\n ismath : bool or \"TeX\"\n If True, use mathtext parser. If \"TeX\", use *usetex* mode.\n \"\"\"\n path, transform = self._get_text_path_transform(\n x, y, s, prop, angle, ismath)\n color = gc.get_rgb()\n gc.set_linewidth(0.0)\n self.draw_path(gc, path, transform, rgbFace=color)\n\n def get_text_width_height_descent(self, s, prop, ismath):\n \"\"\"\n Get the width, height, and descent (offset from the bottom\n to the baseline), in display coords, of the string *s* with\n :class:`~matplotlib.font_manager.FontProperties` *prop*\n \"\"\"\n if ismath == 'TeX':\n # todo: handle props\n texmanager = self._text2path.get_texmanager()\n fontsize = prop.get_size_in_points()\n w, h, d = texmanager.get_text_width_height_descent(\n s, fontsize, renderer=self)\n return w, h, d\n\n dpi = self.points_to_pixels(72)\n if ismath:\n dims = self._text2path.mathtext_parser.parse(s, dpi, prop)\n return dims[0:3] # return width, height, descent\n\n flags = self._text2path._get_hinting_flag()\n font = self._text2path._get_font(prop)\n size = prop.get_size_in_points()\n font.set_size(size, dpi)\n # the width and height of unrotated string\n font.set_text(s, 0.0, flags=flags)\n w, h = font.get_width_height()\n d = font.get_descent()\n w /= 64.0 # convert from subpixels\n h /= 64.0\n d /= 64.0\n return w, h, d\n\n def flipy(self):\n \"\"\"\n Return whether y values increase from top to bottom.\n\n Note that this only affects drawing of texts and images.\n \"\"\"\n return True\n\n def get_canvas_width_height(self):\n \"\"\"Return the canvas width and height in display coords.\"\"\"\n return 1, 1\n\n def get_texmanager(self):\n \"\"\"Return the `.TexManager` instance.\"\"\"\n if self._texmanager is None:\n from matplotlib.texmanager import TexManager\n self._texmanager = TexManager()\n return self._texmanager\n\n def new_gc(self):\n \"\"\"Return an instance of a `GraphicsContextBase`.\"\"\"\n return GraphicsContextBase()\n\n def points_to_pixels(self, points):\n \"\"\"\n Convert points to display units.\n\n You need to override this function (unless your backend\n doesn't have a dpi, e.g., postscript or svg). 
Some imaging\n systems assume some value for pixels per inch::\n\n points to pixels = points * pixels_per_inch/72.0 * dpi/72.0\n\n Parameters\n ----------\n points : scalar or array_like\n a float or a numpy array of float\n\n Returns\n -------\n Points converted to pixels\n \"\"\"\n return points\n\n @cbook.deprecated(\"3.1\", alternative=\"cbook.strip_math\")\n def strip_math(self, s):\n return cbook.strip_math(s)\n\n def start_rasterizing(self):\n \"\"\"\n Switch to the raster renderer.\n\n Used by `MixedModeRenderer`.\n \"\"\"\n\n def stop_rasterizing(self):\n \"\"\"\n Switch back to the vector renderer and draw the contents of the raster\n renderer as an image on the vector renderer.\n\n Used by `MixedModeRenderer`.\n \"\"\"\n\n def start_filter(self):\n \"\"\"\n Switch to a temporary renderer for image filtering effects.\n\n Currently only supported by the agg renderer.\n \"\"\"\n\n def stop_filter(self, filter_func):\n \"\"\"\n Switch back to the original renderer. The contents of the temporary\n renderer is processed with the *filter_func* and is drawn on the\n original renderer as an image.\n\n Currently only supported by the agg renderer.\n \"\"\"\n\n\nclass GraphicsContextBase(object):\n \"\"\"An abstract base class that provides color, line styles, etc.\"\"\"\n\n def __init__(self):\n self._alpha = 1.0\n self._forced_alpha = False # if True, _alpha overrides A from RGBA\n self._antialiased = 1 # use 0,1 not True, False for extension code\n self._capstyle = 'butt'\n self._cliprect = None\n self._clippath = None\n self._dashes = None, None\n self._joinstyle = 'round'\n self._linestyle = 'solid'\n self._linewidth = 1\n self._rgb = (0.0, 0.0, 0.0, 1.0)\n self._hatch = None\n self._hatch_color = colors.to_rgba(rcParams['hatch.color'])\n self._hatch_linewidth = rcParams['hatch.linewidth']\n self._url = None\n self._gid = None\n self._snap = None\n self._sketch = None\n\n def copy_properties(self, gc):\n 'Copy properties from gc to self'\n self._alpha = gc._alpha\n self._forced_alpha = gc._forced_alpha\n self._antialiased = gc._antialiased\n self._capstyle = gc._capstyle\n self._cliprect = gc._cliprect\n self._clippath = gc._clippath\n self._dashes = gc._dashes\n self._joinstyle = gc._joinstyle\n self._linestyle = gc._linestyle\n self._linewidth = gc._linewidth\n self._rgb = gc._rgb\n self._hatch = gc._hatch\n self._hatch_color = gc._hatch_color\n self._hatch_linewidth = gc._hatch_linewidth\n self._url = gc._url\n self._gid = gc._gid\n self._snap = gc._snap\n self._sketch = gc._sketch\n\n def restore(self):\n \"\"\"\n Restore the graphics context from the stack - needed only\n for backends that save graphics contexts on a stack.\n \"\"\"\n\n def get_alpha(self):\n \"\"\"\n Return the alpha value used for blending - not supported on\n all backends.\n \"\"\"\n return self._alpha\n\n def get_antialiased(self):\n \"Return whether the object should try to do antialiased rendering.\"\n return self._antialiased\n\n def get_capstyle(self):\n \"\"\"\n Return the capstyle as a string in ('butt', 'round', 'projecting').\n \"\"\"\n return self._capstyle\n\n def get_clip_rectangle(self):\n \"\"\"\n Return the clip rectangle as a `~matplotlib.transforms.Bbox` instance.\n \"\"\"\n return self._cliprect\n\n def get_clip_path(self):\n \"\"\"\n Return the clip path in the form (path, transform), where path\n is a :class:`~matplotlib.path.Path` instance, and transform is\n an affine transform to apply to the path before clipping.\n \"\"\"\n if self._clippath is not None:\n return 
self._clippath.get_transformed_path_and_affine()\n return None, None\n\n def get_dashes(self):\n \"\"\"\n Return the dash information as an offset dashlist tuple.\n\n The dash list is a even size list that gives the ink on, ink\n off in pixels.\n\n See p107 of to PostScript `BLUEBOOK\n <https://www-cdf.fnal.gov/offline/PostScript/BLUEBOOK.PDF>`_\n for more info.\n\n Default value is None\n \"\"\"\n return self._dashes\n\n def get_forced_alpha(self):\n \"\"\"\n Return whether the value given by get_alpha() should be used to\n override any other alpha-channel values.\n \"\"\"\n return self._forced_alpha\n\n def get_joinstyle(self):\n \"\"\"Return the line join style as one of ('miter', 'round', 'bevel').\"\"\"\n return self._joinstyle\n\n def get_linewidth(self):\n \"\"\"Return the line width in points.\"\"\"\n return self._linewidth\n\n def get_rgb(self):\n \"\"\"Return a tuple of three or four floats from 0-1.\"\"\"\n return self._rgb\n\n def get_url(self):\n \"\"\"Return a url if one is set, None otherwise.\"\"\"\n return self._url\n\n def get_gid(self):\n \"\"\"Return the object identifier if one is set, None otherwise.\"\"\"\n return self._gid\n\n def get_snap(self):\n \"\"\"\n Returns the snap setting, which can be:\n\n * True: snap vertices to the nearest pixel center\n * False: leave vertices as-is\n * None: (auto) If the path contains only rectilinear line segments,\n round to the nearest pixel center\n \"\"\"\n return self._snap\n\n def set_alpha(self, alpha):\n \"\"\"\n Set the alpha value used for blending - not supported on all backends.\n\n If ``alpha=None`` (the default), the alpha components of the\n foreground and fill colors will be used to set their respective\n transparencies (where applicable); otherwise, ``alpha`` will override\n them.\n \"\"\"\n if alpha is not None:\n self._alpha = alpha\n self._forced_alpha = True\n else:\n self._alpha = 1.0\n self._forced_alpha = False\n self.set_foreground(self._rgb, isRGBA=True)\n\n def set_antialiased(self, b):\n \"\"\"Set whether object should be drawn with antialiased rendering.\"\"\"\n # Use ints to make life easier on extension code trying to read the gc.\n self._antialiased = int(bool(b))\n\n def set_capstyle(self, cs):\n \"\"\"Set the capstyle to be one of ('butt', 'round', 'projecting').\"\"\"\n if cs in ('butt', 'round', 'projecting'):\n self._capstyle = cs\n else:\n raise ValueError('Unrecognized cap style. Found %s' % cs)\n\n def set_clip_rectangle(self, rectangle):\n \"\"\"\n Set the clip rectangle with sequence (left, bottom, width, height)\n \"\"\"\n self._cliprect = rectangle\n\n def set_clip_path(self, path):\n \"\"\"\n Set the clip path and transformation. 
Path should be a\n :class:`~matplotlib.transforms.TransformedPath` instance.\n \"\"\"\n if (path is not None\n and not isinstance(path, transforms.TransformedPath)):\n raise ValueError(\"Path should be a \"\n \"matplotlib.transforms.TransformedPath instance\")\n self._clippath = path\n\n def set_dashes(self, dash_offset, dash_list):\n \"\"\"\n Set the dash style for the gc.\n\n Parameters\n ----------\n dash_offset : float\n is the offset (usually 0).\n\n dash_list : array_like\n specifies the on-off sequence as points.\n ``(None, None)`` specifies a solid line\n\n \"\"\"\n if dash_list is not None:\n dl = np.asarray(dash_list)\n if np.any(dl < 0.0):\n raise ValueError(\n \"All values in the dash list must be positive\")\n self._dashes = dash_offset, dash_list\n\n def set_foreground(self, fg, isRGBA=False):\n \"\"\"\n Set the foreground color.\n\n Parameters\n ----------\n fg : color\n isRGBA : bool\n If *fg* is known to be an ``(r, g, b, a)`` tuple, *isRGBA* can be\n set to True to improve performance.\n \"\"\"\n if self._forced_alpha and isRGBA:\n self._rgb = fg[:3] + (self._alpha,)\n elif self._forced_alpha:\n self._rgb = colors.to_rgba(fg, self._alpha)\n elif isRGBA:\n self._rgb = fg\n else:\n self._rgb = colors.to_rgba(fg)\n\n def set_joinstyle(self, js):\n \"\"\"Set the join style to be one of ('miter', 'round', 'bevel').\"\"\"\n if js in ('miter', 'round', 'bevel'):\n self._joinstyle = js\n else:\n raise ValueError('Unrecognized join style. Found %s' % js)\n\n def set_linewidth(self, w):\n \"\"\"Set the linewidth in points.\"\"\"\n self._linewidth = float(w)\n\n def set_url(self, url):\n \"\"\"Set the url for links in compatible backends.\"\"\"\n self._url = url\n\n def set_gid(self, id):\n \"\"\"Set the id.\"\"\"\n self._gid = id\n\n def set_snap(self, snap):\n \"\"\"\n Set the snap setting which may be:\n\n * True: snap vertices to the nearest pixel center\n * False: leave vertices as-is\n * None: (auto) If the path contains only rectilinear line segments,\n round to the nearest pixel center\n \"\"\"\n self._snap = snap\n\n def set_hatch(self, hatch):\n \"\"\"Set the hatch style (for fills).\"\"\"\n self._hatch = hatch\n\n def get_hatch(self):\n \"\"\"Get the current hatch style.\"\"\"\n return self._hatch\n\n def get_hatch_path(self, density=6.0):\n \"\"\"Return a `Path` for the current hatch.\"\"\"\n hatch = self.get_hatch()\n if hatch is None:\n return None\n return Path.hatch(hatch, density)\n\n def get_hatch_color(self):\n \"\"\"Get the hatch color.\"\"\"\n return self._hatch_color\n\n def set_hatch_color(self, hatch_color):\n \"\"\"Set the hatch color.\"\"\"\n self._hatch_color = hatch_color\n\n def get_hatch_linewidth(self):\n \"\"\"Get the hatch linewidth.\"\"\"\n return self._hatch_linewidth\n\n def get_sketch_params(self):\n \"\"\"\n Return the sketch parameters for the artist.\n\n Returns\n -------\n sketch_params : tuple or `None`\n\n A 3-tuple with the following elements:\n\n * `scale`: The amplitude of the wiggle perpendicular to the\n source line.\n * `length`: The length of the wiggle along the line.\n * `randomness`: The scale factor by which the length is\n shrunken or expanded.\n\n May return `None` if no sketch parameters were set.\n \"\"\"\n return self._sketch\n\n def set_sketch_params(self, scale=None, length=None, randomness=None):\n \"\"\"\n Set the sketch parameters.\n\n Parameters\n ----------\n scale : float, optional\n The amplitude of the wiggle perpendicular to the source line, in\n pixels. 
If scale is `None`, or not provided, no sketch filter will\n be provided.\n length : float, optional\n The length of the wiggle along the line, in pixels (default 128).\n randomness : float, optional\n The scale factor by which the length is shrunken or expanded\n (default 16).\n \"\"\"\n self._sketch = (\n None if scale is None\n else (scale, length or 128., randomness or 16.))\n\n\nclass TimerBase(object):\n \"\"\"\n A base class for providing timer events, useful for things animations.\n Backends need to implement a few specific methods in order to use their\n own timing mechanisms so that the timer events are integrated into their\n event loops.\n\n Mandatory functions that must be implemented:\n\n * `_timer_start`: Contains backend-specific code for starting\n the timer\n\n * `_timer_stop`: Contains backend-specific code for stopping\n the timer\n\n Optional overrides:\n\n * `_timer_set_single_shot`: Code for setting the timer to\n single shot operating mode, if supported by the timer\n object. If not, the `Timer` class itself will store the flag\n and the `_on_timer` method should be overridden to support\n such behavior.\n\n * `_timer_set_interval`: Code for setting the interval on the\n timer, if there is a method for doing so on the timer\n object.\n\n * `_on_timer`: This is the internal function that any timer\n object should call, which will handle the task of running\n all callbacks that have been set.\n\n Attributes\n ----------\n interval : scalar\n The time between timer events in milliseconds. Default is 1000 ms.\n\n single_shot : bool\n Boolean flag indicating whether this timer should operate as single\n shot (run once and then stop). Defaults to `False`.\n\n callbacks : List[Tuple[callable, Tuple, Dict]]\n Stores list of (func, args, kwargs) tuples that will be called upon\n timer events. This list can be manipulated directly, or the\n functions `add_callback` and `remove_callback` can be used.\n \"\"\"\n def __init__(self, interval=None, callbacks=None):\n #Initialize empty callbacks list and setup default settings if necssary\n if callbacks is None:\n self.callbacks = []\n else:\n self.callbacks = callbacks[:] # Create a copy\n\n if interval is None:\n self._interval = 1000\n else:\n self._interval = interval\n\n self._single = False\n\n # Default attribute for holding the GUI-specific timer object\n self._timer = None\n\n def __del__(self):\n \"\"\"Need to stop timer and possibly disconnect timer.\"\"\"\n self._timer_stop()\n\n def start(self, interval=None):\n \"\"\"\n Start the timer object.\n\n Parameters\n ----------\n interval : int, optional\n Timer interval in milliseconds; overrides a previously set interval\n if provided.\n \"\"\"\n if interval is not None:\n self._set_interval(interval)\n self._timer_start()\n\n def stop(self):\n \"\"\"Stop the timer.\"\"\"\n self._timer_stop()\n\n def _timer_start(self):\n pass\n\n def _timer_stop(self):\n pass\n\n @property\n def interval(self):\n return self._interval\n\n @interval.setter\n def interval(self, interval):\n # Force to int since none of the backends actually support fractional\n # milliseconds, and some error or give warnings.\n interval = int(interval)\n self._interval = interval\n self._timer_set_interval()\n\n @property\n def single_shot(self):\n return self._single\n\n @single_shot.setter\n def single_shot(self, ss):\n self._single = ss\n self._timer_set_single_shot()\n\n def add_callback(self, func, *args, **kwargs):\n \"\"\"\n Register *func* to be called by timer when the event fires. 
Any\n additional arguments provided will be passed to *func*.\n\n This function returns *func*, which makes it possible to use it as a\n decorator.\n \"\"\"\n self.callbacks.append((func, args, kwargs))\n return func\n\n def remove_callback(self, func, *args, **kwargs):\n \"\"\"\n Remove *func* from list of callbacks.\n\n *args* and *kwargs* are optional and used to distinguish between copies\n of the same function registered to be called with different arguments.\n This behavior is deprecated. In the future, `*args, **kwargs` won't be\n considered anymore; to keep a specific callback removable by itself,\n pass it to `add_callback` as a `functools.partial` object.\n \"\"\"\n if args or kwargs:\n cbook.warn_deprecated(\n \"3.1\", \"In a future version, Timer.remove_callback will not \"\n \"take *args, **kwargs anymore, but remove all callbacks where \"\n \"the callable matches; to keep a specific callback removable \"\n \"by itself, pass it to add_callback as a functools.partial \"\n \"object.\")\n self.callbacks.remove((func, args, kwargs))\n else:\n funcs = [c[0] for c in self.callbacks]\n if func in funcs:\n self.callbacks.pop(funcs.index(func))\n\n def _timer_set_interval(self):\n \"\"\"Used to set interval on underlying timer object.\"\"\"\n\n def _timer_set_single_shot(self):\n \"\"\"Used to set single shot on underlying timer object.\"\"\"\n\n def _on_timer(self):\n \"\"\"\n Runs all function that have been registered as callbacks. Functions\n can return False (or 0) if they should not be called any more. If there\n are no callbacks, the timer is automatically stopped.\n \"\"\"\n for func, args, kwargs in self.callbacks:\n ret = func(*args, **kwargs)\n # docstring above explains why we use `if ret == 0` here,\n # instead of `if not ret`.\n # This will also catch `ret == False` as `False == 0`\n # but does not annoy the linters\n # https://docs.python.org/3/library/stdtypes.html#boolean-values\n if ret == 0:\n self.callbacks.remove((func, args, kwargs))\n\n if len(self.callbacks) == 0:\n self.stop()\n\n\nclass Event(object):\n \"\"\"\n A matplotlib event. Attach additional attributes as defined in\n :meth:`FigureCanvasBase.mpl_connect`. The following attributes\n are defined and shown with their default values\n\n Attributes\n ----------\n name : str\n the event name\n\n canvas : `FigureCanvasBase`\n the backend-specific canvas instance generating the event\n\n guiEvent\n the GUI event that triggered the matplotlib event\n\n \"\"\"\n def __init__(self, name, canvas, guiEvent=None):\n self.name = name\n self.canvas = canvas\n self.guiEvent = guiEvent\n\n\nclass DrawEvent(Event):\n \"\"\"\n An event triggered by a draw operation on the canvas\n\n In most backends callbacks subscribed to this callback will be\n fired after the rendering is complete but before the screen is\n updated. Any extra artists drawn to the canvas's renderer will\n be reflected without an explicit call to ``blit``.\n\n .. 
warning ::\n\n Calling ``canvas.draw`` and ``canvas.blit`` in these callbacks may\n not be safe with all backends and may cause infinite recursion.\n\n In addition to the :class:`Event` attributes, the following event\n attributes are defined:\n\n Attributes\n ----------\n renderer : `RendererBase`\n the renderer for the draw event\n\n \"\"\"\n def __init__(self, name, canvas, renderer):\n Event.__init__(self, name, canvas)\n self.renderer = renderer\n\n\nclass ResizeEvent(Event):\n \"\"\"\n An event triggered by a canvas resize\n\n In addition to the :class:`Event` attributes, the following event\n attributes are defined:\n\n Attributes\n ----------\n width : scalar\n width of the canvas in pixels\n\n height : scalar\n height of the canvas in pixels\n\n \"\"\"\n def __init__(self, name, canvas):\n Event.__init__(self, name, canvas)\n self.width, self.height = canvas.get_width_height()\n\n\nclass CloseEvent(Event):\n \"\"\"An event triggered by a figure being closed.\"\"\"\n\n\nclass LocationEvent(Event):\n \"\"\"\n An event that has a screen location.\n\n The following additional attributes are defined and shown with\n their default values.\n\n In addition to the :class:`Event` attributes, the following\n event attributes are defined:\n\n Attributes\n ----------\n x : scalar\n x position - pixels from left of canvas\n\n y : scalar\n y position - pixels from bottom of canvas\n\n inaxes : bool\n the :class:`~matplotlib.axes.Axes` instance if mouse is over axes\n\n xdata : scalar\n x coord of mouse in data coords\n\n ydata : scalar\n y coord of mouse in data coords\n \"\"\"\n\n lastevent = None # the last event that was triggered before this one\n\n def __init__(self, name, canvas, x, y, guiEvent=None):\n \"\"\"\n *x*, *y* in figure coords, 0,0 = bottom, left\n \"\"\"\n Event.__init__(self, name, canvas, guiEvent=guiEvent)\n # x position - pixels from left of canvas\n self.x = int(x) if x is not None else x\n # y position - pixels from right of canvas\n self.y = int(y) if y is not None else y\n self.inaxes = None # the Axes instance if mouse us over axes\n self.xdata = None # x coord of mouse in data coords\n self.ydata = None # y coord of mouse in data coords\n\n if x is None or y is None:\n # cannot check if event was in axes if no x,y info\n self._update_enter_leave()\n return\n\n if self.canvas.mouse_grabber is None:\n self.inaxes = self.canvas.inaxes((x, y))\n else:\n self.inaxes = self.canvas.mouse_grabber\n\n if self.inaxes is not None:\n try:\n trans = self.inaxes.transData.inverted()\n xdata, ydata = trans.transform_point((x, y))\n except ValueError:\n pass\n else:\n self.xdata = xdata\n self.ydata = ydata\n\n self._update_enter_leave()\n\n def _update_enter_leave(self):\n 'process the figure/axes enter leave events'\n if LocationEvent.lastevent is not None:\n last = LocationEvent.lastevent\n if last.inaxes != self.inaxes:\n # process axes enter/leave events\n try:\n if last.inaxes is not None:\n last.canvas.callbacks.process('axes_leave_event', last)\n except Exception:\n pass\n # See ticket 2901582.\n # I think this is a valid exception to the rule\n # against catching all exceptions; if anything goes\n # wrong, we simply want to move on and process the\n # current event.\n if self.inaxes is not None:\n self.canvas.callbacks.process('axes_enter_event', self)\n\n else:\n # process a figure enter event\n if self.inaxes is not None:\n self.canvas.callbacks.process('axes_enter_event', self)\n\n LocationEvent.lastevent = self\n\n\nclass MouseButton(IntEnum):\n LEFT = 1\n MIDDLE = 2\n 
RIGHT = 3\n BACK = 8\n FORWARD = 9\n\n\nclass MouseEvent(LocationEvent):\n \"\"\"\n A mouse event ('button_press_event',\n 'button_release_event',\n 'scroll_event',\n 'motion_notify_event').\n\n In addition to the :class:`Event` and :class:`LocationEvent`\n attributes, the following attributes are defined:\n\n Attributes\n ----------\n button : {None, MouseButton.LEFT, MouseButton.MIDDLE, MouseButton.RIGHT, \\\n'up', 'down'}\n The button pressed. 'up' and 'down' are used for scroll events.\n Note that in the nbagg backend, both the middle and right clicks\n return RIGHT since right clicking will bring up the context menu in\n some browsers.\n Note that LEFT and RIGHT actually refer to the \"primary\" and\n \"secondary\" buttons, i.e. if the user inverts their left and right\n buttons (\"left-handed setting\") then the LEFT button will be the one\n physically on the right.\n\n key : None or str\n The key pressed when the mouse event triggered, e.g. 'shift'.\n See `KeyEvent`.\n\n step : scalar\n The number of scroll steps (positive for 'up', negative for 'down').\n This applies only to 'scroll_event' and defaults to 0 otherwise.\n\n dblclick : bool\n Whether the event is a double-click. This applies only to\n 'button_press_event' and is False otherwise. In particular, it's\n not used in 'button_release_event'.\n\n Examples\n --------\n Usage::\n\n def on_press(event):\n print('you pressed', event.button, event.xdata, event.ydata)\n\n cid = fig.canvas.mpl_connect('button_press_event', on_press)\n \"\"\"\n\n def __init__(self, name, canvas, x, y, button=None, key=None,\n step=0, dblclick=False, guiEvent=None):\n \"\"\"\n x, y in figure coords, 0,0 = bottom, left\n button pressed None, 1, 2, 3, 'up', 'down'\n \"\"\"\n LocationEvent.__init__(self, name, canvas, x, y, guiEvent=guiEvent)\n if button in MouseButton.__members__.values():\n button = MouseButton(button)\n self.button = button\n self.key = key\n self.step = step\n self.dblclick = dblclick\n\n def __str__(self):\n return (f\"{self.name}: \"\n f\"xy=({self.x}, {self.y}) xydata=({self.xdata}, {self.ydata}) \"\n f\"button={self.button} dblclick={self.dblclick} \"\n f\"inaxes={self.inaxes}\")\n\n\nclass PickEvent(Event):\n \"\"\"\n a pick event, fired when the user picks a location on the canvas\n sufficiently close to an artist.\n\n Attrs: all the :class:`Event` attributes plus\n\n Attributes\n ----------\n mouseevent : `MouseEvent`\n the mouse event that generated the pick\n\n artist : `matplotlib.artist.Artist`\n the picked artist\n\n other\n extra class dependent attrs -- e.g., a\n :class:`~matplotlib.lines.Line2D` pick may define different\n extra attributes than a\n :class:`~matplotlib.collections.PatchCollection` pick event\n\n Examples\n --------\n Usage::\n\n ax.plot(np.rand(100), 'o', picker=5) # 5 points tolerance\n\n def on_pick(event):\n line = event.artist\n xdata, ydata = line.get_data()\n ind = event.ind\n print('on pick line:', np.array([xdata[ind], ydata[ind]]).T)\n\n cid = fig.canvas.mpl_connect('pick_event', on_pick)\n\n \"\"\"\n def __init__(self, name, canvas, mouseevent, artist,\n guiEvent=None, **kwargs):\n Event.__init__(self, name, canvas, guiEvent)\n self.mouseevent = mouseevent\n self.artist = artist\n self.__dict__.update(kwargs)\n\n\nclass KeyEvent(LocationEvent):\n \"\"\"\n A key event (key press, key release).\n\n Attach additional attributes as defined in\n :meth:`FigureCanvasBase.mpl_connect`.\n\n In addition to the :class:`Event` and :class:`LocationEvent`\n attributes, the following attributes are 
defined:\n\n Attributes\n ----------\n key : None or str\n the key(s) pressed. Could be **None**, a single case sensitive ascii\n character (\"g\", \"G\", \"#\", etc.), a special key\n (\"control\", \"shift\", \"f1\", \"up\", etc.) or a\n combination of the above (e.g., \"ctrl+alt+g\", \"ctrl+alt+G\").\n\n Notes\n -----\n Modifier keys will be prefixed to the pressed key and will be in the order\n \"ctrl\", \"alt\", \"super\". The exception to this rule is when the pressed key\n is itself a modifier key, therefore \"ctrl+alt\" and \"alt+control\" can both\n be valid key values.\n\n Examples\n --------\n Usage::\n\n def on_key(event):\n print('you pressed', event.key, event.xdata, event.ydata)\n\n cid = fig.canvas.mpl_connect('key_press_event', on_key)\n\n \"\"\"\n def __init__(self, name, canvas, key, x=0, y=0, guiEvent=None):\n LocationEvent.__init__(self, name, canvas, x, y, guiEvent=guiEvent)\n self.key = key\n\n\nclass FigureCanvasBase(object):\n \"\"\"\n The canvas the figure renders into.\n\n Public attributes\n\n Attributes\n ----------\n figure : `matplotlib.figure.Figure`\n A high-level figure instance\n\n \"\"\"\n events = [\n 'resize_event',\n 'draw_event',\n 'key_press_event',\n 'key_release_event',\n 'button_press_event',\n 'button_release_event',\n 'scroll_event',\n 'motion_notify_event',\n 'pick_event',\n 'idle_event',\n 'figure_enter_event',\n 'figure_leave_event',\n 'axes_enter_event',\n 'axes_leave_event',\n 'close_event'\n ]\n\n supports_blit = True\n fixed_dpi = None\n\n filetypes = _default_filetypes\n if _has_pil:\n # JPEG support\n register_backend('jpg', 'matplotlib.backends.backend_agg',\n 'Joint Photographic Experts Group')\n register_backend('jpeg', 'matplotlib.backends.backend_agg',\n 'Joint Photographic Experts Group')\n # TIFF support\n register_backend('tif', 'matplotlib.backends.backend_agg',\n 'Tagged Image File Format')\n register_backend('tiff', 'matplotlib.backends.backend_agg',\n 'Tagged Image File Format')\n\n def __init__(self, figure):\n self._fix_ipython_backend2gui()\n self._is_idle_drawing = True\n self._is_saving = False\n figure.set_canvas(self)\n self.figure = figure\n # a dictionary from event name to a dictionary that maps cid->func\n self.callbacks = cbook.CallbackRegistry()\n self.widgetlock = widgets.LockDraw()\n self._button = None # the button pressed\n self._key = None # the key pressed\n self._lastx, self._lasty = None, None\n self.button_pick_id = self.mpl_connect('button_press_event', self.pick)\n self.scroll_pick_id = self.mpl_connect('scroll_event', self.pick)\n self.mouse_grabber = None # the axes currently grabbing mouse\n self.toolbar = None # NavigationToolbar2 will set me\n self._is_idle_drawing = False\n\n @classmethod\n @functools.lru_cache()\n def _fix_ipython_backend2gui(cls):\n # Fix hard-coded module -> toolkit mapping in IPython (used for\n # `ipython --auto`). 
This cannot be done at import time due to\n # ordering issues, so we do it when creating a canvas, and should only\n # be done once per class (hence the `lru_cache(1)`).\n if \"IPython\" not in sys.modules:\n return\n import IPython\n ip = IPython.get_ipython()\n if not ip:\n return\n from IPython.core import pylabtools as pt\n if (not hasattr(pt, \"backend2gui\")\n or not hasattr(ip, \"enable_matplotlib\")):\n # In case we ever move the patch to IPython and remove these APIs,\n # don't break on our side.\n return\n backend_mod = sys.modules[cls.__module__]\n rif = getattr(backend_mod, \"required_interactive_framework\", None)\n backend2gui_rif = {\"qt5\": \"qt\", \"qt4\": \"qt\", \"gtk3\": \"gtk3\",\n \"wx\": \"wx\", \"macosx\": \"osx\"}.get(rif)\n if backend2gui_rif:\n pt.backend2gui[get_backend()] = backend2gui_rif\n # Work around pylabtools.find_gui_and_backend always reading from\n # rcParamsOrig.\n orig_origbackend = mpl.rcParamsOrig[\"backend\"]\n try:\n mpl.rcParamsOrig[\"backend\"] = mpl.rcParams[\"backend\"]\n ip.enable_matplotlib()\n finally:\n mpl.rcParamsOrig[\"backend\"] = orig_origbackend\n\n @contextmanager\n def _idle_draw_cntx(self):\n self._is_idle_drawing = True\n yield\n self._is_idle_drawing = False\n\n def is_saving(self):\n \"\"\"\n Returns whether the renderer is in the process of saving\n to a file, rather than rendering for an on-screen buffer.\n \"\"\"\n return self._is_saving\n\n def pick(self, mouseevent):\n if not self.widgetlock.locked():\n self.figure.pick(mouseevent)\n\n def blit(self, bbox=None):\n \"\"\"Blit the canvas in bbox (default entire canvas).\"\"\"\n\n def resize(self, w, h):\n \"\"\"Set the canvas size in pixels.\"\"\"\n\n def draw_event(self, renderer):\n \"\"\"Pass a `DrawEvent` to all functions connected to ``draw_event``.\"\"\"\n s = 'draw_event'\n event = DrawEvent(s, self, renderer)\n self.callbacks.process(s, event)\n\n def resize_event(self):\n \"\"\"Pass a `ResizeEvent` to all functions connected to ``resize_event``.\n \"\"\"\n s = 'resize_event'\n event = ResizeEvent(s, self)\n self.callbacks.process(s, event)\n self.draw_idle()\n\n def close_event(self, guiEvent=None):\n \"\"\"Pass a `CloseEvent` to all functions connected to ``close_event``.\n \"\"\"\n s = 'close_event'\n try:\n event = CloseEvent(s, self, guiEvent=guiEvent)\n self.callbacks.process(s, event)\n except (TypeError, AttributeError):\n pass\n # Suppress the TypeError when the python session is being killed.\n # It may be that a better solution would be a mechanism to\n # disconnect all callbacks upon shutdown.\n # AttributeError occurs on OSX with qt4agg upon exiting\n # with an open window; 'callbacks' attribute no longer exists.\n\n def key_press_event(self, key, guiEvent=None):\n \"\"\"Pass a `KeyEvent` to all functions connected to ``key_press_event``.\n \"\"\"\n self._key = key\n s = 'key_press_event'\n event = KeyEvent(\n s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)\n self.callbacks.process(s, event)\n\n def key_release_event(self, key, guiEvent=None):\n \"\"\"\n Pass a `KeyEvent` to all functions connected to ``key_release_event``.\n \"\"\"\n s = 'key_release_event'\n event = KeyEvent(\n s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)\n self.callbacks.process(s, event)\n self._key = None\n\n def pick_event(self, mouseevent, artist, **kwargs):\n \"\"\"\n This method will be called by artists who are picked and will\n fire off :class:`PickEvent` callbacks registered listeners\n \"\"\"\n s = 'pick_event'\n event = PickEvent(s, self, 
mouseevent, artist,\n guiEvent=mouseevent.guiEvent,\n **kwargs)\n self.callbacks.process(s, event)\n\n def scroll_event(self, x, y, step, guiEvent=None):\n \"\"\"\n Backend derived classes should call this function on any\n scroll wheel event. x,y are the canvas coords: 0,0 is lower,\n left. button and key are as defined in MouseEvent.\n\n This method will be call all functions connected to the\n 'scroll_event' with a :class:`MouseEvent` instance.\n \"\"\"\n if step >= 0:\n self._button = 'up'\n else:\n self._button = 'down'\n s = 'scroll_event'\n mouseevent = MouseEvent(s, self, x, y, self._button, self._key,\n step=step, guiEvent=guiEvent)\n self.callbacks.process(s, mouseevent)\n\n def button_press_event(self, x, y, button, dblclick=False, guiEvent=None):\n \"\"\"\n Backend derived classes should call this function on any mouse\n button press. x,y are the canvas coords: 0,0 is lower, left.\n button and key are as defined in :class:`MouseEvent`.\n\n This method will be call all functions connected to the\n 'button_press_event' with a :class:`MouseEvent` instance.\n \"\"\"\n self._button = button\n s = 'button_press_event'\n mouseevent = MouseEvent(s, self, x, y, button, self._key,\n dblclick=dblclick, guiEvent=guiEvent)\n self.callbacks.process(s, mouseevent)\n\n def button_release_event(self, x, y, button, guiEvent=None):\n \"\"\"\n Backend derived classes should call this function on any mouse\n button release.\n\n This method will call all functions connected to the\n 'button_release_event' with a :class:`MouseEvent` instance.\n\n Parameters\n ----------\n x : scalar\n the canvas coordinates where 0=left\n\n y : scalar\n the canvas coordinates where 0=bottom\n\n guiEvent\n the native UI event that generated the mpl event\n\n \"\"\"\n s = 'button_release_event'\n event = MouseEvent(s, self, x, y, button, self._key, guiEvent=guiEvent)\n self.callbacks.process(s, event)\n self._button = None\n\n def motion_notify_event(self, x, y, guiEvent=None):\n \"\"\"\n Backend derived classes should call this function on any\n motion-notify-event.\n\n This method will call all functions connected to the\n 'motion_notify_event' with a :class:`MouseEvent` instance.\n\n Parameters\n ----------\n x : scalar\n the canvas coordinates where 0=left\n\n y : scalar\n the canvas coordinates where 0=bottom\n\n guiEvent\n the native UI event that generated the mpl event\n\n \"\"\"\n self._lastx, self._lasty = x, y\n s = 'motion_notify_event'\n event = MouseEvent(s, self, x, y, self._button, self._key,\n guiEvent=guiEvent)\n self.callbacks.process(s, event)\n\n def leave_notify_event(self, guiEvent=None):\n \"\"\"\n Backend derived classes should call this function when leaving\n canvas\n\n Parameters\n ----------\n guiEvent\n the native UI event that generated the mpl event\n\n \"\"\"\n\n self.callbacks.process('figure_leave_event', LocationEvent.lastevent)\n LocationEvent.lastevent = None\n self._lastx, self._lasty = None, None\n\n def enter_notify_event(self, guiEvent=None, xy=None):\n \"\"\"\n Backend derived classes should call this function when entering\n canvas\n\n Parameters\n ----------\n guiEvent\n the native UI event that generated the mpl event\n xy : (float, float)\n the coordinate location of the pointer when the canvas is\n entered\n\n \"\"\"\n if xy is not None:\n x, y = xy\n self._lastx, self._lasty = x, y\n else:\n x = None\n y = None\n cbook.warn_deprecated(\n '3.0', message='enter_notify_event expects a location but '\n 'your backend did not pass one.')\n\n event = 
LocationEvent('figure_enter_event', self, x, y, guiEvent)\n self.callbacks.process('figure_enter_event', event)\n\n def inaxes(self, xy):\n \"\"\"\n Check if a point is in an axes.\n\n Parameters\n ----------\n xy : tuple or list\n (x,y) coordinates.\n x position - pixels from left of canvas.\n y position - pixels from bottom of canvas.\n\n Returns\n -------\n axes: topmost axes containing the point, or None if no axes.\n\n \"\"\"\n axes_list = [a for a in self.figure.get_axes()\n if a.patch.contains_point(xy)]\n\n if axes_list:\n axes = cbook._topmost_artist(axes_list)\n else:\n axes = None\n\n return axes\n\n def grab_mouse(self, ax):\n \"\"\"\n Set the child axes which are currently grabbing the mouse events.\n Usually called by the widgets themselves.\n It is an error to call this if the mouse is already grabbed by\n another axes.\n \"\"\"\n if self.mouse_grabber not in (None, ax):\n raise RuntimeError(\"Another Axes already grabs mouse input\")\n self.mouse_grabber = ax\n\n def release_mouse(self, ax):\n \"\"\"\n Release the mouse grab held by the axes, ax.\n Usually called by the widgets.\n It is ok to call this even if you ax doesn't have the mouse\n grab currently.\n \"\"\"\n if self.mouse_grabber is ax:\n self.mouse_grabber = None\n\n def draw(self, *args, **kwargs):\n \"\"\"Render the :class:`~matplotlib.figure.Figure`.\"\"\"\n\n def draw_idle(self, *args, **kwargs):\n \"\"\"\n Request a widget redraw once control returns to the GUI event loop.\n\n Even if multiple calls to `draw_idle` occur before control returns\n to the GUI event loop, the figure will only be rendered once.\n\n Notes\n -----\n Backends may choose to override the method and implement their own\n strategy to prevent multiple renderings.\n\n \"\"\"\n if not self._is_idle_drawing:\n with self._idle_draw_cntx():\n self.draw(*args, **kwargs)\n\n def draw_cursor(self, event):\n \"\"\"\n Draw a cursor in the event.axes if inaxes is not None. 
Use\n native GUI drawing for efficiency if possible\n \"\"\"\n\n def get_width_height(self):\n \"\"\"\n Return the figure width and height in points or pixels\n (depending on the backend), truncated to integers\n \"\"\"\n return int(self.figure.bbox.width), int(self.figure.bbox.height)\n\n @classmethod\n def get_supported_filetypes(cls):\n \"\"\"Return dict of savefig file formats supported by this backend\"\"\"\n return cls.filetypes\n\n @classmethod\n def get_supported_filetypes_grouped(cls):\n \"\"\"Return a dict of savefig file formats supported by this backend,\n where the keys are a file type name, such as 'Joint Photographic\n Experts Group', and the values are a list of filename extensions used\n for that filetype, such as ['jpg', 'jpeg'].\"\"\"\n groupings = {}\n for ext, name in cls.filetypes.items():\n groupings.setdefault(name, []).append(ext)\n groupings[name].sort()\n return groupings\n\n def _get_output_canvas(self, fmt):\n \"\"\"\n Return a canvas suitable for saving figures to a specified file format.\n\n If necessary, this function will switch to a registered backend that\n supports the format.\n \"\"\"\n # Return the current canvas if it supports the requested format.\n if hasattr(self, 'print_{}'.format(fmt)):\n return self\n # Return a default canvas for the requested format, if it exists.\n canvas_class = get_registered_canvas_class(fmt)\n if canvas_class:\n return self.switch_backends(canvas_class)\n # Else report error for unsupported format.\n raise ValueError(\n \"Format {!r} is not supported (supported formats: {})\"\n .format(fmt, \", \".join(sorted(self.get_supported_filetypes()))))\n\n def print_figure(self, filename, dpi=None, facecolor=None, edgecolor=None,\n orientation='portrait', format=None,\n *, bbox_inches=None, **kwargs):\n \"\"\"\n Render the figure to hardcopy. Set the figure patch face and edge\n colors. This is useful because some of the GUIs have a gray figure\n face color background and you'll probably want to override this on\n hardcopy.\n\n Parameters\n ----------\n filename\n can also be a file object on image backends\n\n orientation : {'landscape', 'portrait'}, optional\n only currently applies to PostScript printing.\n\n dpi : scalar, optional\n the dots per inch to save the figure in; if None, use savefig.dpi\n\n facecolor : color or None, optional\n the facecolor of the figure; if None, defaults to savefig.facecolor\n\n edgecolor : color or None, optional\n the edgecolor of the figure; if None, defaults to savefig.edgecolor\n\n format : str, optional\n when set, forcibly set the file format to save to\n\n bbox_inches : str or `~matplotlib.transforms.Bbox`, optional\n Bbox in inches. Only the given portion of the figure is\n saved. If 'tight', try to figure out the tight bbox of\n the figure. If None, use savefig.bbox\n\n pad_inches : scalar, optional\n Amount of padding around the figure when bbox_inches is\n 'tight'. If None, use savefig.pad_inches\n\n bbox_extra_artists : list of `~matplotlib.artist.Artist`, optional\n A list of extra artists that will be considered when the\n tight bbox is calculated.\n\n \"\"\"\n if format is None:\n # get format from filename, or from backend's default filetype\n if isinstance(filename, os.PathLike):\n filename = os.fspath(filename)\n if isinstance(filename, str):\n format = os.path.splitext(filename)[1][1:]\n if format is None or format == '':\n format = self.get_default_filetype()\n if isinstance(filename, str):\n filename = filename.rstrip('.') + '.' 
+ format\n format = format.lower()\n\n # get canvas object and print method for format\n canvas = self._get_output_canvas(format)\n print_method = getattr(canvas, 'print_%s' % format)\n\n if dpi is None:\n dpi = rcParams['savefig.dpi']\n if dpi == 'figure':\n dpi = getattr(self.figure, '_original_dpi', self.figure.dpi)\n\n # Remove the figure manager, if any, to avoid resizing the GUI widget.\n # Some code (e.g. Figure.show) differentiates between having *no*\n # manager and a *None* manager, which should be fixed at some point,\n # but this should be fine.\n with cbook._setattr_cm(self, _is_saving=True, manager=None), \\\n cbook._setattr_cm(self.figure, dpi=dpi):\n\n if facecolor is None:\n facecolor = rcParams['savefig.facecolor']\n if edgecolor is None:\n edgecolor = rcParams['savefig.edgecolor']\n\n origfacecolor = self.figure.get_facecolor()\n origedgecolor = self.figure.get_edgecolor()\n\n self.figure.set_facecolor(facecolor)\n self.figure.set_edgecolor(edgecolor)\n\n if bbox_inches is None:\n bbox_inches = rcParams['savefig.bbox']\n\n if bbox_inches:\n # call adjust_bbox to save only the given area\n if bbox_inches == \"tight\":\n # When bbox_inches == \"tight\", it saves the figure twice.\n # The first save command (to a BytesIO) is just to estimate\n # the bounding box of the figure.\n result = print_method(\n io.BytesIO(),\n dpi=dpi,\n facecolor=facecolor,\n edgecolor=edgecolor,\n orientation=orientation,\n dryrun=True,\n **kwargs)\n renderer = self.figure._cachedRenderer\n bbox_artists = kwargs.pop(\"bbox_extra_artists\", None)\n bbox_inches = self.figure.get_tightbbox(renderer,\n bbox_extra_artists=bbox_artists)\n pad = kwargs.pop(\"pad_inches\", None)\n if pad is None:\n pad = rcParams['savefig.pad_inches']\n\n bbox_inches = bbox_inches.padded(pad)\n\n restore_bbox = tight_bbox.adjust_bbox(self.figure, bbox_inches,\n canvas.fixed_dpi)\n\n _bbox_inches_restore = (bbox_inches, restore_bbox)\n else:\n _bbox_inches_restore = None\n\n try:\n result = print_method(\n filename,\n dpi=dpi,\n facecolor=facecolor,\n edgecolor=edgecolor,\n orientation=orientation,\n bbox_inches_restore=_bbox_inches_restore,\n **kwargs)\n finally:\n if bbox_inches and restore_bbox:\n restore_bbox()\n\n self.figure.set_facecolor(origfacecolor)\n self.figure.set_edgecolor(origedgecolor)\n self.figure.set_canvas(self)\n return result\n\n @classmethod\n def get_default_filetype(cls):\n \"\"\"\n Get the default savefig file format as specified in rcParam\n ``savefig.format``. Returned string excludes period. Overridden\n in backends that only support a single file type.\n \"\"\"\n return rcParams['savefig.format']\n\n def get_window_title(self):\n \"\"\"\n Get the title text of the window containing the figure.\n Return None if there is no window (e.g., a PS backend).\n \"\"\"\n if hasattr(self, \"manager\"):\n return self.manager.get_window_title()\n\n def set_window_title(self, title):\n \"\"\"\n Set the title text of the window containing the figure. Note that\n this has no effect if there is no window (e.g., a PS backend).\n \"\"\"\n if hasattr(self, \"manager\"):\n self.manager.set_window_title(title)\n\n def get_default_filename(self):\n \"\"\"\n Return a string, which includes extension, suitable for use as\n a default filename.\n \"\"\"\n default_basename = self.get_window_title() or 'image'\n default_basename = default_basename.replace(' ', '_')\n default_filetype = self.get_default_filetype()\n default_filename = default_basename + '.' 
+ default_filetype\n return default_filename\n\n def switch_backends(self, FigureCanvasClass):\n \"\"\"\n Instantiate an instance of FigureCanvasClass\n\n This is used for backend switching, e.g., to instantiate a\n FigureCanvasPS from a FigureCanvasGTK. Note, deep copying is\n not done, so any changes to one of the instances (e.g., setting\n figure size or line props), will be reflected in the other\n \"\"\"\n newCanvas = FigureCanvasClass(self.figure)\n newCanvas._is_saving = self._is_saving\n return newCanvas\n\n def mpl_connect(self, s, func):\n \"\"\"\n Connect event with string *s* to *func*. The signature of *func* is::\n\n def func(event)\n\n where event is a :class:`matplotlib.backend_bases.Event`. The\n following events are recognized\n\n - 'button_press_event'\n - 'button_release_event'\n - 'draw_event'\n - 'key_press_event'\n - 'key_release_event'\n - 'motion_notify_event'\n - 'pick_event'\n - 'resize_event'\n - 'scroll_event'\n - 'figure_enter_event',\n - 'figure_leave_event',\n - 'axes_enter_event',\n - 'axes_leave_event'\n - 'close_event'\n\n For the location events (button and key press/release), if the\n mouse is over the axes, the variable ``event.inaxes`` will be\n set to the :class:`~matplotlib.axes.Axes` the event occurs is\n over, and additionally, the variables ``event.xdata`` and\n ``event.ydata`` will be defined. This is the mouse location\n in data coords. See\n :class:`~matplotlib.backend_bases.KeyEvent` and\n :class:`~matplotlib.backend_bases.MouseEvent` for more info.\n\n Return value is a connection id that can be used with\n :meth:`~matplotlib.backend_bases.Event.mpl_disconnect`.\n\n Examples\n --------\n Usage::\n\n def on_press(event):\n print('you pressed', event.button, event.xdata, event.ydata)\n\n cid = canvas.mpl_connect('button_press_event', on_press)\n \"\"\"\n\n return self.callbacks.connect(s, func)\n\n def mpl_disconnect(self, cid):\n \"\"\"\n Disconnect callback id cid\n\n Examples\n --------\n Usage::\n\n cid = canvas.mpl_connect('button_press_event', on_press)\n #...later\n canvas.mpl_disconnect(cid)\n \"\"\"\n return self.callbacks.disconnect(cid)\n\n def new_timer(self, *args, **kwargs):\n \"\"\"\n Creates a new backend-specific subclass of\n :class:`backend_bases.Timer`. This is useful for getting periodic\n events through the backend's native event loop. 
Implemented only for\n backends with GUIs.\n\n Other Parameters\n ----------------\n interval : scalar\n Timer interval in milliseconds\n\n callbacks : List[Tuple[callable, Tuple, Dict]]\n Sequence of (func, args, kwargs) where ``func(*args, **kwargs)``\n will be executed by the timer every *interval*.\n\n callbacks which return ``False`` or ``0`` will be removed from the\n timer.\n\n Examples\n --------\n\n >>> timer = fig.canvas.new_timer(callbacks=[(f1, (1, ), {'a': 3}),])\n\n \"\"\"\n return TimerBase(*args, **kwargs)\n\n def flush_events(self):\n \"\"\"\n Flush the GUI events for the figure.\n\n Interactive backends need to reimplement this method.\n \"\"\"\n\n def start_event_loop(self, timeout=0):\n \"\"\"Start a blocking event loop.\n\n Such an event loop is used by interactive functions, such as `ginput`\n and `waitforbuttonpress`, to wait for events.\n\n The event loop blocks until a callback function triggers\n `stop_event_loop`, or *timeout* is reached.\n\n If *timeout* is negative, never timeout.\n\n Only interactive backends need to reimplement this method and it relies\n on `flush_events` being properly implemented.\n\n Interactive backends should implement this in a more native way.\n \"\"\"\n if timeout <= 0:\n timeout = np.inf\n timestep = 0.01\n counter = 0\n self._looping = True\n while self._looping and counter * timestep < timeout:\n self.flush_events()\n time.sleep(timestep)\n counter += 1\n\n def stop_event_loop(self):\n \"\"\"Stop the current blocking event loop.\n\n Interactive backends need to reimplement this to match\n `start_event_loop`\n \"\"\"\n self._looping = False\n\n\ndef key_press_handler(event, canvas, toolbar=None):\n \"\"\"\n Implement the default mpl key bindings for the canvas and toolbar\n described at :ref:`key-event-handling`\n\n Parameters\n ----------\n event : :class:`KeyEvent`\n a key press/release event\n canvas : :class:`FigureCanvasBase`\n the backend-specific canvas instance\n toolbar : :class:`NavigationToolbar2`\n the navigation cursor toolbar\n\n \"\"\"\n # these bindings happen whether you are over an axes or not\n\n if event.key is None:\n return\n\n # Load key-mappings from rcParams.\n fullscreen_keys = rcParams['keymap.fullscreen']\n home_keys = rcParams['keymap.home']\n back_keys = rcParams['keymap.back']\n forward_keys = rcParams['keymap.forward']\n pan_keys = rcParams['keymap.pan']\n zoom_keys = rcParams['keymap.zoom']\n save_keys = rcParams['keymap.save']\n quit_keys = rcParams['keymap.quit']\n grid_keys = rcParams['keymap.grid']\n grid_minor_keys = rcParams['keymap.grid_minor']\n toggle_yscale_keys = rcParams['keymap.yscale']\n toggle_xscale_keys = rcParams['keymap.xscale']\n all_keys = rcParams['keymap.all_axes']\n\n # toggle fullscreen mode ('f', 'ctrl + f')\n if event.key in fullscreen_keys:\n try:\n canvas.manager.full_screen_toggle()\n except AttributeError:\n pass\n\n # quit the figure (default key 'ctrl+w')\n if event.key in quit_keys:\n Gcf.destroy_fig(canvas.figure)\n\n if toolbar is not None:\n # home or reset mnemonic (default key 'h', 'home' and 'r')\n if event.key in home_keys:\n toolbar.home()\n # forward / backward keys to enable left handed quick navigation\n # (default key for backward: 'left', 'backspace' and 'c')\n elif event.key in back_keys:\n toolbar.back()\n # (default key for forward: 'right' and 'v')\n elif event.key in forward_keys:\n toolbar.forward()\n # pan mnemonic (default key 'p')\n elif event.key in pan_keys:\n toolbar.pan()\n toolbar._set_cursor(event)\n # zoom mnemonic (default key 'o')\n 
elif event.key in zoom_keys:\n toolbar.zoom()\n toolbar._set_cursor(event)\n # saving current figure (default key 's')\n elif event.key in save_keys:\n toolbar.save_figure()\n\n if event.inaxes is None:\n return\n\n # these bindings require the mouse to be over an axes to trigger\n def _get_uniform_gridstate(ticks):\n # Return True/False if all grid lines are on or off, None if they are\n # not all in the same state.\n if all(tick.gridline.get_visible() for tick in ticks):\n return True\n elif not any(tick.gridline.get_visible() for tick in ticks):\n return False\n else:\n return None\n\n ax = event.inaxes\n # toggle major grids in current axes (default key 'g')\n # Both here and below (for 'G'), we do nothing if *any* grid (major or\n # minor, x or y) is not in a uniform state, to avoid messing up user\n # customization.\n if (event.key in grid_keys\n # Exclude minor grids not in a uniform state.\n and None not in [_get_uniform_gridstate(ax.xaxis.minorTicks),\n _get_uniform_gridstate(ax.yaxis.minorTicks)]):\n x_state = _get_uniform_gridstate(ax.xaxis.majorTicks)\n y_state = _get_uniform_gridstate(ax.yaxis.majorTicks)\n cycle = [(False, False), (True, False), (True, True), (False, True)]\n try:\n x_state, y_state = (\n cycle[(cycle.index((x_state, y_state)) + 1) % len(cycle)])\n except ValueError:\n # Exclude major grids not in a uniform state.\n pass\n else:\n # If turning major grids off, also turn minor grids off.\n ax.grid(x_state, which=\"major\" if x_state else \"both\", axis=\"x\")\n ax.grid(y_state, which=\"major\" if y_state else \"both\", axis=\"y\")\n canvas.draw_idle()\n # toggle major and minor grids in current axes (default key 'G')\n if (event.key in grid_minor_keys\n # Exclude major grids not in a uniform state.\n and None not in [_get_uniform_gridstate(ax.xaxis.majorTicks),\n _get_uniform_gridstate(ax.yaxis.majorTicks)]):\n x_state = _get_uniform_gridstate(ax.xaxis.minorTicks)\n y_state = _get_uniform_gridstate(ax.yaxis.minorTicks)\n cycle = [(False, False), (True, False), (True, True), (False, True)]\n try:\n x_state, y_state = (\n cycle[(cycle.index((x_state, y_state)) + 1) % len(cycle)])\n except ValueError:\n # Exclude minor grids not in a uniform state.\n pass\n else:\n ax.grid(x_state, which=\"both\", axis=\"x\")\n ax.grid(y_state, which=\"both\", axis=\"y\")\n canvas.draw_idle()\n # toggle scaling of y-axes between 'log and 'linear' (default key 'l')\n elif event.key in toggle_yscale_keys:\n scale = ax.get_yscale()\n if scale == 'log':\n ax.set_yscale('linear')\n ax.figure.canvas.draw_idle()\n elif scale == 'linear':\n try:\n ax.set_yscale('log')\n except ValueError as exc:\n _log.warning(str(exc))\n ax.set_yscale('linear')\n ax.figure.canvas.draw_idle()\n # toggle scaling of x-axes between 'log and 'linear' (default key 'k')\n elif event.key in toggle_xscale_keys:\n scalex = ax.get_xscale()\n if scalex == 'log':\n ax.set_xscale('linear')\n ax.figure.canvas.draw_idle()\n elif scalex == 'linear':\n try:\n ax.set_xscale('log')\n except ValueError as exc:\n _log.warning(str(exc))\n ax.set_xscale('linear')\n ax.figure.canvas.draw_idle()\n # enable nagivation for all axes that contain the event (default key 'a')\n elif event.key in all_keys:\n for a in canvas.figure.get_axes():\n if (event.x is not None and event.y is not None\n and a.in_axes(event)): # FIXME: Why only these?\n a.set_navigate(True)\n # enable navigation only for axes with this index (if such an axes exist,\n # otherwise do nothing)\n elif event.key.isdigit() and event.key != '0':\n n = int(event.key) - 
1\n if n < len(canvas.figure.get_axes()):\n for i, a in enumerate(canvas.figure.get_axes()):\n if (event.x is not None and event.y is not None\n and a.in_axes(event)): # FIXME: Why only these?\n a.set_navigate(i == n)\n\n\ndef button_press_handler(event, canvas, toolbar=None):\n \"\"\"\n The default Matplotlib button actions for extra mouse buttons.\n \"\"\"\n if toolbar is not None:\n button_name = str(MouseButton(event.button))\n if button_name in rcParams['keymap.back']:\n toolbar.back()\n elif button_name in rcParams['keymap.forward']:\n toolbar.forward()\n\n\nclass NonGuiException(Exception):\n pass\n\n\nclass FigureManagerBase(object):\n \"\"\"\n Helper class for pyplot mode, wraps everything up into a neat bundle\n\n Attributes\n ----------\n canvas : :class:`FigureCanvasBase`\n The backend-specific canvas instance\n\n num : int or str\n The figure number\n\n key_press_handler_id : int\n The default key handler cid, when using the toolmanager.\n To disable the default key press handling use::\n\n figure.canvas.mpl_disconnect(\n figure.canvas.manager.key_press_handler_id)\n\n button_press_handler_id : int\n The default mouse button handler cid, when using the toolmanager.\n To disable the default button press handling use::\n\n figure.canvas.mpl_disconnect(\n figure.canvas.manager.button_press_handler_id)\n\n \"\"\"\n def __init__(self, canvas, num):\n self.canvas = canvas\n canvas.manager = self # store a pointer to parent\n self.num = num\n\n self.key_press_handler_id = None\n self.button_press_handler_id = None\n if rcParams['toolbar'] != 'toolmanager':\n self.key_press_handler_id = self.canvas.mpl_connect(\n 'key_press_event',\n self.key_press)\n self.button_press_handler_id = self.canvas.mpl_connect(\n 'button_press_event',\n self.button_press)\n\n self.toolmanager = None\n self.toolbar = None\n\n @self.canvas.figure.add_axobserver\n def notify_axes_change(fig):\n # Called whenever the current axes is changed.\n if self.toolmanager is None and self.toolbar is not None:\n self.toolbar.update()\n\n def show(self):\n \"\"\"\n For GUI backends, show the figure window and redraw.\n For non-GUI backends, raise an exception to be caught\n by :meth:`~matplotlib.figure.Figure.show`, for an\n optional warning.\n \"\"\"\n raise NonGuiException()\n\n def destroy(self):\n pass\n\n def full_screen_toggle(self):\n pass\n\n def resize(self, w, h):\n \"\"\"\"For GUI backends, resize the window (in pixels).\"\"\"\n\n def key_press(self, event):\n \"\"\"\n Implement the default mpl key bindings defined at\n :ref:`key-event-handling`\n \"\"\"\n if rcParams['toolbar'] != 'toolmanager':\n key_press_handler(event, self.canvas, self.canvas.toolbar)\n\n def button_press(self, event):\n \"\"\"\n The default Matplotlib button actions for extra mouse buttons.\n \"\"\"\n if rcParams['toolbar'] != 'toolmanager':\n button_press_handler(event, self.canvas, self.canvas.toolbar)\n\n def get_window_title(self):\n \"\"\"Get the title text of the window containing the figure.\n\n Return None for non-GUI (e.g., PS) backends.\n \"\"\"\n return 'image'\n\n def set_window_title(self, title):\n \"\"\"Set the title text of the window containing the figure.\n\n This has no effect for non-GUI (e.g., PS) backends.\n \"\"\"\n\n\ncursors = tools.cursors\n\n\nclass NavigationToolbar2(object):\n \"\"\"\n Base class for the navigation cursor, version 2\n\n backends must implement a canvas that handles connections for\n 'button_press_event' and 'button_release_event'. 
See\n :meth:`FigureCanvasBase.mpl_connect` for more information\n\n\n They must also define\n\n :meth:`save_figure`\n save the current figure\n\n :meth:`set_cursor`\n if you want the pointer icon to change\n\n :meth:`_init_toolbar`\n create your toolbar widget\n\n :meth:`draw_rubberband` (optional)\n draw the zoom to rect \"rubberband\" rectangle\n\n :meth:`press` (optional)\n whenever a mouse button is pressed, you'll be notified with\n the event\n\n :meth:`release` (optional)\n whenever a mouse button is released, you'll be notified with\n the event\n\n :meth:`set_message` (optional)\n display message\n\n :meth:`set_history_buttons` (optional)\n you can change the history back / forward buttons to\n indicate disabled / enabled state.\n\n That's it, we'll do the rest!\n \"\"\"\n\n # list of toolitems to add to the toolbar, format is:\n # (\n # text, # the text of the button (often not visible to users)\n # tooltip_text, # the tooltip shown on hover (where possible)\n # image_file, # name of the image for the button (without the extension)\n # name_of_method, # name of the method in NavigationToolbar2 to call\n # )\n toolitems = (\n ('Home', 'Reset original view', 'home', 'home'),\n ('Back', 'Back to previous view', 'back', 'back'),\n ('Forward', 'Forward to next view', 'forward', 'forward'),\n (None, None, None, None),\n ('Pan', 'Pan axes with left mouse, zoom with right', 'move', 'pan'),\n ('Zoom', 'Zoom to rectangle', 'zoom_to_rect', 'zoom'),\n ('Subplots', 'Configure subplots', 'subplots', 'configure_subplots'),\n (None, None, None, None),\n ('Save', 'Save the figure', 'filesave', 'save_figure'),\n )\n\n def __init__(self, canvas):\n self.canvas = canvas\n canvas.toolbar = self\n self._nav_stack = cbook.Stack()\n self._xypress = None # the location and axis info at the time\n # of the press\n self._idPress = None\n self._idRelease = None\n self._active = None\n # This cursor will be set after the initial draw.\n self._lastCursor = cursors.POINTER\n self._init_toolbar()\n self._idDrag = self.canvas.mpl_connect(\n 'motion_notify_event', self.mouse_move)\n\n self._ids_zoom = []\n self._zoom_mode = None\n\n self._button_pressed = None # determined by the button pressed\n # at start\n\n self.mode = '' # a mode string for the status bar\n self.set_history_buttons()\n\n def set_message(self, s):\n \"\"\"Display a message on toolbar or in status bar.\"\"\"\n\n def back(self, *args):\n \"\"\"move back up the view lim stack\"\"\"\n self._nav_stack.back()\n self.set_history_buttons()\n self._update_view()\n\n def draw_rubberband(self, event, x0, y0, x1, y1):\n \"\"\"Draw a rectangle rubberband to indicate zoom limits.\n\n Note that it is not guaranteed that ``x0 <= x1`` and ``y0 <= y1``.\n \"\"\"\n\n def remove_rubberband(self):\n \"\"\"Remove the rubberband.\"\"\"\n\n def forward(self, *args):\n \"\"\"Move forward in the view lim stack.\"\"\"\n self._nav_stack.forward()\n self.set_history_buttons()\n self._update_view()\n\n def home(self, *args):\n \"\"\"Restore the original view.\"\"\"\n self._nav_stack.home()\n self.set_history_buttons()\n self._update_view()\n\n def _init_toolbar(self):\n \"\"\"\n This is where you actually build the GUI widgets (called by\n __init__). 
The icons ``home.xpm``, ``back.xpm``, ``forward.xpm``,\n ``hand.xpm``, ``zoom_to_rect.xpm`` and ``filesave.xpm`` are standard\n across backends (there are ppm versions in CVS also).\n\n You just need to set the callbacks\n\n home : self.home\n back : self.back\n forward : self.forward\n hand : self.pan\n zoom_to_rect : self.zoom\n filesave : self.save_figure\n\n You only need to define the last one - the others are in the base\n class implementation.\n\n \"\"\"\n raise NotImplementedError\n\n def _set_cursor(self, event):\n if not event.inaxes or not self._active:\n if self._lastCursor != cursors.POINTER:\n self.set_cursor(cursors.POINTER)\n self._lastCursor = cursors.POINTER\n else:\n if (self._active == 'ZOOM'\n and self._lastCursor != cursors.SELECT_REGION):\n self.set_cursor(cursors.SELECT_REGION)\n self._lastCursor = cursors.SELECT_REGION\n elif (self._active == 'PAN' and\n self._lastCursor != cursors.MOVE):\n self.set_cursor(cursors.MOVE)\n self._lastCursor = cursors.MOVE\n\n def mouse_move(self, event):\n self._set_cursor(event)\n\n if event.inaxes and event.inaxes.get_navigate():\n\n try:\n s = event.inaxes.format_coord(event.xdata, event.ydata)\n except (ValueError, OverflowError):\n pass\n else:\n artists = [a for a in event.inaxes._mouseover_set\n if a.contains(event)[0] and a.get_visible()]\n\n if artists:\n a = cbook._topmost_artist(artists)\n if a is not event.inaxes.patch:\n data = a.get_cursor_data(event)\n if data is not None:\n data_str = a.format_cursor_data(data)\n if data_str is not None:\n s = s + ' ' + data_str\n\n if len(self.mode):\n self.set_message('%s, %s' % (self.mode, s))\n else:\n self.set_message(s)\n else:\n self.set_message(self.mode)\n\n def pan(self, *args):\n \"\"\"Activate the pan/zoom tool. pan with left button, zoom with right\"\"\"\n # set the pointer icon and button press funcs to the\n # appropriate callbacks\n\n if self._active == 'PAN':\n self._active = None\n else:\n self._active = 'PAN'\n if self._idPress is not None:\n self._idPress = self.canvas.mpl_disconnect(self._idPress)\n self.mode = ''\n\n if self._idRelease is not None:\n self._idRelease = self.canvas.mpl_disconnect(self._idRelease)\n self.mode = ''\n\n if self._active:\n self._idPress = self.canvas.mpl_connect(\n 'button_press_event', self.press_pan)\n self._idRelease = self.canvas.mpl_connect(\n 'button_release_event', self.release_pan)\n self.mode = 'pan/zoom'\n self.canvas.widgetlock(self)\n else:\n self.canvas.widgetlock.release(self)\n\n for a in self.canvas.figure.get_axes():\n a.set_navigate_mode(self._active)\n\n self.set_message(self.mode)\n\n def press(self, event):\n \"\"\"Called whenever a mouse button is pressed.\"\"\"\n\n def press_pan(self, event):\n \"\"\"Callback for mouse button press in pan/zoom mode.\"\"\"\n\n if event.button == 1:\n self._button_pressed = 1\n elif event.button == 3:\n self._button_pressed = 3\n else:\n self._button_pressed = None\n return\n\n if self._nav_stack() is None:\n # set the home button to this view\n self.push_current()\n\n x, y = event.x, event.y\n self._xypress = []\n for i, a in enumerate(self.canvas.figure.get_axes()):\n if (x is not None and y is not None and a.in_axes(event) and\n a.get_navigate() and a.can_pan()):\n a.start_pan(x, y, event.button)\n self._xypress.append((a, i))\n self.canvas.mpl_disconnect(self._idDrag)\n self._idDrag = self.canvas.mpl_connect('motion_notify_event',\n self.drag_pan)\n\n self.press(event)\n\n def press_zoom(self, event):\n \"\"\"Callback for mouse button press in zoom to rect mode.\"\"\"\n # If 
we're already in the middle of a zoom, pressing another\n # button works to \"cancel\"\n if self._ids_zoom != []:\n for zoom_id in self._ids_zoom:\n self.canvas.mpl_disconnect(zoom_id)\n self.release(event)\n self.draw()\n self._xypress = None\n self._button_pressed = None\n self._ids_zoom = []\n return\n\n if event.button == 1:\n self._button_pressed = 1\n elif event.button == 3:\n self._button_pressed = 3\n else:\n self._button_pressed = None\n return\n\n if self._nav_stack() is None:\n # set the home button to this view\n self.push_current()\n\n x, y = event.x, event.y\n self._xypress = []\n for i, a in enumerate(self.canvas.figure.get_axes()):\n if (x is not None and y is not None and a.in_axes(event) and\n a.get_navigate() and a.can_zoom()):\n self._xypress.append((x, y, a, i, a._get_view()))\n\n id1 = self.canvas.mpl_connect('motion_notify_event', self.drag_zoom)\n id2 = self.canvas.mpl_connect('key_press_event',\n self._switch_on_zoom_mode)\n id3 = self.canvas.mpl_connect('key_release_event',\n self._switch_off_zoom_mode)\n\n self._ids_zoom = id1, id2, id3\n self._zoom_mode = event.key\n\n self.press(event)\n\n def _switch_on_zoom_mode(self, event):\n self._zoom_mode = event.key\n self.mouse_move(event)\n\n def _switch_off_zoom_mode(self, event):\n self._zoom_mode = None\n self.mouse_move(event)\n\n def push_current(self):\n \"\"\"Push the current view limits and position onto the stack.\"\"\"\n self._nav_stack.push(\n WeakKeyDictionary(\n {ax: (ax._get_view(),\n # Store both the original and modified positions.\n (ax.get_position(True).frozen(),\n ax.get_position().frozen()))\n for ax in self.canvas.figure.axes}))\n self.set_history_buttons()\n\n def release(self, event):\n \"\"\"Callback for mouse button release.\"\"\"\n\n def release_pan(self, event):\n \"\"\"Callback for mouse button release in pan/zoom mode.\"\"\"\n\n if self._button_pressed is None:\n return\n self.canvas.mpl_disconnect(self._idDrag)\n self._idDrag = self.canvas.mpl_connect(\n 'motion_notify_event', self.mouse_move)\n for a, ind in self._xypress:\n a.end_pan()\n if not self._xypress:\n return\n self._xypress = []\n self._button_pressed = None\n self.push_current()\n self.release(event)\n self.draw()\n\n def drag_pan(self, event):\n \"\"\"Callback for dragging in pan/zoom mode.\"\"\"\n for a, ind in self._xypress:\n #safer to use the recorded button at the press than current button:\n #multiple button can get pressed during motion...\n a.drag_pan(self._button_pressed, event.key, event.x, event.y)\n self.canvas.draw_idle()\n\n def drag_zoom(self, event):\n \"\"\"Callback for dragging in zoom mode.\"\"\"\n if self._xypress:\n x, y = event.x, event.y\n lastx, lasty, a, ind, view = self._xypress[0]\n (x1, y1), (x2, y2) = np.clip(\n [[lastx, lasty], [x, y]], a.bbox.min, a.bbox.max)\n if self._zoom_mode == \"x\":\n y1, y2 = a.bbox.intervaly\n elif self._zoom_mode == \"y\":\n x1, x2 = a.bbox.intervalx\n self.draw_rubberband(event, x1, y1, x2, y2)\n\n def release_zoom(self, event):\n \"\"\"Callback for mouse button release in zoom to rect mode.\"\"\"\n for zoom_id in self._ids_zoom:\n self.canvas.mpl_disconnect(zoom_id)\n self._ids_zoom = []\n\n self.remove_rubberband()\n\n if not self._xypress:\n return\n\n last_a = []\n\n for cur_xypress in self._xypress:\n x, y = event.x, event.y\n lastx, lasty, a, ind, view = cur_xypress\n # ignore singular clicks - 5 pixels is a threshold\n # allows the user to \"cancel\" a zoom action\n # by zooming by less than 5 pixels\n if ((abs(x - lastx) < 5 and self._zoom_mode != \"y\") 
or\n (abs(y - lasty) < 5 and self._zoom_mode != \"x\")):\n self._xypress = None\n self.release(event)\n self.draw()\n return\n\n # detect twinx,y axes and avoid double zooming\n twinx, twiny = False, False\n if last_a:\n for la in last_a:\n if a.get_shared_x_axes().joined(a, la):\n twinx = True\n if a.get_shared_y_axes().joined(a, la):\n twiny = True\n last_a.append(a)\n\n if self._button_pressed == 1:\n direction = 'in'\n elif self._button_pressed == 3:\n direction = 'out'\n else:\n continue\n\n a._set_view_from_bbox((lastx, lasty, x, y), direction,\n self._zoom_mode, twinx, twiny)\n\n self.draw()\n self._xypress = None\n self._button_pressed = None\n\n self._zoom_mode = None\n\n self.push_current()\n self.release(event)\n\n def draw(self):\n \"\"\"Redraw the canvases, update the locators.\"\"\"\n for a in self.canvas.figure.get_axes():\n xaxis = getattr(a, 'xaxis', None)\n yaxis = getattr(a, 'yaxis', None)\n locators = []\n if xaxis is not None:\n locators.append(xaxis.get_major_locator())\n locators.append(xaxis.get_minor_locator())\n if yaxis is not None:\n locators.append(yaxis.get_major_locator())\n locators.append(yaxis.get_minor_locator())\n\n for loc in locators:\n loc.refresh()\n self.canvas.draw_idle()\n\n def _update_view(self):\n \"\"\"Update the viewlim and position from the view and\n position stack for each axes.\n \"\"\"\n nav_info = self._nav_stack()\n if nav_info is None:\n return\n # Retrieve all items at once to avoid any risk of GC deleting an Axes\n # while in the middle of the loop below.\n items = list(nav_info.items())\n for ax, (view, (pos_orig, pos_active)) in items:\n ax._set_view(view)\n # Restore both the original and modified positions\n ax._set_position(pos_orig, 'original')\n ax._set_position(pos_active, 'active')\n self.canvas.draw_idle()\n\n def save_figure(self, *args):\n \"\"\"Save the current figure.\"\"\"\n raise NotImplementedError\n\n def set_cursor(self, cursor):\n \"\"\"Set the current cursor to one of the :class:`Cursors` enums values.\n\n If required by the backend, this method should trigger an update in\n the backend event loop after the cursor is set, as this method may be\n called e.g. before a long-running task during which the GUI is not\n updated.\n \"\"\"\n\n def update(self):\n \"\"\"Reset the axes stack.\"\"\"\n self._nav_stack.clear()\n self.set_history_buttons()\n\n def zoom(self, *args):\n \"\"\"Activate zoom to rect mode.\"\"\"\n if self._active == 'ZOOM':\n self._active = None\n else:\n self._active = 'ZOOM'\n\n if self._idPress is not None:\n self._idPress = self.canvas.mpl_disconnect(self._idPress)\n self.mode = ''\n\n if self._idRelease is not None:\n self._idRelease = self.canvas.mpl_disconnect(self._idRelease)\n self.mode = ''\n\n if self._active:\n self._idPress = self.canvas.mpl_connect('button_press_event',\n self.press_zoom)\n self._idRelease = self.canvas.mpl_connect('button_release_event',\n self.release_zoom)\n self.mode = 'zoom rect'\n self.canvas.widgetlock(self)\n else:\n self.canvas.widgetlock.release(self)\n\n for a in self.canvas.figure.get_axes():\n a.set_navigate_mode(self._active)\n\n self.set_message(self.mode)\n\n def set_history_buttons(self):\n \"\"\"Enable or disable the back/forward button.\"\"\"\n\n\nclass ToolContainerBase(object):\n \"\"\"\n Base class for all tool containers, e.g. 
toolbars.\n\n Attributes\n ----------\n toolmanager : `ToolManager`\n The tools with which this `ToolContainer` wants to communicate.\n \"\"\"\n\n _icon_extension = '.png'\n \"\"\"\n Toolcontainer button icon image format extension\n\n **String**: Image extension\n \"\"\"\n\n def __init__(self, toolmanager):\n self.toolmanager = toolmanager\n self.toolmanager.toolmanager_connect('tool_removed_event',\n self._remove_tool_cbk)\n\n def _tool_toggled_cbk(self, event):\n \"\"\"\n Captures the 'tool_trigger_[name]'\n\n This only gets used for toggled tools\n \"\"\"\n self.toggle_toolitem(event.tool.name, event.tool.toggled)\n\n def add_tool(self, tool, group, position=-1):\n \"\"\"\n Adds a tool to this container\n\n Parameters\n ----------\n tool : tool_like\n The tool to add, see `ToolManager.get_tool`.\n group : str\n The name of the group to add this tool to.\n position : int (optional)\n The position within the group to place this tool. Defaults to end.\n \"\"\"\n tool = self.toolmanager.get_tool(tool)\n image = self._get_image_filename(tool.image)\n toggle = getattr(tool, 'toggled', None) is not None\n self.add_toolitem(tool.name, group, position,\n image, tool.description, toggle)\n if toggle:\n self.toolmanager.toolmanager_connect('tool_trigger_%s' % tool.name,\n self._tool_toggled_cbk)\n # If initially toggled\n if tool.toggled:\n self.toggle_toolitem(tool.name, True)\n\n def _remove_tool_cbk(self, event):\n \"\"\"Captures the 'tool_removed_event' signal and removes the tool.\"\"\"\n self.remove_toolitem(event.tool.name)\n\n def _get_image_filename(self, image):\n \"\"\"Find the image based on its name.\"\"\"\n if not image:\n return None\n\n basedir = os.path.join(rcParams['datapath'], 'images')\n possible_images = (\n image,\n image + self._icon_extension,\n os.path.join(basedir, image),\n os.path.join(basedir, image) + self._icon_extension)\n\n for fname in possible_images:\n if os.path.isfile(fname):\n return fname\n\n def trigger_tool(self, name):\n \"\"\"\n Trigger the tool\n\n Parameters\n ----------\n name : string\n Name (id) of the tool triggered from within the container\n \"\"\"\n self.toolmanager.trigger_tool(name, sender=self)\n\n def add_toolitem(self, name, group, position, image, description, toggle):\n \"\"\"\n Add a toolitem to the container\n\n This method must get implemented per backend\n\n The callback associated with the button click event,\n must be **EXACTLY** `self.trigger_tool(name)`\n\n Parameters\n ----------\n name : string\n Name of the tool to add, this gets used as the tool's ID and as the\n default label of the buttons\n group : String\n Name of the group that this tool belongs to\n position : Int\n Position of the tool within its group, if -1 it goes at the End\n image_file : String\n Filename of the image for the button or `None`\n description : String\n Description of the tool, used for the tooltips\n toggle : Bool\n * `True` : The button is a toggle (change the pressed/unpressed\n state between consecutive clicks)\n * `False` : The button is a normal button (returns to unpressed\n state after release)\n \"\"\"\n raise NotImplementedError\n\n def toggle_toolitem(self, name, toggled):\n \"\"\"\n Toggle the toolitem without firing event\n\n Parameters\n ----------\n name : String\n Id of the tool to toggle\n toggled : bool\n Whether to set this tool as toggled or not.\n \"\"\"\n raise NotImplementedError\n\n def remove_toolitem(self, name):\n \"\"\"\n Remove a toolitem from the `ToolContainer`\n\n This method must get implemented per backend\n\n 
Called when `ToolManager` emits a `tool_removed_event`\n\n Parameters\n ----------\n name : string\n Name of the tool to remove\n \"\"\"\n raise NotImplementedError\n\n\nclass StatusbarBase(object):\n \"\"\"Base class for the statusbar\"\"\"\n def __init__(self, toolmanager):\n self.toolmanager = toolmanager\n self.toolmanager.toolmanager_connect('tool_message_event',\n self._message_cbk)\n\n def _message_cbk(self, event):\n \"\"\"Captures the 'tool_message_event' and set the message\"\"\"\n self.set_message(event.message)\n\n def set_message(self, s):\n \"\"\"\n Display a message on toolbar or in status bar\n\n Parameters\n ----------\n s : str\n Message text\n \"\"\"\n pass\n\n\nclass _Backend(object):\n # A backend can be defined by using the following pattern:\n #\n # @_Backend.export\n # class FooBackend(_Backend):\n # # override the attributes and methods documented below.\n\n # Set to one of {\"qt5\", \"qt4\", \"gtk3\", \"wx\", \"tk\", \"macosx\"} if an\n # interactive framework is required, or None otherwise.\n required_interactive_framework = None\n\n # `backend_version` may be overridden by the subclass.\n backend_version = \"unknown\"\n\n # The `FigureCanvas` class must be defined.\n FigureCanvas = None\n\n # For interactive backends, the `FigureManager` class must be overridden.\n FigureManager = FigureManagerBase\n\n # The following methods must be left as None for non-interactive backends.\n # For interactive backends, `trigger_manager_draw` should be a function\n # taking a manager as argument and triggering a canvas draw, and `mainloop`\n # should be a function taking no argument and starting the backend main\n # loop.\n trigger_manager_draw = None\n mainloop = None\n\n # The following methods will be automatically defined and exported, but\n # can be overridden.\n\n @classmethod\n def new_figure_manager(cls, num, *args, **kwargs):\n \"\"\"Create a new figure manager instance.\n \"\"\"\n # This import needs to happen here due to circular imports.\n from matplotlib.figure import Figure\n fig_cls = kwargs.pop('FigureClass', Figure)\n fig = fig_cls(*args, **kwargs)\n return cls.new_figure_manager_given_figure(num, fig)\n\n @classmethod\n def new_figure_manager_given_figure(cls, num, figure):\n \"\"\"Create a new figure manager instance for the given figure.\n \"\"\"\n canvas = cls.FigureCanvas(figure)\n manager = cls.FigureManager(canvas, num)\n return manager\n\n @classmethod\n def draw_if_interactive(cls):\n if cls.trigger_manager_draw is not None and is_interactive():\n manager = Gcf.get_active()\n if manager:\n cls.trigger_manager_draw(manager)\n\n @classmethod\n @cbook._make_keyword_only(\"3.1\", \"block\")\n def show(cls, block=None):\n \"\"\"\n Show all figures.\n\n `show` blocks by calling `mainloop` if *block* is ``True``, or if it\n is ``None`` and we are neither in IPython's ``%pylab`` mode, nor in\n `interactive` mode.\n \"\"\"\n managers = Gcf.get_all_fig_managers()\n if not managers:\n return\n for manager in managers:\n # Emits a warning if the backend is non-interactive.\n manager.canvas.figure.show()\n if cls.mainloop is None:\n return\n if block is None:\n # Hack: Are we in IPython's pylab mode?\n from matplotlib import pyplot\n try:\n # IPython versions >= 0.10 tack the _needmain attribute onto\n # pyplot.show, and always set it to False, when in %pylab mode.\n ipython_pylab = not pyplot.show._needmain\n except AttributeError:\n ipython_pylab = False\n block = not ipython_pylab and not is_interactive()\n # TODO: The above is a hack to get the WebAgg backend 
working with\n # ipython's `%pylab` mode until proper integration is implemented.\n if get_backend() == \"WebAgg\":\n block = True\n if block:\n cls.mainloop()\n\n # This method is the one actually exporting the required methods.\n\n @staticmethod\n def export(cls):\n for name in [\"required_interactive_framework\",\n \"backend_version\",\n \"FigureCanvas\",\n \"FigureManager\",\n \"new_figure_manager\",\n \"new_figure_manager_given_figure\",\n \"draw_if_interactive\",\n \"show\"]:\n setattr(sys.modules[cls.__module__], name, getattr(cls, name))\n\n # For back-compatibility, generate a shim `Show` class.\n\n class Show(ShowBase):\n def mainloop(self):\n return cls.mainloop()\n\n setattr(sys.modules[cls.__module__], \"Show\", Show)\n return cls\n\n\nclass ShowBase(_Backend):\n \"\"\"\n Simple base class to generate a show() callable in backends.\n\n Subclass must override mainloop() method.\n \"\"\"\n\n def __call__(self, block=None):\n return self.show(block=block)\n",
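The `mpl_connect` / `mpl_disconnect` docstrings in the backend source above already describe the callback pattern; purely as an illustration of that documented API (not part of the stored file), a minimal runnable sketch might look like the following. The `Agg` backend choice is an assumption for a headless run, so no real mouse events arrive; the point is only the connect/disconnect shape shown in the docstring example.

```python
import matplotlib
matplotlib.use("Agg")  # assumption: non-interactive backend for a headless sketch
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4])

def on_press(event):
    # event.xdata / event.ydata are data coordinates, or None outside any axes
    print("you pressed", event.button, event.xdata, event.ydata)

# connect the handler; the returned cid is later used to disconnect it
cid = fig.canvas.mpl_connect("button_press_event", on_press)
# ... later
fig.canvas.mpl_disconnect(cid)
```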
"from collections import OrderedDict\nfrom datetime import datetime\n\nimport numpy as np\nimport pytest\n\nfrom pandas.compat import PY37\n\nimport pandas as pd\nfrom pandas import (\n Categorical,\n CategoricalIndex,\n DataFrame,\n Index,\n MultiIndex,\n Series,\n qcut,\n)\nimport pandas.util.testing as tm\nfrom pandas.util.testing import assert_equal, assert_frame_equal, assert_series_equal\n\n\ndef cartesian_product_for_groupers(result, args, names):\n \"\"\" Reindex to a cartesian production for the groupers,\n preserving the nature (Categorical) of each grouper \"\"\"\n\n def f(a):\n if isinstance(a, (CategoricalIndex, Categorical)):\n categories = a.categories\n a = Categorical.from_codes(\n np.arange(len(categories)), categories=categories, ordered=a.ordered\n )\n return a\n\n index = MultiIndex.from_product(map(f, args), names=names)\n return result.reindex(index).sort_index()\n\n\ndef test_apply_use_categorical_name(df):\n cats = qcut(df.C, 4)\n\n def get_stats(group):\n return {\n \"min\": group.min(),\n \"max\": group.max(),\n \"count\": group.count(),\n \"mean\": group.mean(),\n }\n\n result = df.groupby(cats, observed=False).D.apply(get_stats)\n assert result.index.names[0] == \"C\"\n\n\ndef test_basic():\n\n cats = Categorical(\n [\"a\", \"a\", \"a\", \"b\", \"b\", \"b\", \"c\", \"c\", \"c\"],\n categories=[\"a\", \"b\", \"c\", \"d\"],\n ordered=True,\n )\n data = DataFrame({\"a\": [1, 1, 1, 2, 2, 2, 3, 4, 5], \"b\": cats})\n\n exp_index = CategoricalIndex(list(\"abcd\"), name=\"b\", ordered=True)\n expected = DataFrame({\"a\": [1, 2, 4, np.nan]}, index=exp_index)\n result = data.groupby(\"b\", observed=False).mean()\n tm.assert_frame_equal(result, expected)\n\n cat1 = Categorical([\"a\", \"a\", \"b\", \"b\"], categories=[\"a\", \"b\", \"z\"], ordered=True)\n cat2 = Categorical([\"c\", \"d\", \"c\", \"d\"], categories=[\"c\", \"d\", \"y\"], ordered=True)\n df = DataFrame({\"A\": cat1, \"B\": cat2, \"values\": [1, 2, 3, 4]})\n\n # single grouper\n gb = df.groupby(\"A\", observed=False)\n exp_idx = CategoricalIndex([\"a\", \"b\", \"z\"], name=\"A\", ordered=True)\n expected = DataFrame({\"values\": Series([3, 7, 0], index=exp_idx)})\n result = gb.sum()\n tm.assert_frame_equal(result, expected)\n\n # GH 8623\n x = DataFrame(\n [[1, \"John P. Doe\"], [2, \"Jane Dove\"], [1, \"John P. 
Doe\"]],\n columns=[\"person_id\", \"person_name\"],\n )\n x[\"person_name\"] = Categorical(x.person_name)\n\n g = x.groupby([\"person_id\"], observed=False)\n result = g.transform(lambda x: x)\n tm.assert_frame_equal(result, x[[\"person_name\"]])\n\n result = x.drop_duplicates(\"person_name\")\n expected = x.iloc[[0, 1]]\n tm.assert_frame_equal(result, expected)\n\n def f(x):\n return x.drop_duplicates(\"person_name\").iloc[0]\n\n result = g.apply(f)\n expected = x.iloc[[0, 1]].copy()\n expected.index = Index([1, 2], name=\"person_id\")\n expected[\"person_name\"] = expected[\"person_name\"].astype(\"object\")\n tm.assert_frame_equal(result, expected)\n\n # GH 9921\n # Monotonic\n df = DataFrame({\"a\": [5, 15, 25]})\n c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])\n\n result = df.a.groupby(c, observed=False).transform(sum)\n tm.assert_series_equal(result, df[\"a\"])\n\n tm.assert_series_equal(\n df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df[\"a\"]\n )\n tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[[\"a\"]])\n tm.assert_frame_equal(\n df.groupby(c, observed=False).transform(lambda xs: np.max(xs)), df[[\"a\"]]\n )\n\n # Filter\n tm.assert_series_equal(df.a.groupby(c, observed=False).filter(np.all), df[\"a\"])\n tm.assert_frame_equal(df.groupby(c, observed=False).filter(np.all), df)\n\n # Non-monotonic\n df = DataFrame({\"a\": [5, 15, 25, -5]})\n c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])\n\n result = df.a.groupby(c, observed=False).transform(sum)\n tm.assert_series_equal(result, df[\"a\"])\n\n tm.assert_series_equal(\n df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df[\"a\"]\n )\n tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[[\"a\"]])\n tm.assert_frame_equal(\n df.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df[[\"a\"]]\n )\n\n # GH 9603\n df = DataFrame({\"a\": [1, 0, 0, 0]})\n c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list(\"abcd\")))\n result = df.groupby(c, observed=False).apply(len)\n\n exp_index = CategoricalIndex(c.values.categories, ordered=c.values.ordered)\n expected = Series([1, 0, 0, 0], index=exp_index)\n expected.index.name = \"a\"\n tm.assert_series_equal(result, expected)\n\n # more basic\n levels = [\"foo\", \"bar\", \"baz\", \"qux\"]\n codes = np.random.randint(0, 4, size=100)\n\n cats = Categorical.from_codes(codes, levels, ordered=True)\n\n data = DataFrame(np.random.randn(100, 4))\n\n result = data.groupby(cats, observed=False).mean()\n\n expected = data.groupby(np.asarray(cats), observed=False).mean()\n exp_idx = CategoricalIndex(levels, categories=cats.categories, ordered=True)\n expected = expected.reindex(exp_idx)\n\n assert_frame_equal(result, expected)\n\n grouped = data.groupby(cats, observed=False)\n desc_result = grouped.describe()\n\n idx = cats.codes.argsort()\n ord_labels = np.asarray(cats).take(idx)\n ord_data = data.take(idx)\n\n exp_cats = Categorical(\n ord_labels, ordered=True, categories=[\"foo\", \"bar\", \"baz\", \"qux\"]\n )\n expected = ord_data.groupby(exp_cats, sort=False, observed=False).describe()\n assert_frame_equal(desc_result, expected)\n\n # GH 10460\n expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)\n exp = CategoricalIndex(expc)\n tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)\n exp = Index([\"count\", \"mean\", \"std\", \"min\", \"25%\", \"50%\", \"75%\", \"max\"] * 4)\n tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)\n\n\ndef 
test_level_get_group(observed):\n # GH15155\n df = DataFrame(\n data=np.arange(2, 22, 2),\n index=MultiIndex(\n levels=[CategoricalIndex([\"a\", \"b\"]), range(10)],\n codes=[[0] * 5 + [1] * 5, range(10)],\n names=[\"Index1\", \"Index2\"],\n ),\n )\n g = df.groupby(level=[\"Index1\"], observed=observed)\n\n # expected should equal test.loc[[\"a\"]]\n # GH15166\n expected = DataFrame(\n data=np.arange(2, 12, 2),\n index=MultiIndex(\n levels=[CategoricalIndex([\"a\", \"b\"]), range(5)],\n codes=[[0] * 5, range(5)],\n names=[\"Index1\", \"Index2\"],\n ),\n )\n result = g.get_group(\"a\")\n\n assert_frame_equal(result, expected)\n\n\n# GH#21636 flaky on py37; may be related to older numpy, see discussion\n# https://github.com/MacPython/pandas-wheels/pull/64\[email protected](PY37, reason=\"Flaky, GH-27902\", strict=False)\[email protected](\"ordered\", [True, False])\ndef test_apply(ordered):\n # GH 10138\n\n dense = Categorical(list(\"abc\"), ordered=ordered)\n\n # 'b' is in the categories but not in the list\n missing = Categorical(list(\"aaa\"), categories=[\"a\", \"b\"], ordered=ordered)\n values = np.arange(len(dense))\n df = DataFrame({\"missing\": missing, \"dense\": dense, \"values\": values})\n grouped = df.groupby([\"missing\", \"dense\"], observed=True)\n\n # missing category 'b' should still exist in the output index\n idx = MultiIndex.from_arrays([missing, dense], names=[\"missing\", \"dense\"])\n expected = DataFrame([0, 1, 2.0], index=idx, columns=[\"values\"])\n\n # GH#21636 tracking down the xfail, in some builds np.mean(df.loc[[0]])\n # is coming back as Series([0., 1., 0.], index=[\"missing\", \"dense\", \"values\"])\n # when we expect Series(0., index=[\"values\"])\n result = grouped.apply(lambda x: np.mean(x))\n assert_frame_equal(result, expected)\n\n # we coerce back to ints\n expected = expected.astype(\"int\")\n result = grouped.mean()\n assert_frame_equal(result, expected)\n\n result = grouped.agg(np.mean)\n assert_frame_equal(result, expected)\n\n # but for transform we should still get back the original index\n idx = MultiIndex.from_arrays([missing, dense], names=[\"missing\", \"dense\"])\n expected = Series(1, index=idx)\n result = grouped.apply(lambda x: 1)\n assert_series_equal(result, expected)\n\n\ndef test_observed(observed):\n # multiple groupers, don't re-expand the output space\n # of the grouper\n # gh-14942 (implement)\n # gh-10132 (back-compat)\n # gh-8138 (back-compat)\n # gh-8869\n\n cat1 = Categorical([\"a\", \"a\", \"b\", \"b\"], categories=[\"a\", \"b\", \"z\"], ordered=True)\n cat2 = Categorical([\"c\", \"d\", \"c\", \"d\"], categories=[\"c\", \"d\", \"y\"], ordered=True)\n df = DataFrame({\"A\": cat1, \"B\": cat2, \"values\": [1, 2, 3, 4]})\n df[\"C\"] = [\"foo\", \"bar\"] * 2\n\n # multiple groupers with a non-cat\n gb = df.groupby([\"A\", \"B\", \"C\"], observed=observed)\n exp_index = MultiIndex.from_arrays(\n [cat1, cat2, [\"foo\", \"bar\"] * 2], names=[\"A\", \"B\", \"C\"]\n )\n expected = DataFrame({\"values\": Series([1, 2, 3, 4], index=exp_index)}).sort_index()\n result = gb.sum()\n if not observed:\n expected = cartesian_product_for_groupers(\n expected, [cat1, cat2, [\"foo\", \"bar\"]], list(\"ABC\")\n )\n\n tm.assert_frame_equal(result, expected)\n\n gb = df.groupby([\"A\", \"B\"], observed=observed)\n exp_index = MultiIndex.from_arrays([cat1, cat2], names=[\"A\", \"B\"])\n expected = DataFrame({\"values\": [1, 2, 3, 4]}, index=exp_index)\n result = gb.sum()\n if not observed:\n expected = cartesian_product_for_groupers(expected, [cat1, 
cat2], list(\"AB\"))\n\n tm.assert_frame_equal(result, expected)\n\n # https://github.com/pandas-dev/pandas/issues/8138\n d = {\n \"cat\": Categorical(\n [\"a\", \"b\", \"a\", \"b\"], categories=[\"a\", \"b\", \"c\"], ordered=True\n ),\n \"ints\": [1, 1, 2, 2],\n \"val\": [10, 20, 30, 40],\n }\n df = DataFrame(d)\n\n # Grouping on a single column\n groups_single_key = df.groupby(\"cat\", observed=observed)\n result = groups_single_key.mean()\n\n exp_index = CategoricalIndex(\n list(\"ab\"), name=\"cat\", categories=list(\"abc\"), ordered=True\n )\n expected = DataFrame({\"ints\": [1.5, 1.5], \"val\": [20.0, 30]}, index=exp_index)\n if not observed:\n index = CategoricalIndex(\n list(\"abc\"), name=\"cat\", categories=list(\"abc\"), ordered=True\n )\n expected = expected.reindex(index)\n\n tm.assert_frame_equal(result, expected)\n\n # Grouping on two columns\n groups_double_key = df.groupby([\"cat\", \"ints\"], observed=observed)\n result = groups_double_key.agg(\"mean\")\n expected = DataFrame(\n {\n \"val\": [10, 30, 20, 40],\n \"cat\": Categorical(\n [\"a\", \"a\", \"b\", \"b\"], categories=[\"a\", \"b\", \"c\"], ordered=True\n ),\n \"ints\": [1, 2, 1, 2],\n }\n ).set_index([\"cat\", \"ints\"])\n if not observed:\n expected = cartesian_product_for_groupers(\n expected, [df.cat.values, [1, 2]], [\"cat\", \"ints\"]\n )\n\n tm.assert_frame_equal(result, expected)\n\n # GH 10132\n for key in [(\"a\", 1), (\"b\", 2), (\"b\", 1), (\"a\", 2)]:\n c, i = key\n result = groups_double_key.get_group(key)\n expected = df[(df.cat == c) & (df.ints == i)]\n assert_frame_equal(result, expected)\n\n # gh-8869\n # with as_index\n d = {\n \"foo\": [10, 8, 4, 8, 4, 1, 1],\n \"bar\": [10, 20, 30, 40, 50, 60, 70],\n \"baz\": [\"d\", \"c\", \"e\", \"a\", \"a\", \"d\", \"c\"],\n }\n df = DataFrame(d)\n cat = pd.cut(df[\"foo\"], np.linspace(0, 10, 3))\n df[\"range\"] = cat\n groups = df.groupby([\"range\", \"baz\"], as_index=False, observed=observed)\n result = groups.agg(\"mean\")\n\n groups2 = df.groupby([\"range\", \"baz\"], as_index=True, observed=observed)\n expected = groups2.agg(\"mean\").reset_index()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_observed_codes_remap(observed):\n d = {\"C1\": [3, 3, 4, 5], \"C2\": [1, 2, 3, 4], \"C3\": [10, 100, 200, 34]}\n df = DataFrame(d)\n values = pd.cut(df[\"C1\"], [1, 2, 3, 6])\n values.name = \"cat\"\n groups_double_key = df.groupby([values, \"C2\"], observed=observed)\n\n idx = MultiIndex.from_arrays([values, [1, 2, 3, 4]], names=[\"cat\", \"C2\"])\n expected = DataFrame({\"C1\": [3, 3, 4, 5], \"C3\": [10, 100, 200, 34]}, index=idx)\n if not observed:\n expected = cartesian_product_for_groupers(\n expected, [values.values, [1, 2, 3, 4]], [\"cat\", \"C2\"]\n )\n\n result = groups_double_key.agg(\"mean\")\n tm.assert_frame_equal(result, expected)\n\n\ndef test_observed_perf():\n # we create a cartesian product, so this is\n # non-performant if we don't use observed values\n # gh-14942\n df = DataFrame(\n {\n \"cat\": np.random.randint(0, 255, size=30000),\n \"int_id\": np.random.randint(0, 255, size=30000),\n \"other_id\": np.random.randint(0, 10000, size=30000),\n \"foo\": 0,\n }\n )\n df[\"cat\"] = df.cat.astype(str).astype(\"category\")\n\n grouped = df.groupby([\"cat\", \"int_id\", \"other_id\"], observed=True)\n result = grouped.count()\n assert result.index.levels[0].nunique() == df.cat.nunique()\n assert result.index.levels[1].nunique() == df.int_id.nunique()\n assert result.index.levels[2].nunique() == df.other_id.nunique()\n\n\ndef 
test_observed_groups(observed):\n # gh-20583\n # test that we have the appropriate groups\n\n cat = Categorical([\"a\", \"c\", \"a\"], categories=[\"a\", \"b\", \"c\"])\n df = DataFrame({\"cat\": cat, \"vals\": [1, 2, 3]})\n g = df.groupby(\"cat\", observed=observed)\n\n result = g.groups\n if observed:\n expected = {\"a\": Index([0, 2], dtype=\"int64\"), \"c\": Index([1], dtype=\"int64\")}\n else:\n expected = {\n \"a\": Index([0, 2], dtype=\"int64\"),\n \"b\": Index([], dtype=\"int64\"),\n \"c\": Index([1], dtype=\"int64\"),\n }\n\n tm.assert_dict_equal(result, expected)\n\n\ndef test_observed_groups_with_nan(observed):\n # GH 24740\n df = DataFrame(\n {\n \"cat\": Categorical([\"a\", np.nan, \"a\"], categories=[\"a\", \"b\", \"d\"]),\n \"vals\": [1, 2, 3],\n }\n )\n g = df.groupby(\"cat\", observed=observed)\n result = g.groups\n if observed:\n expected = {\"a\": Index([0, 2], dtype=\"int64\")}\n else:\n expected = {\n \"a\": Index([0, 2], dtype=\"int64\"),\n \"b\": Index([], dtype=\"int64\"),\n \"d\": Index([], dtype=\"int64\"),\n }\n tm.assert_dict_equal(result, expected)\n\n\ndef test_observed_nth():\n # GH 26385\n cat = pd.Categorical([\"a\", np.nan, np.nan], categories=[\"a\", \"b\", \"c\"])\n ser = pd.Series([1, 2, 3])\n df = pd.DataFrame({\"cat\": cat, \"ser\": ser})\n\n result = df.groupby(\"cat\", observed=False)[\"ser\"].nth(0)\n\n index = pd.Categorical([\"a\", \"b\", \"c\"], categories=[\"a\", \"b\", \"c\"])\n expected = pd.Series([1, np.nan, np.nan], index=index, name=\"ser\")\n expected.index.name = \"cat\"\n\n tm.assert_series_equal(result, expected)\n\n\ndef test_dataframe_categorical_with_nan(observed):\n # GH 21151\n s1 = Categorical([np.nan, \"a\", np.nan, \"a\"], categories=[\"a\", \"b\", \"c\"])\n s2 = Series([1, 2, 3, 4])\n df = DataFrame({\"s1\": s1, \"s2\": s2})\n result = df.groupby(\"s1\", observed=observed).first().reset_index()\n if observed:\n expected = DataFrame(\n {\"s1\": Categorical([\"a\"], categories=[\"a\", \"b\", \"c\"]), \"s2\": [2]}\n )\n else:\n expected = DataFrame(\n {\n \"s1\": Categorical([\"a\", \"b\", \"c\"], categories=[\"a\", \"b\", \"c\"]),\n \"s2\": [2, np.nan, np.nan],\n }\n )\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"ordered\", [True, False])\[email protected](\"observed\", [True, False])\[email protected](\"sort\", [True, False])\ndef test_dataframe_categorical_ordered_observed_sort(ordered, observed, sort):\n # GH 25871: Fix groupby sorting on ordered Categoricals\n # GH 25167: Groupby with observed=True doesn't sort\n\n # Build a dataframe with cat having one unobserved category ('missing'),\n # and a Series with identical values\n label = Categorical(\n [\"d\", \"a\", \"b\", \"a\", \"d\", \"b\"],\n categories=[\"a\", \"b\", \"missing\", \"d\"],\n ordered=ordered,\n )\n val = Series([\"d\", \"a\", \"b\", \"a\", \"d\", \"b\"])\n df = DataFrame({\"label\": label, \"val\": val})\n\n # aggregate on the Categorical\n result = df.groupby(\"label\", observed=observed, sort=sort)[\"val\"].aggregate(\"first\")\n\n # If ordering works, we expect index labels equal to aggregation results,\n # except for 'observed=False': label 'missing' has aggregation None\n label = Series(result.index.array, dtype=\"object\")\n aggr = Series(result.array)\n if not observed:\n aggr[aggr.isna()] = \"missing\"\n if not all(label == aggr):\n msg = (\n \"Labels and aggregation results not consistently sorted\\n\"\n + \"for (ordered={}, observed={}, sort={})\\n\"\n + \"Result:\\n{}\"\n ).format(ordered, observed, sort, result)\n assert 
False, msg\n\n\ndef test_datetime():\n # GH9049: ensure backward compatibility\n levels = pd.date_range(\"2014-01-01\", periods=4)\n codes = np.random.randint(0, 4, size=100)\n\n cats = Categorical.from_codes(codes, levels, ordered=True)\n\n data = DataFrame(np.random.randn(100, 4))\n result = data.groupby(cats, observed=False).mean()\n\n expected = data.groupby(np.asarray(cats), observed=False).mean()\n expected = expected.reindex(levels)\n expected.index = CategoricalIndex(\n expected.index, categories=expected.index, ordered=True\n )\n\n assert_frame_equal(result, expected)\n\n grouped = data.groupby(cats, observed=False)\n desc_result = grouped.describe()\n\n idx = cats.codes.argsort()\n ord_labels = cats.take_nd(idx)\n ord_data = data.take(idx)\n expected = ord_data.groupby(ord_labels, observed=False).describe()\n assert_frame_equal(desc_result, expected)\n tm.assert_index_equal(desc_result.index, expected.index)\n tm.assert_index_equal(\n desc_result.index.get_level_values(0), expected.index.get_level_values(0)\n )\n\n # GH 10460\n expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)\n exp = CategoricalIndex(expc)\n tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)\n exp = Index([\"count\", \"mean\", \"std\", \"min\", \"25%\", \"50%\", \"75%\", \"max\"] * 4)\n tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)\n\n\ndef test_categorical_index():\n\n s = np.random.RandomState(12345)\n levels = [\"foo\", \"bar\", \"baz\", \"qux\"]\n codes = s.randint(0, 4, size=20)\n cats = Categorical.from_codes(codes, levels, ordered=True)\n df = DataFrame(np.repeat(np.arange(20), 4).reshape(-1, 4), columns=list(\"abcd\"))\n df[\"cats\"] = cats\n\n # with a cat index\n result = df.set_index(\"cats\").groupby(level=0, observed=False).sum()\n expected = df[list(\"abcd\")].groupby(cats.codes, observed=False).sum()\n expected.index = CategoricalIndex(\n Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name=\"cats\"\n )\n assert_frame_equal(result, expected)\n\n # with a cat column, should produce a cat index\n result = df.groupby(\"cats\", observed=False).sum()\n expected = df[list(\"abcd\")].groupby(cats.codes, observed=False).sum()\n expected.index = CategoricalIndex(\n Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name=\"cats\"\n )\n assert_frame_equal(result, expected)\n\n\ndef test_describe_categorical_columns():\n # GH 11558\n cats = CategoricalIndex(\n [\"qux\", \"foo\", \"baz\", \"bar\"],\n categories=[\"foo\", \"bar\", \"baz\", \"qux\"],\n ordered=True,\n )\n df = DataFrame(np.random.randn(20, 4), columns=cats)\n result = df.groupby([1, 2, 3, 4] * 5).describe()\n\n tm.assert_index_equal(result.stack().columns, cats)\n tm.assert_categorical_equal(result.stack().columns.values, cats.values)\n\n\ndef test_unstack_categorical():\n # GH11558 (example is taken from the original issue)\n df = DataFrame(\n {\"a\": range(10), \"medium\": [\"A\", \"B\"] * 5, \"artist\": list(\"XYXXY\") * 2}\n )\n df[\"medium\"] = df[\"medium\"].astype(\"category\")\n\n gcat = df.groupby([\"artist\", \"medium\"], observed=False)[\"a\"].count().unstack()\n result = gcat.describe()\n\n exp_columns = CategoricalIndex([\"A\", \"B\"], ordered=False, name=\"medium\")\n tm.assert_index_equal(result.columns, exp_columns)\n tm.assert_categorical_equal(result.columns.values, exp_columns.values)\n\n result = gcat[\"A\"] + gcat[\"B\"]\n expected = Series([6, 4], index=Index([\"X\", \"Y\"], name=\"artist\"))\n 
tm.assert_series_equal(result, expected)\n\n\ndef test_bins_unequal_len():\n # GH3011\n series = Series([np.nan, np.nan, 1, 1, 2, 2, 3, 3, 4, 4])\n bins = pd.cut(series.dropna().values, 4)\n\n # len(bins) != len(series) here\n with pytest.raises(ValueError):\n series.groupby(bins).mean()\n\n\ndef test_as_index():\n # GH13204\n df = DataFrame(\n {\n \"cat\": Categorical([1, 2, 2], [1, 2, 3]),\n \"A\": [10, 11, 11],\n \"B\": [101, 102, 103],\n }\n )\n result = df.groupby([\"cat\", \"A\"], as_index=False, observed=True).sum()\n expected = DataFrame(\n {\n \"cat\": Categorical([1, 2], categories=df.cat.cat.categories),\n \"A\": [10, 11],\n \"B\": [101, 205],\n },\n columns=[\"cat\", \"A\", \"B\"],\n )\n tm.assert_frame_equal(result, expected)\n\n # function grouper\n f = lambda r: df.loc[r, \"A\"]\n result = df.groupby([\"cat\", f], as_index=False, observed=True).sum()\n expected = DataFrame(\n {\n \"cat\": Categorical([1, 2], categories=df.cat.cat.categories),\n \"A\": [10, 22],\n \"B\": [101, 205],\n },\n columns=[\"cat\", \"A\", \"B\"],\n )\n tm.assert_frame_equal(result, expected)\n\n # another not in-axis grouper (conflicting names in index)\n s = Series([\"a\", \"b\", \"b\"], name=\"cat\")\n result = df.groupby([\"cat\", s], as_index=False, observed=True).sum()\n tm.assert_frame_equal(result, expected)\n\n # is original index dropped?\n group_columns = [\"cat\", \"A\"]\n expected = DataFrame(\n {\n \"cat\": Categorical([1, 2], categories=df.cat.cat.categories),\n \"A\": [10, 11],\n \"B\": [101, 205],\n },\n columns=[\"cat\", \"A\", \"B\"],\n )\n\n for name in [None, \"X\", \"B\"]:\n df.index = Index(list(\"abc\"), name=name)\n result = df.groupby(group_columns, as_index=False, observed=True).sum()\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_preserve_categories():\n # GH-13179\n categories = list(\"abc\")\n\n # ordered=True\n df = DataFrame({\"A\": Categorical(list(\"ba\"), categories=categories, ordered=True)})\n index = CategoricalIndex(categories, categories, ordered=True)\n tm.assert_index_equal(\n df.groupby(\"A\", sort=True, observed=False).first().index, index\n )\n tm.assert_index_equal(\n df.groupby(\"A\", sort=False, observed=False).first().index, index\n )\n\n # ordered=False\n df = DataFrame({\"A\": Categorical(list(\"ba\"), categories=categories, ordered=False)})\n sort_index = CategoricalIndex(categories, categories, ordered=False)\n nosort_index = CategoricalIndex(list(\"bac\"), list(\"bac\"), ordered=False)\n tm.assert_index_equal(\n df.groupby(\"A\", sort=True, observed=False).first().index, sort_index\n )\n tm.assert_index_equal(\n df.groupby(\"A\", sort=False, observed=False).first().index, nosort_index\n )\n\n\ndef test_preserve_categorical_dtype():\n # GH13743, GH13854\n df = DataFrame(\n {\n \"A\": [1, 2, 1, 1, 2],\n \"B\": [10, 16, 22, 28, 34],\n \"C1\": Categorical(list(\"abaab\"), categories=list(\"bac\"), ordered=False),\n \"C2\": Categorical(list(\"abaab\"), categories=list(\"bac\"), ordered=True),\n }\n )\n # single grouper\n exp_full = DataFrame(\n {\n \"A\": [2.0, 1.0, np.nan],\n \"B\": [25.0, 20.0, np.nan],\n \"C1\": Categorical(list(\"bac\"), categories=list(\"bac\"), ordered=False),\n \"C2\": Categorical(list(\"bac\"), categories=list(\"bac\"), ordered=True),\n }\n )\n for col in [\"C1\", \"C2\"]:\n result1 = df.groupby(by=col, as_index=False, observed=False).mean()\n result2 = df.groupby(by=col, as_index=True, observed=False).mean().reset_index()\n expected = exp_full.reindex(columns=result1.columns)\n tm.assert_frame_equal(result1, 
expected)\n tm.assert_frame_equal(result2, expected)\n\n\[email protected](\n \"func, values\",\n [\n (\"first\", [\"second\", \"first\"]),\n (\"last\", [\"fourth\", \"third\"]),\n (\"min\", [\"fourth\", \"first\"]),\n (\"max\", [\"second\", \"third\"]),\n ],\n)\ndef test_preserve_on_ordered_ops(func, values):\n # gh-18502\n # preserve the categoricals on ops\n c = pd.Categorical([\"first\", \"second\", \"third\", \"fourth\"], ordered=True)\n df = pd.DataFrame({\"payload\": [-1, -2, -1, -2], \"col\": c})\n g = df.groupby(\"payload\")\n result = getattr(g, func)()\n expected = pd.DataFrame(\n {\"payload\": [-2, -1], \"col\": pd.Series(values, dtype=c.dtype)}\n ).set_index(\"payload\")\n tm.assert_frame_equal(result, expected)\n\n\ndef test_categorical_no_compress():\n data = Series(np.random.randn(9))\n\n codes = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])\n cats = Categorical.from_codes(codes, [0, 1, 2], ordered=True)\n\n result = data.groupby(cats, observed=False).mean()\n exp = data.groupby(codes, observed=False).mean()\n\n exp.index = CategoricalIndex(\n exp.index, categories=cats.categories, ordered=cats.ordered\n )\n assert_series_equal(result, exp)\n\n codes = np.array([0, 0, 0, 1, 1, 1, 3, 3, 3])\n cats = Categorical.from_codes(codes, [0, 1, 2, 3], ordered=True)\n\n result = data.groupby(cats, observed=False).mean()\n exp = data.groupby(codes, observed=False).mean().reindex(cats.categories)\n exp.index = CategoricalIndex(\n exp.index, categories=cats.categories, ordered=cats.ordered\n )\n assert_series_equal(result, exp)\n\n cats = Categorical(\n [\"a\", \"a\", \"a\", \"b\", \"b\", \"b\", \"c\", \"c\", \"c\"],\n categories=[\"a\", \"b\", \"c\", \"d\"],\n ordered=True,\n )\n data = DataFrame({\"a\": [1, 1, 1, 2, 2, 2, 3, 4, 5], \"b\": cats})\n\n result = data.groupby(\"b\", observed=False).mean()\n result = result[\"a\"].values\n exp = np.array([1, 2, 4, np.nan])\n tm.assert_numpy_array_equal(result, exp)\n\n\ndef test_sort():\n\n # http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby # noqa: E501\n # This should result in a properly sorted Series so that the plot\n # has a sorted x axis\n # self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')\n\n df = DataFrame({\"value\": np.random.randint(0, 10000, 100)})\n labels = [\"{0} - {1}\".format(i, i + 499) for i in range(0, 10000, 500)]\n cat_labels = Categorical(labels, labels)\n\n df = df.sort_values(by=[\"value\"], ascending=True)\n df[\"value_group\"] = pd.cut(\n df.value, range(0, 10500, 500), right=False, labels=cat_labels\n )\n\n res = df.groupby([\"value_group\"], observed=False)[\"value_group\"].count()\n exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]\n exp.index = CategoricalIndex(exp.index, name=exp.index.name)\n tm.assert_series_equal(res, exp)\n\n\ndef test_sort2():\n # dataframe groupby sort was being ignored # GH 8868\n df = DataFrame(\n [\n [\"(7.5, 10]\", 10, 10],\n [\"(7.5, 10]\", 8, 20],\n [\"(2.5, 5]\", 5, 30],\n [\"(5, 7.5]\", 6, 40],\n [\"(2.5, 5]\", 4, 50],\n [\"(0, 2.5]\", 1, 60],\n [\"(5, 7.5]\", 7, 70],\n ],\n columns=[\"range\", \"foo\", \"bar\"],\n )\n df[\"range\"] = Categorical(df[\"range\"], ordered=True)\n index = CategoricalIndex(\n [\"(0, 2.5]\", \"(2.5, 5]\", \"(5, 7.5]\", \"(7.5, 10]\"], name=\"range\", ordered=True\n )\n expected_sort = DataFrame(\n [[1, 60], [5, 30], [6, 40], [10, 10]], columns=[\"foo\", \"bar\"], index=index\n )\n\n col = \"range\"\n result_sort = df.groupby(col, sort=True, observed=False).first()\n 
assert_frame_equal(result_sort, expected_sort)\n\n # when categories is ordered, group is ordered by category's order\n expected_sort = result_sort\n result_sort = df.groupby(col, sort=False, observed=False).first()\n assert_frame_equal(result_sort, expected_sort)\n\n df[\"range\"] = Categorical(df[\"range\"], ordered=False)\n index = CategoricalIndex(\n [\"(0, 2.5]\", \"(2.5, 5]\", \"(5, 7.5]\", \"(7.5, 10]\"], name=\"range\"\n )\n expected_sort = DataFrame(\n [[1, 60], [5, 30], [6, 40], [10, 10]], columns=[\"foo\", \"bar\"], index=index\n )\n\n index = CategoricalIndex(\n [\"(7.5, 10]\", \"(2.5, 5]\", \"(5, 7.5]\", \"(0, 2.5]\"],\n categories=[\"(7.5, 10]\", \"(2.5, 5]\", \"(5, 7.5]\", \"(0, 2.5]\"],\n name=\"range\",\n )\n expected_nosort = DataFrame(\n [[10, 10], [5, 30], [6, 40], [1, 60]], index=index, columns=[\"foo\", \"bar\"]\n )\n\n col = \"range\"\n\n # this is an unordered categorical, but we allow this ####\n result_sort = df.groupby(col, sort=True, observed=False).first()\n assert_frame_equal(result_sort, expected_sort)\n\n result_nosort = df.groupby(col, sort=False, observed=False).first()\n assert_frame_equal(result_nosort, expected_nosort)\n\n\ndef test_sort_datetimelike():\n # GH10505\n\n # use same data as test_groupby_sort_categorical, which category is\n # corresponding to datetime.month\n df = DataFrame(\n {\n \"dt\": [\n datetime(2011, 7, 1),\n datetime(2011, 7, 1),\n datetime(2011, 2, 1),\n datetime(2011, 5, 1),\n datetime(2011, 2, 1),\n datetime(2011, 1, 1),\n datetime(2011, 5, 1),\n ],\n \"foo\": [10, 8, 5, 6, 4, 1, 7],\n \"bar\": [10, 20, 30, 40, 50, 60, 70],\n },\n columns=[\"dt\", \"foo\", \"bar\"],\n )\n\n # ordered=True\n df[\"dt\"] = Categorical(df[\"dt\"], ordered=True)\n index = [\n datetime(2011, 1, 1),\n datetime(2011, 2, 1),\n datetime(2011, 5, 1),\n datetime(2011, 7, 1),\n ]\n result_sort = DataFrame(\n [[1, 60], [5, 30], [6, 40], [10, 10]], columns=[\"foo\", \"bar\"]\n )\n result_sort.index = CategoricalIndex(index, name=\"dt\", ordered=True)\n\n index = [\n datetime(2011, 7, 1),\n datetime(2011, 2, 1),\n datetime(2011, 5, 1),\n datetime(2011, 1, 1),\n ]\n result_nosort = DataFrame(\n [[10, 10], [5, 30], [6, 40], [1, 60]], columns=[\"foo\", \"bar\"]\n )\n result_nosort.index = CategoricalIndex(\n index, categories=index, name=\"dt\", ordered=True\n )\n\n col = \"dt\"\n assert_frame_equal(result_sort, df.groupby(col, sort=True, observed=False).first())\n\n # when categories is ordered, group is ordered by category's order\n assert_frame_equal(result_sort, df.groupby(col, sort=False, observed=False).first())\n\n # ordered = False\n df[\"dt\"] = Categorical(df[\"dt\"], ordered=False)\n index = [\n datetime(2011, 1, 1),\n datetime(2011, 2, 1),\n datetime(2011, 5, 1),\n datetime(2011, 7, 1),\n ]\n result_sort = DataFrame(\n [[1, 60], [5, 30], [6, 40], [10, 10]], columns=[\"foo\", \"bar\"]\n )\n result_sort.index = CategoricalIndex(index, name=\"dt\")\n\n index = [\n datetime(2011, 7, 1),\n datetime(2011, 2, 1),\n datetime(2011, 5, 1),\n datetime(2011, 1, 1),\n ]\n result_nosort = DataFrame(\n [[10, 10], [5, 30], [6, 40], [1, 60]], columns=[\"foo\", \"bar\"]\n )\n result_nosort.index = CategoricalIndex(index, categories=index, name=\"dt\")\n\n col = \"dt\"\n assert_frame_equal(result_sort, df.groupby(col, sort=True, observed=False).first())\n assert_frame_equal(\n result_nosort, df.groupby(col, sort=False, observed=False).first()\n )\n\n\ndef test_empty_sum():\n # https://github.com/pandas-dev/pandas/issues/18678\n df = DataFrame(\n {\"A\": 
Categorical([\"a\", \"a\", \"b\"], categories=[\"a\", \"b\", \"c\"]), \"B\": [1, 2, 1]}\n )\n expected_idx = CategoricalIndex([\"a\", \"b\", \"c\"], name=\"A\")\n\n # 0 by default\n result = df.groupby(\"A\", observed=False).B.sum()\n expected = Series([3, 1, 0], expected_idx, name=\"B\")\n tm.assert_series_equal(result, expected)\n\n # min_count=0\n result = df.groupby(\"A\", observed=False).B.sum(min_count=0)\n expected = Series([3, 1, 0], expected_idx, name=\"B\")\n tm.assert_series_equal(result, expected)\n\n # min_count=1\n result = df.groupby(\"A\", observed=False).B.sum(min_count=1)\n expected = Series([3, 1, np.nan], expected_idx, name=\"B\")\n tm.assert_series_equal(result, expected)\n\n # min_count>1\n result = df.groupby(\"A\", observed=False).B.sum(min_count=2)\n expected = Series([3, np.nan, np.nan], expected_idx, name=\"B\")\n tm.assert_series_equal(result, expected)\n\n\ndef test_empty_prod():\n # https://github.com/pandas-dev/pandas/issues/18678\n df = DataFrame(\n {\"A\": Categorical([\"a\", \"a\", \"b\"], categories=[\"a\", \"b\", \"c\"]), \"B\": [1, 2, 1]}\n )\n\n expected_idx = CategoricalIndex([\"a\", \"b\", \"c\"], name=\"A\")\n\n # 1 by default\n result = df.groupby(\"A\", observed=False).B.prod()\n expected = Series([2, 1, 1], expected_idx, name=\"B\")\n tm.assert_series_equal(result, expected)\n\n # min_count=0\n result = df.groupby(\"A\", observed=False).B.prod(min_count=0)\n expected = Series([2, 1, 1], expected_idx, name=\"B\")\n tm.assert_series_equal(result, expected)\n\n # min_count=1\n result = df.groupby(\"A\", observed=False).B.prod(min_count=1)\n expected = Series([2, 1, np.nan], expected_idx, name=\"B\")\n tm.assert_series_equal(result, expected)\n\n\ndef test_groupby_multiindex_categorical_datetime():\n # https://github.com/pandas-dev/pandas/issues/21390\n\n df = DataFrame(\n {\n \"key1\": Categorical(list(\"abcbabcba\")),\n \"key2\": Categorical(\n list(pd.date_range(\"2018-06-01 00\", freq=\"1T\", periods=3)) * 3\n ),\n \"values\": np.arange(9),\n }\n )\n result = df.groupby([\"key1\", \"key2\"]).mean()\n\n idx = MultiIndex.from_product(\n [\n Categorical([\"a\", \"b\", \"c\"]),\n Categorical(pd.date_range(\"2018-06-01 00\", freq=\"1T\", periods=3)),\n ],\n names=[\"key1\", \"key2\"],\n )\n expected = DataFrame({\"values\": [0, 4, 8, 3, 4, 5, 6, np.nan, 2]}, index=idx)\n assert_frame_equal(result, expected)\n\n\[email protected](\n \"as_index, expected\",\n [\n (\n True,\n Series(\n index=MultiIndex.from_arrays(\n [Series([1, 1, 2], dtype=\"category\"), [1, 2, 2]], names=[\"a\", \"b\"]\n ),\n data=[1, 2, 3],\n name=\"x\",\n ),\n ),\n (\n False,\n DataFrame(\n {\n \"a\": Series([1, 1, 2], dtype=\"category\"),\n \"b\": [1, 2, 2],\n \"x\": [1, 2, 3],\n }\n ),\n ),\n ],\n)\ndef test_groupby_agg_observed_true_single_column(as_index, expected):\n # GH-23970\n df = DataFrame(\n {\"a\": Series([1, 1, 2], dtype=\"category\"), \"b\": [1, 2, 2], \"x\": [1, 2, 3]}\n )\n\n result = df.groupby([\"a\", \"b\"], as_index=as_index, observed=True)[\"x\"].sum()\n\n assert_equal(result, expected)\n\n\[email protected](\"fill_value\", [None, np.nan, pd.NaT])\ndef test_shift(fill_value):\n ct = Categorical(\n [\"a\", \"b\", \"c\", \"d\"], categories=[\"a\", \"b\", \"c\", \"d\"], ordered=False\n )\n expected = Categorical(\n [None, \"a\", \"b\", \"c\"], categories=[\"a\", \"b\", \"c\", \"d\"], ordered=False\n )\n res = ct.shift(1, fill_value=fill_value)\n assert_equal(res, expected)\n\n\[email protected]\ndef df_cat(df):\n \"\"\"\n DataFrame with multiple categorical columns 
and a column of integers.\n Shortened so as not to contain all possible combinations of categories.\n Useful for testing `observed` kwarg functionality on GroupBy objects.\n\n Parameters\n ----------\n df: DataFrame\n Non-categorical, longer DataFrame from another fixture, used to derive\n this one\n\n Returns\n -------\n df_cat: DataFrame\n \"\"\"\n df_cat = df.copy()[:4] # leave out some groups\n df_cat[\"A\"] = df_cat[\"A\"].astype(\"category\")\n df_cat[\"B\"] = df_cat[\"B\"].astype(\"category\")\n df_cat[\"C\"] = Series([1, 2, 3, 4])\n df_cat = df_cat.drop([\"D\"], axis=1)\n return df_cat\n\n\[email protected](\n \"operation, kwargs\", [(\"agg\", dict(dtype=\"category\")), (\"apply\", dict())]\n)\ndef test_seriesgroupby_observed_true(df_cat, operation, kwargs):\n # GH 24880\n index = MultiIndex.from_frame(\n DataFrame(\n {\"A\": [\"foo\", \"foo\", \"bar\", \"bar\"], \"B\": [\"one\", \"two\", \"one\", \"three\"]},\n **kwargs\n )\n )\n expected = Series(data=[1, 3, 2, 4], index=index, name=\"C\")\n grouped = df_cat.groupby([\"A\", \"B\"], observed=True)[\"C\"]\n result = getattr(grouped, operation)(sum)\n assert_series_equal(result, expected)\n\n\[email protected](\"operation\", [\"agg\", \"apply\"])\[email protected](\"observed\", [False, None])\ndef test_seriesgroupby_observed_false_or_none(df_cat, observed, operation):\n # GH 24880\n index, _ = MultiIndex.from_product(\n [\n CategoricalIndex([\"bar\", \"foo\"], ordered=False),\n CategoricalIndex([\"one\", \"three\", \"two\"], ordered=False),\n ],\n names=[\"A\", \"B\"],\n ).sortlevel()\n\n expected = Series(data=[2, 4, np.nan, 1, np.nan, 3], index=index, name=\"C\")\n grouped = df_cat.groupby([\"A\", \"B\"], observed=observed)[\"C\"]\n result = getattr(grouped, operation)(sum)\n assert_series_equal(result, expected)\n\n\[email protected](\n \"observed, index, data\",\n [\n (\n True,\n MultiIndex.from_tuples(\n [\n (\"foo\", \"one\", \"min\"),\n (\"foo\", \"one\", \"max\"),\n (\"foo\", \"two\", \"min\"),\n (\"foo\", \"two\", \"max\"),\n (\"bar\", \"one\", \"min\"),\n (\"bar\", \"one\", \"max\"),\n (\"bar\", \"three\", \"min\"),\n (\"bar\", \"three\", \"max\"),\n ],\n names=[\"A\", \"B\", None],\n ),\n [1, 1, 3, 3, 2, 2, 4, 4],\n ),\n (\n False,\n MultiIndex.from_product(\n [\n CategoricalIndex([\"bar\", \"foo\"], ordered=False),\n CategoricalIndex([\"one\", \"three\", \"two\"], ordered=False),\n Index([\"min\", \"max\"]),\n ],\n names=[\"A\", \"B\", None],\n ),\n [2, 2, 4, 4, np.nan, np.nan, 1, 1, np.nan, np.nan, 3, 3],\n ),\n (\n None,\n MultiIndex.from_product(\n [\n CategoricalIndex([\"bar\", \"foo\"], ordered=False),\n CategoricalIndex([\"one\", \"three\", \"two\"], ordered=False),\n Index([\"min\", \"max\"]),\n ],\n names=[\"A\", \"B\", None],\n ),\n [2, 2, 4, 4, np.nan, np.nan, 1, 1, np.nan, np.nan, 3, 3],\n ),\n ],\n)\ndef test_seriesgroupby_observed_apply_dict(df_cat, observed, index, data):\n # GH 24880\n expected = Series(data=data, index=index, name=\"C\")\n result = df_cat.groupby([\"A\", \"B\"], observed=observed)[\"C\"].apply(\n lambda x: OrderedDict([(\"min\", x.min()), (\"max\", x.max())])\n )\n assert_series_equal(result, expected)\n",
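The tests archived in the entry above repeatedly exercise pandas' `observed` keyword for categorical groupers. As a quick orientation — not part of the archived file; the DataFrame, column names, and values below are illustrative only — a minimal sketch of the behavior those tests assert:

# Minimal sketch of the `observed` behavior exercised by the archived tests.
# The data here is made up for illustration, not taken from the test file.
import pandas as pd

cat = pd.Categorical(["a", "c", "a"], categories=["a", "b", "c"])
df = pd.DataFrame({"cat": cat, "vals": [1, 2, 3]})

# observed=True: only categories that actually occur in the data form groups.
print(df.groupby("cat", observed=True)["vals"].sum())   # groups: a -> 4, c -> 2

# observed=False: every declared category gets a group; an empty group
# reduces to the operation's identity (sum of an empty group is 0 by default).
print(df.groupby("cat", observed=False)["vals"].sum())  # groups: a -> 4, b -> 0, c -> 2

The same switch drives the expected `groups` dictionaries, the NaN-filled aggregation rows, and the `min_count` cases asserted throughout the entry.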
"#******************************************************************************\n# Copyright (C) 2013 Kenneth L. Ho\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer. Redistributions in binary\n# form must reproduce the above copyright notice, this list of conditions and\n# the following disclaimer in the documentation and/or other materials\n# provided with the distribution.\n#\n# None of the names of the copyright holders may be used to endorse or\n# promote products derived from this software without specific prior written\n# permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n#******************************************************************************\n\n# Python module for interfacing with `id_dist`.\n\nr\"\"\"\n======================================================================\nInterpolative matrix decomposition (:mod:`scipy.linalg.interpolative`)\n======================================================================\n\n.. moduleauthor:: Kenneth L. Ho <[email protected]>\n\n.. versionadded:: 0.13\n\n.. currentmodule:: scipy.linalg.interpolative\n\nAn interpolative decomposition (ID) of a matrix :math:`A \\in\n\\mathbb{C}^{m \\times n}` of rank :math:`k \\leq \\min \\{ m, n \\}` is a\nfactorization\n\n.. math::\n A \\Pi =\n \\begin{bmatrix}\n A \\Pi_{1} & A \\Pi_{2}\n \\end{bmatrix} =\n A \\Pi_{1}\n \\begin{bmatrix}\n I & T\n \\end{bmatrix},\n\nwhere :math:`\\Pi = [\\Pi_{1}, \\Pi_{2}]` is a permutation matrix with\n:math:`\\Pi_{1} \\in \\{ 0, 1 \\}^{n \\times k}`, i.e., :math:`A \\Pi_{2} =\nA \\Pi_{1} T`. This can equivalently be written as :math:`A = BP`,\nwhere :math:`B = A \\Pi_{1}` and :math:`P = [I, T] \\Pi^{\\mathsf{T}}`\nare the *skeleton* and *interpolation matrices*, respectively.\n\nIf :math:`A` does not have exact rank :math:`k`, then there exists an\napproximation in the form of an ID such that :math:`A = BP + E`, where\n:math:`\\| E \\| \\sim \\sigma_{k + 1}` is on the order of the :math:`(k +\n1)`-th largest singular value of :math:`A`. Note that :math:`\\sigma_{k\n+ 1}` is the best possible error for a rank-:math:`k` approximation\nand, in fact, is achieved by the singular value decomposition (SVD)\n:math:`A \\approx U S V^{*}`, where :math:`U \\in \\mathbb{C}^{m \\times\nk}` and :math:`V \\in \\mathbb{C}^{n \\times k}` have orthonormal columns\nand :math:`S = \\mathop{\\mathrm{diag}} (\\sigma_{i}) \\in \\mathbb{C}^{k\n\\times k}` is diagonal with nonnegative entries. 
The principal\nadvantages of using an ID over an SVD are that:\n\n- it is cheaper to construct;\n- it preserves the structure of :math:`A`; and\n- it is more efficient to compute with in light of the identity submatrix of :math:`P`.\n\nRoutines\n========\n\nMain functionality:\n\n.. autosummary::\n :toctree: generated/\n\n interp_decomp\n reconstruct_matrix_from_id\n reconstruct_interp_matrix\n reconstruct_skel_matrix\n id_to_svd\n svd\n estimate_spectral_norm\n estimate_spectral_norm_diff\n estimate_rank\n\nSupport functions:\n\n.. autosummary::\n :toctree: generated/\n\n seed\n rand\n\n\nReferences\n==========\n\nThis module uses the ID software package [1]_ by Martinsson, Rokhlin,\nShkolnisky, and Tygert, which is a Fortran library for computing IDs\nusing various algorithms, including the rank-revealing QR approach of\n[2]_ and the more recent randomized methods described in [3]_, [4]_,\nand [5]_. This module exposes its functionality in a way convenient\nfor Python users. Note that this module does not add any functionality\nbeyond that of organizing a simpler and more consistent interface.\n\nWe advise the user to consult also the `documentation for the ID package\n<http://tygert.com/id_doc.4.pdf>`_.\n\n.. [1] P.G. Martinsson, V. Rokhlin, Y. Shkolnisky, M. Tygert. \"ID: a\n software package for low-rank approximation of matrices via interpolative\n decompositions, version 0.2.\" http://tygert.com/id_doc.4.pdf.\n\n.. [2] H. Cheng, Z. Gimbutas, P.G. Martinsson, V. Rokhlin. \"On the\n compression of low rank matrices.\" *SIAM J. Sci. Comput.* 26 (4): 1389--1404,\n 2005. :doi:`10.1137/030602678`.\n\n.. [3] E. Liberty, F. Woolfe, P.G. Martinsson, V. Rokhlin, M.\n Tygert. \"Randomized algorithms for the low-rank approximation of matrices.\"\n *Proc. Natl. Acad. Sci. U.S.A.* 104 (51): 20167--20172, 2007.\n :doi:`10.1073/pnas.0709640104`.\n\n.. [4] P.G. Martinsson, V. Rokhlin, M. Tygert. \"A randomized\n algorithm for the decomposition of matrices.\" *Appl. Comput. Harmon. Anal.* 30\n (1): 47--68, 2011. :doi:`10.1016/j.acha.2010.02.003`.\n\n.. [5] F. Woolfe, E. Liberty, V. Rokhlin, M. Tygert. \"A fast\n randomized algorithm for the approximation of matrices.\" *Appl. Comput.\n Harmon. Anal.* 25 (3): 335--366, 2008. :doi:`10.1016/j.acha.2007.12.002`.\n\n\nTutorial\n========\n\nInitializing\n------------\n\nThe first step is to import :mod:`scipy.linalg.interpolative` by issuing the\ncommand:\n\n>>> import scipy.linalg.interpolative as sli\n\nNow let's build a matrix. For this, we consider a Hilbert matrix, which is well\nknow to have low rank:\n\n>>> from scipy.linalg import hilbert\n>>> n = 1000\n>>> A = hilbert(n)\n\nWe can also do this explicitly via:\n\n>>> import numpy as np\n>>> n = 1000\n>>> A = np.empty((n, n), order='F')\n>>> for j in range(n):\n>>> for i in range(m):\n>>> A[i,j] = 1. / (i + j + 1)\n\nNote the use of the flag ``order='F'`` in :func:`numpy.empty`. This\ninstantiates the matrix in Fortran-contiguous order and is important for\navoiding data copying when passing to the backend.\n\nWe then define multiplication routines for the matrix by regarding it as a\n:class:`scipy.sparse.linalg.LinearOperator`:\n\n>>> from scipy.sparse.linalg import aslinearoperator\n>>> L = aslinearoperator(A)\n\nThis automatically sets up methods describing the action of the matrix and its\nadjoint on a vector.\n\nComputing an ID\n---------------\n\nWe have several choices of algorithm to compute an ID. These fall largely\naccording to two dichotomies:\n\n1. 
how the matrix is represented, i.e., via its entries or via its action on a\n vector; and\n2. whether to approximate it to a fixed relative precision or to a fixed rank.\n\nWe step through each choice in turn below.\n\nIn all cases, the ID is represented by three parameters:\n\n1. a rank ``k``;\n2. an index array ``idx``; and\n3. interpolation coefficients ``proj``.\n\nThe ID is specified by the relation\n``np.dot(A[:,idx[:k]], proj) == A[:,idx[k:]]``.\n\nFrom matrix entries\n...................\n\nWe first consider a matrix given in terms of its entries.\n\nTo compute an ID to a fixed precision, type:\n\n>>> k, idx, proj = sli.interp_decomp(A, eps)\n\nwhere ``eps < 1`` is the desired precision.\n\nTo compute an ID to a fixed rank, use:\n\n>>> idx, proj = sli.interp_decomp(A, k)\n\nwhere ``k >= 1`` is the desired rank.\n\nBoth algorithms use random sampling and are usually faster than the\ncorresponding older, deterministic algorithms, which can be accessed via the\ncommands:\n\n>>> k, idx, proj = sli.interp_decomp(A, eps, rand=False)\n\nand:\n\n>>> idx, proj = sli.interp_decomp(A, k, rand=False)\n\nrespectively.\n\nFrom matrix action\n..................\n\nNow consider a matrix given in terms of its action on a vector as a\n:class:`scipy.sparse.linalg.LinearOperator`.\n\nTo compute an ID to a fixed precision, type:\n\n>>> k, idx, proj = sli.interp_decomp(L, eps)\n\nTo compute an ID to a fixed rank, use:\n\n>>> idx, proj = sli.interp_decomp(L, k)\n\nThese algorithms are randomized.\n\nReconstructing an ID\n--------------------\n\nThe ID routines above do not output the skeleton and interpolation matrices\nexplicitly but instead return the relevant information in a more compact (and\nsometimes more useful) form. To build these matrices, write:\n\n>>> B = sli.reconstruct_skel_matrix(A, k, idx)\n\nfor the skeleton matrix and:\n\n>>> P = sli.reconstruct_interp_matrix(idx, proj)\n\nfor the interpolation matrix. The ID approximation can then be computed as:\n\n>>> C = np.dot(B, P)\n\nThis can also be constructed directly using:\n\n>>> C = sli.reconstruct_matrix_from_id(B, idx, proj)\n\nwithout having to first compute ``P``.\n\nAlternatively, this can be done explicitly as well using:\n\n>>> B = A[:,idx[:k]]\n>>> P = np.hstack([np.eye(k), proj])[:,np.argsort(idx)]\n>>> C = np.dot(B, P)\n\nComputing an SVD\n----------------\n\nAn ID can be converted to an SVD via the command:\n\n>>> U, S, V = sli.id_to_svd(B, idx, proj)\n\nThe SVD approximation is then:\n\n>>> C = np.dot(U, np.dot(np.diag(S), np.dot(V.conj().T)))\n\nThe SVD can also be computed \"fresh\" by combining both the ID and conversion\nsteps into one command. 
Following the various ID algorithms above, there are\ncorrespondingly various SVD algorithms that one can employ.\n\nFrom matrix entries\n...................\n\nWe consider first SVD algorithms for a matrix given in terms of its entries.\n\nTo compute an SVD to a fixed precision, type:\n\n>>> U, S, V = sli.svd(A, eps)\n\nTo compute an SVD to a fixed rank, use:\n\n>>> U, S, V = sli.svd(A, k)\n\nBoth algorithms use random sampling; for the determinstic versions, issue the\nkeyword ``rand=False`` as above.\n\nFrom matrix action\n..................\n\nNow consider a matrix given in terms of its action on a vector.\n\nTo compute an SVD to a fixed precision, type:\n\n>>> U, S, V = sli.svd(L, eps)\n\nTo compute an SVD to a fixed rank, use:\n\n>>> U, S, V = sli.svd(L, k)\n\nUtility routines\n----------------\n\nSeveral utility routines are also available.\n\nTo estimate the spectral norm of a matrix, use:\n\n>>> snorm = sli.estimate_spectral_norm(A)\n\nThis algorithm is based on the randomized power method and thus requires only\nmatrix-vector products. The number of iterations to take can be set using the\nkeyword ``its`` (default: ``its=20``). The matrix is interpreted as a\n:class:`scipy.sparse.linalg.LinearOperator`, but it is also valid to supply it\nas a :class:`numpy.ndarray`, in which case it is trivially converted using\n:func:`scipy.sparse.linalg.aslinearoperator`.\n\nThe same algorithm can also estimate the spectral norm of the difference of two\nmatrices ``A1`` and ``A2`` as follows:\n\n>>> diff = sli.estimate_spectral_norm_diff(A1, A2)\n\nThis is often useful for checking the accuracy of a matrix approximation.\n\nSome routines in :mod:`scipy.linalg.interpolative` require estimating the rank\nof a matrix as well. This can be done with either:\n\n>>> k = sli.estimate_rank(A, eps)\n\nor:\n\n>>> k = sli.estimate_rank(L, eps)\n\ndepending on the representation. The parameter ``eps`` controls the definition\nof the numerical rank.\n\nFinally, the random number generation required for all randomized routines can\nbe controlled via :func:`scipy.linalg.interpolative.seed`. To reset the seed\nvalues to their original values, use:\n\n>>> sli.seed('default')\n\nTo specify the seed values, use:\n\n>>> sli.seed(s)\n\nwhere ``s`` must be an integer or array of 55 floats. 
If an integer, the array\nof floats is obtained by using ``numpy.random.rand`` with the given integer\nseed.\n\nTo simply generate some random numbers, type:\n\n>>> sli.rand(n)\n\nwhere ``n`` is the number of random numbers to generate.\n\nRemarks\n-------\n\nThe above functions all automatically detect the appropriate interface and work\nwith both real and complex data types, passing input arguments to the proper\nbackend routine.\n\n\"\"\"\n\nimport scipy.linalg._interpolative_backend as backend\nimport numpy as np\n\n_DTYPE_ERROR = ValueError(\"invalid input dtype (input must be float64 or complex128)\")\n_TYPE_ERROR = TypeError(\"invalid input type (must be array or LinearOperator)\")\n\n\ndef _is_real(A):\n try:\n if A.dtype == np.complex128:\n return False\n elif A.dtype == np.float64:\n return True\n else:\n raise _DTYPE_ERROR\n except AttributeError:\n raise _TYPE_ERROR\n\n\ndef seed(seed=None):\n \"\"\"\n Seed the internal random number generator used in this ID package.\n\n The generator is a lagged Fibonacci method with 55-element internal state.\n\n Parameters\n ----------\n seed : int, sequence, 'default', optional\n If 'default', the random seed is reset to a default value.\n\n If `seed` is a sequence containing 55 floating-point numbers\n in range [0,1], these are used to set the internal state of\n the generator.\n\n If the value is an integer, the internal state is obtained\n from `numpy.random.mtrand.RandomState` (MT19937) with the integer\n used as the initial seed.\n\n If `seed` is omitted (None), ``numpy.random.rand`` is used to\n initialize the generator.\n\n \"\"\"\n # For details, see :func:`backend.id_srand`, :func:`backend.id_srandi`,\n # and :func:`backend.id_srando`.\n\n if isinstance(seed, str) and seed == 'default':\n backend.id_srando()\n elif hasattr(seed, '__len__'):\n state = np.asfortranarray(seed, dtype=float)\n if state.shape != (55,):\n raise ValueError(\"invalid input size\")\n elif state.min() < 0 or state.max() > 1:\n raise ValueError(\"values not in range [0,1]\")\n backend.id_srandi(state)\n elif seed is None:\n backend.id_srandi(np.random.rand(55))\n else:\n rnd = np.random.RandomState(seed)\n backend.id_srandi(rnd.rand(55))\n\n\ndef rand(*shape):\n \"\"\"\n Generate standard uniform pseudorandom numbers via a very efficient lagged\n Fibonacci method.\n\n This routine is used for all random number generation in this package and\n can affect ID and SVD results.\n\n Parameters\n ----------\n shape\n Shape of output array\n\n \"\"\"\n # For details, see :func:`backend.id_srand`, and :func:`backend.id_srando`.\n return backend.id_srand(np.prod(shape)).reshape(shape)\n\n\ndef interp_decomp(A, eps_or_k, rand=True):\n \"\"\"\n Compute ID of a matrix.\n\n An ID of a matrix `A` is a factorization defined by a rank `k`, a column\n index array `idx`, and interpolation coefficients `proj` such that::\n\n numpy.dot(A[:,idx[:k]], proj) = A[:,idx[k:]]\n\n The original matrix can then be reconstructed as::\n\n numpy.hstack([A[:,idx[:k]],\n numpy.dot(A[:,idx[:k]], proj)]\n )[:,numpy.argsort(idx)]\n\n or via the routine :func:`reconstruct_matrix_from_id`. This can\n equivalently be written as::\n\n numpy.dot(A[:,idx[:k]],\n numpy.hstack([numpy.eye(k), proj])\n )[:,np.argsort(idx)]\n\n in terms of the skeleton and interpolation matrices::\n\n B = A[:,idx[:k]]\n\n and::\n\n P = numpy.hstack([numpy.eye(k), proj])[:,np.argsort(idx)]\n\n respectively. 
See also :func:`reconstruct_interp_matrix` and\n :func:`reconstruct_skel_matrix`.\n\n The ID can be computed to any relative precision or rank (depending on the\n value of `eps_or_k`). If a precision is specified (`eps_or_k < 1`), then\n this function has the output signature::\n\n k, idx, proj = interp_decomp(A, eps_or_k)\n\n Otherwise, if a rank is specified (`eps_or_k >= 1`), then the output\n signature is::\n\n idx, proj = interp_decomp(A, eps_or_k)\n\n .. This function automatically detects the form of the input parameters\n and passes them to the appropriate backend. For details, see\n :func:`backend.iddp_id`, :func:`backend.iddp_aid`,\n :func:`backend.iddp_rid`, :func:`backend.iddr_id`,\n :func:`backend.iddr_aid`, :func:`backend.iddr_rid`,\n :func:`backend.idzp_id`, :func:`backend.idzp_aid`,\n :func:`backend.idzp_rid`, :func:`backend.idzr_id`,\n :func:`backend.idzr_aid`, and :func:`backend.idzr_rid`.\n\n Parameters\n ----------\n A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator` with `rmatvec`\n Matrix to be factored\n eps_or_k : float or int\n Relative error (if `eps_or_k < 1`) or rank (if `eps_or_k >= 1`) of\n approximation.\n rand : bool, optional\n Whether to use random sampling if `A` is of type :class:`numpy.ndarray`\n (randomized algorithms are always used if `A` is of type\n :class:`scipy.sparse.linalg.LinearOperator`).\n\n Returns\n -------\n k : int\n Rank required to achieve specified relative precision if\n `eps_or_k < 1`.\n idx : :class:`numpy.ndarray`\n Column index array.\n proj : :class:`numpy.ndarray`\n Interpolation coefficients.\n \"\"\"\n from scipy.sparse.linalg import LinearOperator\n\n real = _is_real(A)\n\n if isinstance(A, np.ndarray):\n if eps_or_k < 1:\n eps = eps_or_k\n if rand:\n if real:\n k, idx, proj = backend.iddp_aid(eps, A)\n else:\n k, idx, proj = backend.idzp_aid(eps, A)\n else:\n if real:\n k, idx, proj = backend.iddp_id(eps, A)\n else:\n k, idx, proj = backend.idzp_id(eps, A)\n return k, idx - 1, proj\n else:\n k = int(eps_or_k)\n if rand:\n if real:\n idx, proj = backend.iddr_aid(A, k)\n else:\n idx, proj = backend.idzr_aid(A, k)\n else:\n if real:\n idx, proj = backend.iddr_id(A, k)\n else:\n idx, proj = backend.idzr_id(A, k)\n return idx - 1, proj\n elif isinstance(A, LinearOperator):\n m, n = A.shape\n matveca = A.rmatvec\n if eps_or_k < 1:\n eps = eps_or_k\n if real:\n k, idx, proj = backend.iddp_rid(eps, m, n, matveca)\n else:\n k, idx, proj = backend.idzp_rid(eps, m, n, matveca)\n return k, idx - 1, proj\n else:\n k = int(eps_or_k)\n if real:\n idx, proj = backend.iddr_rid(m, n, matveca, k)\n else:\n idx, proj = backend.idzr_rid(m, n, matveca, k)\n return idx - 1, proj\n else:\n raise _TYPE_ERROR\n\n\ndef reconstruct_matrix_from_id(B, idx, proj):\n \"\"\"\n Reconstruct matrix from its ID.\n\n A matrix `A` with skeleton matrix `B` and ID indices and coefficients `idx`\n and `proj`, respectively, can be reconstructed as::\n\n numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)]\n\n See also :func:`reconstruct_interp_matrix` and\n :func:`reconstruct_skel_matrix`.\n\n .. This function automatically detects the matrix data type and calls the\n appropriate backend. 
For details, see :func:`backend.idd_reconid` and\n :func:`backend.idz_reconid`.\n\n Parameters\n ----------\n B : :class:`numpy.ndarray`\n Skeleton matrix.\n idx : :class:`numpy.ndarray`\n Column index array.\n proj : :class:`numpy.ndarray`\n Interpolation coefficients.\n\n Returns\n -------\n :class:`numpy.ndarray`\n Reconstructed matrix.\n \"\"\"\n if _is_real(B):\n return backend.idd_reconid(B, idx + 1, proj)\n else:\n return backend.idz_reconid(B, idx + 1, proj)\n\n\ndef reconstruct_interp_matrix(idx, proj):\n \"\"\"\n Reconstruct interpolation matrix from ID.\n\n The interpolation matrix can be reconstructed from the ID indices and\n coefficients `idx` and `proj`, respectively, as::\n\n P = numpy.hstack([numpy.eye(proj.shape[0]), proj])[:,numpy.argsort(idx)]\n\n The original matrix can then be reconstructed from its skeleton matrix `B`\n via::\n\n numpy.dot(B, P)\n\n See also :func:`reconstruct_matrix_from_id` and\n :func:`reconstruct_skel_matrix`.\n\n .. This function automatically detects the matrix data type and calls the\n appropriate backend. For details, see :func:`backend.idd_reconint` and\n :func:`backend.idz_reconint`.\n\n Parameters\n ----------\n idx : :class:`numpy.ndarray`\n Column index array.\n proj : :class:`numpy.ndarray`\n Interpolation coefficients.\n\n Returns\n -------\n :class:`numpy.ndarray`\n Interpolation matrix.\n \"\"\"\n if _is_real(proj):\n return backend.idd_reconint(idx + 1, proj)\n else:\n return backend.idz_reconint(idx + 1, proj)\n\n\ndef reconstruct_skel_matrix(A, k, idx):\n \"\"\"\n Reconstruct skeleton matrix from ID.\n\n The skeleton matrix can be reconstructed from the original matrix `A` and its\n ID rank and indices `k` and `idx`, respectively, as::\n\n B = A[:,idx[:k]]\n\n The original matrix can then be reconstructed via::\n\n numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)]\n\n See also :func:`reconstruct_matrix_from_id` and\n :func:`reconstruct_interp_matrix`.\n\n .. This function automatically detects the matrix data type and calls the\n appropriate backend. For details, see :func:`backend.idd_copycols` and\n :func:`backend.idz_copycols`.\n\n Parameters\n ----------\n A : :class:`numpy.ndarray`\n Original matrix.\n k : int\n Rank of ID.\n idx : :class:`numpy.ndarray`\n Column index array.\n\n Returns\n -------\n :class:`numpy.ndarray`\n Skeleton matrix.\n \"\"\"\n if _is_real(A):\n return backend.idd_copycols(A, k, idx + 1)\n else:\n return backend.idz_copycols(A, k, idx + 1)\n\n\ndef id_to_svd(B, idx, proj):\n \"\"\"\n Convert ID to SVD.\n\n The SVD reconstruction of a matrix with skeleton matrix `B` and ID indices and\n coefficients `idx` and `proj`, respectively, is::\n\n U, S, V = id_to_svd(B, idx, proj)\n A = numpy.dot(U, numpy.dot(numpy.diag(S), V.conj().T))\n\n See also :func:`svd`.\n\n .. This function automatically detects the matrix data type and calls the\n appropriate backend. 
For details, see :func:`backend.idd_id2svd` and\n :func:`backend.idz_id2svd`.\n\n Parameters\n ----------\n B : :class:`numpy.ndarray`\n Skeleton matrix.\n idx : :class:`numpy.ndarray`\n Column index array.\n proj : :class:`numpy.ndarray`\n Interpolation coefficients.\n\n Returns\n -------\n U : :class:`numpy.ndarray`\n Left singular vectors.\n S : :class:`numpy.ndarray`\n Singular values.\n V : :class:`numpy.ndarray`\n Right singular vectors.\n \"\"\"\n if _is_real(B):\n U, V, S = backend.idd_id2svd(B, idx + 1, proj)\n else:\n U, V, S = backend.idz_id2svd(B, idx + 1, proj)\n return U, S, V\n\n\ndef estimate_spectral_norm(A, its=20):\n \"\"\"\n Estimate spectral norm of a matrix by the randomized power method.\n\n .. This function automatically detects the matrix data type and calls the\n appropriate backend. For details, see :func:`backend.idd_snorm` and\n :func:`backend.idz_snorm`.\n\n Parameters\n ----------\n A : :class:`scipy.sparse.linalg.LinearOperator`\n Matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with the\n `matvec` and `rmatvec` methods (to apply the matrix and its adjoint).\n its : int, optional\n Number of power method iterations.\n\n Returns\n -------\n float\n Spectral norm estimate.\n \"\"\"\n from scipy.sparse.linalg import aslinearoperator\n A = aslinearoperator(A)\n m, n = A.shape\n matvec = lambda x: A. matvec(x)\n matveca = lambda x: A.rmatvec(x)\n if _is_real(A):\n return backend.idd_snorm(m, n, matveca, matvec, its=its)\n else:\n return backend.idz_snorm(m, n, matveca, matvec, its=its)\n\n\ndef estimate_spectral_norm_diff(A, B, its=20):\n \"\"\"\n Estimate spectral norm of the difference of two matrices by the randomized\n power method.\n\n .. This function automatically detects the matrix data type and calls the\n appropriate backend. For details, see :func:`backend.idd_diffsnorm` and\n :func:`backend.idz_diffsnorm`.\n\n Parameters\n ----------\n A : :class:`scipy.sparse.linalg.LinearOperator`\n First matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with the\n `matvec` and `rmatvec` methods (to apply the matrix and its adjoint).\n B : :class:`scipy.sparse.linalg.LinearOperator`\n Second matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with\n the `matvec` and `rmatvec` methods (to apply the matrix and its adjoint).\n its : int, optional\n Number of power method iterations.\n\n Returns\n -------\n float\n Spectral norm estimate of matrix difference.\n \"\"\"\n from scipy.sparse.linalg import aslinearoperator\n A = aslinearoperator(A)\n B = aslinearoperator(B)\n m, n = A.shape\n matvec1 = lambda x: A. matvec(x)\n matveca1 = lambda x: A.rmatvec(x)\n matvec2 = lambda x: B. matvec(x)\n matveca2 = lambda x: B.rmatvec(x)\n if _is_real(A):\n return backend.idd_diffsnorm(\n m, n, matveca1, matveca2, matvec1, matvec2, its=its)\n else:\n return backend.idz_diffsnorm(\n m, n, matveca1, matveca2, matvec1, matvec2, its=its)\n\n\ndef svd(A, eps_or_k, rand=True):\n \"\"\"\n Compute SVD of a matrix via an ID.\n\n An SVD of a matrix `A` is a factorization::\n\n A = numpy.dot(U, numpy.dot(numpy.diag(S), V.conj().T))\n\n where `U` and `V` have orthonormal columns and `S` is nonnegative.\n\n The SVD can be computed to any relative precision or rank (depending on the\n value of `eps_or_k`).\n\n See also :func:`interp_decomp` and :func:`id_to_svd`.\n\n .. This function automatically detects the form of the input parameters and\n passes them to the appropriate backend. 
For details, see\n :func:`backend.iddp_svd`, :func:`backend.iddp_asvd`,\n :func:`backend.iddp_rsvd`, :func:`backend.iddr_svd`,\n :func:`backend.iddr_asvd`, :func:`backend.iddr_rsvd`,\n :func:`backend.idzp_svd`, :func:`backend.idzp_asvd`,\n :func:`backend.idzp_rsvd`, :func:`backend.idzr_svd`,\n :func:`backend.idzr_asvd`, and :func:`backend.idzr_rsvd`.\n\n Parameters\n ----------\n A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator`\n Matrix to be factored, given as either a :class:`numpy.ndarray` or a\n :class:`scipy.sparse.linalg.LinearOperator` with the `matvec` and\n `rmatvec` methods (to apply the matrix and its adjoint).\n eps_or_k : float or int\n Relative error (if `eps_or_k < 1`) or rank (if `eps_or_k >= 1`) of\n approximation.\n rand : bool, optional\n Whether to use random sampling if `A` is of type :class:`numpy.ndarray`\n (randomized algorithms are always used if `A` is of type\n :class:`scipy.sparse.linalg.LinearOperator`).\n\n Returns\n -------\n U : :class:`numpy.ndarray`\n Left singular vectors.\n S : :class:`numpy.ndarray`\n Singular values.\n V : :class:`numpy.ndarray`\n Right singular vectors.\n \"\"\"\n from scipy.sparse.linalg import LinearOperator\n\n real = _is_real(A)\n\n if isinstance(A, np.ndarray):\n if eps_or_k < 1:\n eps = eps_or_k\n if rand:\n if real:\n U, V, S = backend.iddp_asvd(eps, A)\n else:\n U, V, S = backend.idzp_asvd(eps, A)\n else:\n if real:\n U, V, S = backend.iddp_svd(eps, A)\n else:\n U, V, S = backend.idzp_svd(eps, A)\n else:\n k = int(eps_or_k)\n if k > min(A.shape):\n raise ValueError(\"Approximation rank %s exceeds min(A.shape) = \"\n \" %s \" % (k, min(A.shape)))\n if rand:\n if real:\n U, V, S = backend.iddr_asvd(A, k)\n else:\n U, V, S = backend.idzr_asvd(A, k)\n else:\n if real:\n U, V, S = backend.iddr_svd(A, k)\n else:\n U, V, S = backend.idzr_svd(A, k)\n elif isinstance(A, LinearOperator):\n m, n = A.shape\n matvec = lambda x: A.matvec(x)\n matveca = lambda x: A.rmatvec(x)\n if eps_or_k < 1:\n eps = eps_or_k\n if real:\n U, V, S = backend.iddp_rsvd(eps, m, n, matveca, matvec)\n else:\n U, V, S = backend.idzp_rsvd(eps, m, n, matveca, matvec)\n else:\n k = int(eps_or_k)\n if real:\n U, V, S = backend.iddr_rsvd(m, n, matveca, matvec, k)\n else:\n U, V, S = backend.idzr_rsvd(m, n, matveca, matvec, k)\n else:\n raise _TYPE_ERROR\n return U, S, V\n\n\ndef estimate_rank(A, eps):\n \"\"\"\n Estimate matrix rank to a specified relative precision using randomized\n methods.\n\n The matrix `A` can be given as either a :class:`numpy.ndarray` or a\n :class:`scipy.sparse.linalg.LinearOperator`, with different algorithms used\n for each case. If `A` is of type :class:`numpy.ndarray`, then the output\n rank is typically about 8 higher than the actual numerical rank.\n\n .. This function automatically detects the form of the input parameters and\n passes them to the appropriate backend. 
For details,\n see :func:`backend.idd_estrank`, :func:`backend.idd_findrank`,\n :func:`backend.idz_estrank`, and :func:`backend.idz_findrank`.\n\n Parameters\n ----------\n A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator`\n Matrix whose rank is to be estimated, given as either a\n :class:`numpy.ndarray` or a :class:`scipy.sparse.linalg.LinearOperator`\n with the `rmatvec` method (to apply the matrix adjoint).\n eps : float\n Relative error for numerical rank definition.\n\n Returns\n -------\n int\n Estimated matrix rank.\n \"\"\"\n from scipy.sparse.linalg import LinearOperator\n\n real = _is_real(A)\n\n if isinstance(A, np.ndarray):\n if real:\n rank = backend.idd_estrank(eps, A)\n else:\n rank = backend.idz_estrank(eps, A)\n if rank == 0:\n # special return value for nearly full rank\n rank = min(A.shape)\n return rank\n elif isinstance(A, LinearOperator):\n m, n = A.shape\n matveca = A.rmatvec\n if real:\n return backend.idd_findrank(eps, m, n, matveca)\n else:\n return backend.idz_findrank(eps, m, n, matveca)\n else:\n raise _TYPE_ERROR\n",
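The entry above archives `scipy.linalg.interpolative`, whose module docstring walks through computing an interpolative decomposition (ID) and converting it to an SVD. A short self-contained sketch of that fixed-precision workflow, using only functions defined in the archived module; the Hilbert test matrix, its size `n`, and the tolerance `eps` are arbitrary illustrative choices:

# Sketch of the fixed-precision ID -> SVD workflow described in the module
# docstring above; n and eps are illustrative, not prescribed values.
import numpy as np
import scipy.linalg.interpolative as sli
from scipy.linalg import hilbert

n = 200
A = hilbert(n)        # numerically low-rank test matrix (float64)
eps = 1e-8            # requested relative precision

# Fixed-precision ID: returns the rank k needed to reach eps, a column
# index array idx, and interpolation coefficients proj.
k, idx, proj = sli.interp_decomp(A, eps)

# Rebuild the skeleton and interpolation matrices and check the error.
B = sli.reconstruct_skel_matrix(A, k, idx)
P = sli.reconstruct_interp_matrix(idx, proj)
err_id = np.linalg.norm(A - B @ P, 2)

# Convert the same ID into an approximate SVD and check it as well.
U, S, V = sli.id_to_svd(B, idx, proj)
err_svd = np.linalg.norm(A - (U * S) @ V.conj().T, 2)

print(k, err_id, err_svd)

The fixed-rank and LinearOperator variants follow the alternative call signatures spelled out in the archived docstring (for example `idx, proj = sli.interp_decomp(A, k)` for a prescribed rank `k`).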
"\"\"\"Interpolation algorithms using piecewise cubic polynomials.\"\"\"\n\nfrom __future__ import division, print_function, absolute_import\n\nimport numpy as np\n\nfrom scipy._lib.six import string_types\n\nfrom . import BPoly, PPoly\nfrom .polyint import _isscalar\nfrom scipy._lib._util import _asarray_validated\nfrom scipy.linalg import solve_banded, solve\n\n\n__all__ = [\"CubicHermiteSpline\", \"PchipInterpolator\", \"pchip_interpolate\",\n \"Akima1DInterpolator\", \"CubicSpline\"]\n\n\ndef prepare_input(x, y, axis, dydx=None):\n \"\"\"Prepare input for cubic spline interpolators.\n\n All data are converted to numpy arrays and checked for correctness.\n Axes equal to `axis` of arrays `y` and `dydx` are rolled to be the 0-th\n axis. The value of `axis` is converted to lie in\n [0, number of dimensions of `y`).\n \"\"\"\n\n x, y = map(np.asarray, (x, y))\n if np.issubdtype(x.dtype, np.complexfloating):\n raise ValueError(\"`x` must contain real values.\")\n x = x.astype(float)\n\n if np.issubdtype(y.dtype, np.complexfloating):\n dtype = complex\n else:\n dtype = float\n\n if dydx is not None:\n dydx = np.asarray(dydx)\n if y.shape != dydx.shape:\n raise ValueError(\"The shapes of `y` and `dydx` must be identical.\")\n if np.issubdtype(dydx.dtype, np.complexfloating):\n dtype = complex\n dydx = dydx.astype(dtype, copy=False)\n\n y = y.astype(dtype, copy=False)\n axis = axis % y.ndim\n if x.ndim != 1:\n raise ValueError(\"`x` must be 1-dimensional.\")\n if x.shape[0] < 2:\n raise ValueError(\"`x` must contain at least 2 elements.\")\n if x.shape[0] != y.shape[axis]:\n raise ValueError(\"The length of `y` along `axis`={0} doesn't \"\n \"match the length of `x`\".format(axis))\n\n if not np.all(np.isfinite(x)):\n raise ValueError(\"`x` must contain only finite values.\")\n if not np.all(np.isfinite(y)):\n raise ValueError(\"`y` must contain only finite values.\")\n\n if dydx is not None and not np.all(np.isfinite(dydx)):\n raise ValueError(\"`dydx` must contain only finite values.\")\n\n dx = np.diff(x)\n if np.any(dx <= 0):\n raise ValueError(\"`x` must be strictly increasing sequence.\")\n\n y = np.rollaxis(y, axis)\n if dydx is not None:\n dydx = np.rollaxis(dydx, axis)\n\n return x, dx, y, axis, dydx\n\n\nclass CubicHermiteSpline(PPoly):\n \"\"\"Piecewise-cubic interpolator matching values and first derivatives.\n\n The result is represented as a `PPoly` instance.\n\n Parameters\n ----------\n x : array_like, shape (n,)\n 1-d array containing values of the independent variable.\n Values must be real, finite and in strictly increasing order.\n y : array_like\n Array containing values of the dependent variable. It can have\n arbitrary number of dimensions, but the length along ``axis``\n (see below) must match the length of ``x``. Values must be finite.\n dydx : array_like\n Array containing derivatives of the dependent variable. It can have\n arbitrary number of dimensions, but the length along ``axis``\n (see below) must match the length of ``x``. Values must be finite.\n axis : int, optional\n Axis along which `y` is assumed to be varying. Meaning that for\n ``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.\n Default is 0.\n extrapolate : {bool, 'periodic', None}, optional\n If bool, determines whether to extrapolate to out-of-bounds points\n based on first and last intervals, or to return NaNs. If 'periodic',\n periodic extrapolation is used. If None (default), it is set to True.\n\n Attributes\n ----------\n x : ndarray, shape (n,)\n Breakpoints. 
The same ``x`` which was passed to the constructor.\n c : ndarray, shape (4, n-1, ...)\n Coefficients of the polynomials on each segment. The trailing\n dimensions match the dimensions of `y`, excluding ``axis``.\n For example, if `y` is 1-d, then ``c[k, i]`` is a coefficient for\n ``(x-x[i])**(3-k)`` on the segment between ``x[i]`` and ``x[i+1]``.\n axis : int\n Interpolation axis. The same axis which was passed to the\n constructor.\n\n Methods\n -------\n __call__\n derivative\n antiderivative\n integrate\n roots\n\n See Also\n --------\n Akima1DInterpolator\n PchipInterpolator\n CubicSpline\n PPoly\n\n Notes\n -----\n If you want to create a higher-order spline matching higher-order\n derivatives, use `BPoly.from_derivatives`.\n\n References\n ----------\n .. [1] `Cubic Hermite spline\n <https://en.wikipedia.org/wiki/Cubic_Hermite_spline>`_\n on Wikipedia.\n \"\"\"\n def __init__(self, x, y, dydx, axis=0, extrapolate=None):\n if extrapolate is None:\n extrapolate = True\n\n x, dx, y, axis, dydx = prepare_input(x, y, axis, dydx)\n\n dxr = dx.reshape([dx.shape[0]] + [1] * (y.ndim - 1))\n slope = np.diff(y, axis=0) / dxr\n t = (dydx[:-1] + dydx[1:] - 2 * slope) / dxr\n\n c = np.empty((4, len(x) - 1) + y.shape[1:], dtype=t.dtype)\n c[0] = t / dxr\n c[1] = (slope - dydx[:-1]) / dxr - t\n c[2] = dydx[:-1]\n c[3] = y[:-1]\n\n super(CubicHermiteSpline, self).__init__(c, x, extrapolate=extrapolate)\n self.axis = axis\n\n\nclass PchipInterpolator(CubicHermiteSpline):\n r\"\"\"PCHIP 1-d monotonic cubic interpolation.\n\n ``x`` and ``y`` are arrays of values used to approximate some function f,\n with ``y = f(x)``. The interpolant uses monotonic cubic splines\n to find the value of new points. (PCHIP stands for Piecewise Cubic\n Hermite Interpolating Polynomial).\n\n Parameters\n ----------\n x : ndarray\n A 1-D array of monotonically increasing real values. ``x`` cannot\n include duplicate values (otherwise f is overspecified)\n y : ndarray\n A 1-D array of real values. ``y``'s length along the interpolation\n axis must be equal to the length of ``x``. If N-D array, use ``axis``\n parameter to select correct axis.\n axis : int, optional\n Axis in the y array corresponding to the x-coordinate values.\n extrapolate : bool, optional\n Whether to extrapolate to out-of-bounds points based on first\n and last intervals, or to return NaNs.\n\n Methods\n -------\n __call__\n derivative\n antiderivative\n roots\n\n See Also\n --------\n CubicHermiteSpline\n Akima1DInterpolator\n CubicSpline\n PPoly\n\n Notes\n -----\n The interpolator preserves monotonicity in the interpolation data and does\n not overshoot if the data is not smooth.\n\n The first derivatives are guaranteed to be continuous, but the second\n derivatives may jump at :math:`x_k`.\n\n Determines the derivatives at the points :math:`x_k`, :math:`f'_k`,\n by using PCHIP algorithm [1]_.\n\n Let :math:`h_k = x_{k+1} - x_k`, and :math:`d_k = (y_{k+1} - y_k) / h_k`\n are the slopes at internal points :math:`x_k`.\n If the signs of :math:`d_k` and :math:`d_{k-1}` are different or either of\n them equals zero, then :math:`f'_k = 0`. Otherwise, it is given by the\n weighted harmonic mean\n\n .. math::\n\n \\frac{w_1 + w_2}{f'_k} = \\frac{w_1}{d_{k-1}} + \\frac{w_2}{d_k}\n\n where :math:`w_1 = 2 h_k + h_{k-1}` and :math:`w_2 = h_k + 2 h_{k-1}`.\n\n The end slopes are set using a one-sided scheme [2]_.\n\n\n References\n ----------\n .. [1] F. N. Fritsch and R. E. Carlson, Monotone Piecewise Cubic Interpolation,\n SIAM J. Numer. 
Anal., 17(2), 238 (1980).\n :doi:`10.1137/0717021`.\n .. [2] see, e.g., C. Moler, Numerical Computing with Matlab, 2004.\n :doi:`10.1137/1.9780898717952`\n\n\n \"\"\"\n def __init__(self, x, y, axis=0, extrapolate=None):\n x, _, y, axis, _ = prepare_input(x, y, axis)\n xp = x.reshape((x.shape[0],) + (1,)*(y.ndim-1))\n dk = self._find_derivatives(xp, y)\n super(PchipInterpolator, self).__init__(x, y, dk, axis=0,\n extrapolate=extrapolate)\n self.axis = axis\n\n @staticmethod\n def _edge_case(h0, h1, m0, m1):\n # one-sided three-point estimate for the derivative\n d = ((2*h0 + h1)*m0 - h0*m1) / (h0 + h1)\n\n # try to preserve shape\n mask = np.sign(d) != np.sign(m0)\n mask2 = (np.sign(m0) != np.sign(m1)) & (np.abs(d) > 3.*np.abs(m0))\n mmm = (~mask) & mask2\n\n d[mask] = 0.\n d[mmm] = 3.*m0[mmm]\n\n return d\n\n @staticmethod\n def _find_derivatives(x, y):\n # Determine the derivatives at the points y_k, d_k, by using\n # PCHIP algorithm is:\n # We choose the derivatives at the point x_k by\n # Let m_k be the slope of the kth segment (between k and k+1)\n # If m_k=0 or m_{k-1}=0 or sgn(m_k) != sgn(m_{k-1}) then d_k == 0\n # else use weighted harmonic mean:\n # w_1 = 2h_k + h_{k-1}, w_2 = h_k + 2h_{k-1}\n # 1/d_k = 1/(w_1 + w_2)*(w_1 / m_k + w_2 / m_{k-1})\n # where h_k is the spacing between x_k and x_{k+1}\n y_shape = y.shape\n if y.ndim == 1:\n # So that _edge_case doesn't end up assigning to scalars\n x = x[:, None]\n y = y[:, None]\n\n hk = x[1:] - x[:-1]\n mk = (y[1:] - y[:-1]) / hk\n\n if y.shape[0] == 2:\n # edge case: only have two points, use linear interpolation\n dk = np.zeros_like(y)\n dk[0] = mk\n dk[1] = mk\n return dk.reshape(y_shape)\n\n smk = np.sign(mk)\n condition = (smk[1:] != smk[:-1]) | (mk[1:] == 0) | (mk[:-1] == 0)\n\n w1 = 2*hk[1:] + hk[:-1]\n w2 = hk[1:] + 2*hk[:-1]\n\n # values where division by zero occurs will be excluded\n # by 'condition' afterwards\n with np.errstate(divide='ignore'):\n whmean = (w1/mk[:-1] + w2/mk[1:]) / (w1 + w2)\n\n dk = np.zeros_like(y)\n dk[1:-1][condition] = 0.0\n dk[1:-1][~condition] = 1.0 / whmean[~condition]\n\n # special case endpoints, as suggested in\n # Cleve Moler, Numerical Computing with MATLAB, Chap 3.4\n dk[0] = PchipInterpolator._edge_case(hk[0], hk[1], mk[0], mk[1])\n dk[-1] = PchipInterpolator._edge_case(hk[-1], hk[-2], mk[-1], mk[-2])\n\n return dk.reshape(y_shape)\n\n\ndef pchip_interpolate(xi, yi, x, der=0, axis=0):\n \"\"\"\n Convenience function for pchip interpolation.\n\n xi and yi are arrays of values used to approximate some function f,\n with ``yi = f(xi)``. The interpolant uses monotonic cubic splines\n to find the value of new points x and the derivatives there.\n\n See `scipy.interpolate.PchipInterpolator` for details.\n\n Parameters\n ----------\n xi : array_like\n A sorted list of x-coordinates, of length N.\n yi : array_like\n A 1-D array of real values. `yi`'s length along the interpolation\n axis must be equal to the length of `xi`. If N-D array, use axis\n parameter to select correct axis.\n x : scalar or array_like\n Of length M.\n der : int or list, optional\n Derivatives to extract. 
The 0-th derivative can be included to\n return the function value.\n axis : int, optional\n Axis in the yi array corresponding to the x-coordinate values.\n\n See Also\n --------\n PchipInterpolator\n\n Returns\n -------\n y : scalar or array_like\n The result, of length R or length M or M by R,\n\n \"\"\"\n P = PchipInterpolator(xi, yi, axis=axis)\n\n if der == 0:\n return P(x)\n elif _isscalar(der):\n return P.derivative(der)(x)\n else:\n return [P.derivative(nu)(x) for nu in der]\n\n\nclass Akima1DInterpolator(CubicHermiteSpline):\n \"\"\"\n Akima interpolator\n\n Fit piecewise cubic polynomials, given vectors x and y. The interpolation\n method by Akima uses a continuously differentiable sub-spline built from\n piecewise cubic polynomials. The resultant curve passes through the given\n data points and will appear smooth and natural.\n\n Parameters\n ----------\n x : ndarray, shape (m, )\n 1-D array of monotonically increasing real values.\n y : ndarray, shape (m, ...)\n N-D array of real values. The length of ``y`` along the first axis\n must be equal to the length of ``x``.\n axis : int, optional\n Specifies the axis of ``y`` along which to interpolate. Interpolation\n defaults to the first axis of ``y``.\n\n Methods\n -------\n __call__\n derivative\n antiderivative\n roots\n\n See Also\n --------\n PchipInterpolator\n CubicSpline\n PPoly\n\n Notes\n -----\n .. versionadded:: 0.14\n\n Use only for precise data, as the fitted curve passes through the given\n points exactly. This routine is useful for plotting a pleasingly smooth\n curve through a few given points for purposes of plotting.\n\n References\n ----------\n [1] A new method of interpolation and smooth curve fitting based\n on local procedures. Hiroshi Akima, J. ACM, October 1970, 17(4),\n 589-602.\n\n \"\"\"\n\n def __init__(self, x, y, axis=0):\n # Original implementation in MATLAB by N. Shamsundar (BSD licensed), see\n # https://www.mathworks.com/matlabcentral/fileexchange/1814-akima-interpolation\n x, dx, y, axis, _ = prepare_input(x, y, axis)\n # determine slopes between breakpoints\n m = np.empty((x.size + 3, ) + y.shape[1:])\n dx = dx[(slice(None), ) + (None, ) * (y.ndim - 1)]\n m[2:-2] = np.diff(y, axis=0) / dx\n\n # add two additional points on the left ...\n m[1] = 2. * m[2] - m[3]\n m[0] = 2. * m[1] - m[2]\n # ... and on the right\n m[-2] = 2. * m[-3] - m[-4]\n m[-1] = 2. * m[-2] - m[-3]\n\n # if m1 == m2 != m3 == m4, the slope at the breakpoint is not defined.\n # This is the fill value:\n t = .5 * (m[3:] + m[:-3])\n # get the denominator of the slope t\n dm = np.abs(np.diff(m, axis=0))\n f1 = dm[2:]\n f2 = dm[:-2]\n f12 = f1 + f2\n # These are the mask of where the the slope at breakpoint is defined:\n ind = np.nonzero(f12 > 1e-9 * np.max(f12))\n x_ind, y_ind = ind[0], ind[1:]\n # Set the slope at breakpoint\n t[ind] = (f1[ind] * m[(x_ind + 1,) + y_ind] +\n f2[ind] * m[(x_ind + 2,) + y_ind]) / f12[ind]\n\n super(Akima1DInterpolator, self).__init__(x, y, t, axis=0,\n extrapolate=False)\n self.axis = axis\n\n def extend(self, c, x, right=True):\n raise NotImplementedError(\"Extending a 1D Akima interpolator is not \"\n \"yet implemented\")\n\n # These are inherited from PPoly, but they do not produce an Akima\n # interpolator. 
Hence stub them out.\n @classmethod\n def from_spline(cls, tck, extrapolate=None):\n raise NotImplementedError(\"This method does not make sense for \"\n \"an Akima interpolator.\")\n\n @classmethod\n def from_bernstein_basis(cls, bp, extrapolate=None):\n raise NotImplementedError(\"This method does not make sense for \"\n \"an Akima interpolator.\")\n\n\nclass CubicSpline(CubicHermiteSpline):\n \"\"\"Cubic spline data interpolator.\n\n Interpolate data with a piecewise cubic polynomial which is twice\n continuously differentiable [1]_. The result is represented as a `PPoly`\n instance with breakpoints matching the given data.\n\n Parameters\n ----------\n x : array_like, shape (n,)\n 1-d array containing values of the independent variable.\n Values must be real, finite and in strictly increasing order.\n y : array_like\n Array containing values of the dependent variable. It can have\n arbitrary number of dimensions, but the length along ``axis``\n (see below) must match the length of ``x``. Values must be finite.\n axis : int, optional\n Axis along which `y` is assumed to be varying. Meaning that for\n ``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.\n Default is 0.\n bc_type : string or 2-tuple, optional\n Boundary condition type. Two additional equations, given by the\n boundary conditions, are required to determine all coefficients of\n polynomials on each segment [2]_.\n\n If `bc_type` is a string, then the specified condition will be applied\n at both ends of a spline. Available conditions are:\n\n * 'not-a-knot' (default): The first and second segment at a curve end\n are the same polynomial. It is a good default when there is no\n information on boundary conditions.\n * 'periodic': The interpolated functions is assumed to be periodic\n of period ``x[-1] - x[0]``. The first and last value of `y` must be\n identical: ``y[0] == y[-1]``. This boundary condition will result in\n ``y'[0] == y'[-1]`` and ``y''[0] == y''[-1]``.\n * 'clamped': The first derivative at curves ends are zero. Assuming\n a 1D `y`, ``bc_type=((1, 0.0), (1, 0.0))`` is the same condition.\n * 'natural': The second derivative at curve ends are zero. Assuming\n a 1D `y`, ``bc_type=((2, 0.0), (2, 0.0))`` is the same condition.\n\n If `bc_type` is a 2-tuple, the first and the second value will be\n applied at the curve start and end respectively. The tuple values can\n be one of the previously mentioned strings (except 'periodic') or a\n tuple `(order, deriv_values)` allowing to specify arbitrary\n derivatives at curve ends:\n\n * `order`: the derivative order, 1 or 2.\n * `deriv_value`: array_like containing derivative values, shape must\n be the same as `y`, excluding ``axis`` dimension. For example, if\n `y` is 1D, then `deriv_value` must be a scalar. If `y` is 3D with\n the shape (n0, n1, n2) and axis=2, then `deriv_value` must be 2D\n and have the shape (n0, n1).\n extrapolate : {bool, 'periodic', None}, optional\n If bool, determines whether to extrapolate to out-of-bounds points\n based on first and last intervals, or to return NaNs. If 'periodic',\n periodic extrapolation is used. If None (default), ``extrapolate`` is\n set to 'periodic' for ``bc_type='periodic'`` and to True otherwise.\n\n Attributes\n ----------\n x : ndarray, shape (n,)\n Breakpoints. The same ``x`` which was passed to the constructor.\n c : ndarray, shape (4, n-1, ...)\n Coefficients of the polynomials on each segment. 
The trailing\n dimensions match the dimensions of `y`, excluding ``axis``.\n For example, if `y` is 1-d, then ``c[k, i]`` is a coefficient for\n ``(x-x[i])**(3-k)`` on the segment between ``x[i]`` and ``x[i+1]``.\n axis : int\n Interpolation axis. The same axis which was passed to the\n constructor.\n\n Methods\n -------\n __call__\n derivative\n antiderivative\n integrate\n roots\n\n See Also\n --------\n Akima1DInterpolator\n PchipInterpolator\n PPoly\n\n Notes\n -----\n Parameters `bc_type` and ``interpolate`` work independently, i.e. the\n former controls only construction of a spline, and the latter only\n evaluation.\n\n When a boundary condition is 'not-a-knot' and n = 2, it is replaced by\n a condition that the first derivative is equal to the linear interpolant\n slope. When both boundary conditions are 'not-a-knot' and n = 3, the\n solution is sought as a parabola passing through given points.\n\n When 'not-a-knot' boundary conditions is applied to both ends, the\n resulting spline will be the same as returned by `splrep` (with ``s=0``)\n and `InterpolatedUnivariateSpline`, but these two methods use a\n representation in B-spline basis.\n\n .. versionadded:: 0.18.0\n\n Examples\n --------\n In this example the cubic spline is used to interpolate a sampled sinusoid.\n You can see that the spline continuity property holds for the first and\n second derivatives and violates only for the third derivative.\n\n >>> from scipy.interpolate import CubicSpline\n >>> import matplotlib.pyplot as plt\n >>> x = np.arange(10)\n >>> y = np.sin(x)\n >>> cs = CubicSpline(x, y)\n >>> xs = np.arange(-0.5, 9.6, 0.1)\n >>> fig, ax = plt.subplots(figsize=(6.5, 4))\n >>> ax.plot(x, y, 'o', label='data')\n >>> ax.plot(xs, np.sin(xs), label='true')\n >>> ax.plot(xs, cs(xs), label=\"S\")\n >>> ax.plot(xs, cs(xs, 1), label=\"S'\")\n >>> ax.plot(xs, cs(xs, 2), label=\"S''\")\n >>> ax.plot(xs, cs(xs, 3), label=\"S'''\")\n >>> ax.set_xlim(-0.5, 9.5)\n >>> ax.legend(loc='lower left', ncol=2)\n >>> plt.show()\n\n In the second example, the unit circle is interpolated with a spline. A\n periodic boundary condition is used. You can see that the first derivative\n values, ds/dx=0, ds/dy=1 at the periodic point (1, 0) are correctly\n computed. Note that a circle cannot be exactly represented by a cubic\n spline. To increase precision, more breakpoints would be required.\n\n >>> theta = 2 * np.pi * np.linspace(0, 1, 5)\n >>> y = np.c_[np.cos(theta), np.sin(theta)]\n >>> cs = CubicSpline(theta, y, bc_type='periodic')\n >>> print(\"ds/dx={:.1f} ds/dy={:.1f}\".format(cs(0, 1)[0], cs(0, 1)[1]))\n ds/dx=0.0 ds/dy=1.0\n >>> xs = 2 * np.pi * np.linspace(0, 1, 100)\n >>> fig, ax = plt.subplots(figsize=(6.5, 4))\n >>> ax.plot(y[:, 0], y[:, 1], 'o', label='data')\n >>> ax.plot(np.cos(xs), np.sin(xs), label='true')\n >>> ax.plot(cs(xs)[:, 0], cs(xs)[:, 1], label='spline')\n >>> ax.axes.set_aspect('equal')\n >>> ax.legend(loc='center')\n >>> plt.show()\n\n The third example is the interpolation of a polynomial y = x**3 on the\n interval 0 <= x<= 1. A cubic spline can represent this function exactly.\n To achieve that we need to specify values and first derivatives at\n endpoints of the interval. Note that y' = 3 * x**2 and thus y'(0) = 0 and\n y'(1) = 3.\n\n >>> cs = CubicSpline([0, 1], [0, 1], bc_type=((1, 0), (1, 3)))\n >>> x = np.linspace(0, 1)\n >>> np.allclose(x**3, cs(x))\n True\n\n References\n ----------\n .. 
[1] `Cubic Spline Interpolation\n <https://en.wikiversity.org/wiki/Cubic_Spline_Interpolation>`_\n on Wikiversity.\n .. [2] Carl de Boor, \"A Practical Guide to Splines\", Springer-Verlag, 1978.\n \"\"\"\n def __init__(self, x, y, axis=0, bc_type='not-a-knot', extrapolate=None):\n x, dx, y, axis, _ = prepare_input(x, y, axis)\n n = len(x)\n\n bc, y = self._validate_bc(bc_type, y, y.shape[1:], axis)\n\n if extrapolate is None:\n if bc[0] == 'periodic':\n extrapolate = 'periodic'\n else:\n extrapolate = True\n\n dxr = dx.reshape([dx.shape[0]] + [1] * (y.ndim - 1))\n slope = np.diff(y, axis=0) / dxr\n\n # If bc is 'not-a-knot' this change is just a convention.\n # If bc is 'periodic' then we already checked that y[0] == y[-1],\n # and the spline is just a constant, we handle this case in the same\n # way by setting the first derivatives to slope, which is 0.\n if n == 2:\n if bc[0] in ['not-a-knot', 'periodic']:\n bc[0] = (1, slope[0])\n if bc[1] in ['not-a-knot', 'periodic']:\n bc[1] = (1, slope[0])\n\n # This is a very special case, when both conditions are 'not-a-knot'\n # and n == 3. In this case 'not-a-knot' can't be handled regularly\n # as the both conditions are identical. We handle this case by\n # constructing a parabola passing through given points.\n if n == 3 and bc[0] == 'not-a-knot' and bc[1] == 'not-a-knot':\n A = np.zeros((3, 3)) # This is a standard matrix.\n b = np.empty((3,) + y.shape[1:], dtype=y.dtype)\n\n A[0, 0] = 1\n A[0, 1] = 1\n A[1, 0] = dx[1]\n A[1, 1] = 2 * (dx[0] + dx[1])\n A[1, 2] = dx[0]\n A[2, 1] = 1\n A[2, 2] = 1\n\n b[0] = 2 * slope[0]\n b[1] = 3 * (dxr[0] * slope[1] + dxr[1] * slope[0])\n b[2] = 2 * slope[1]\n\n s = solve(A, b, overwrite_a=True, overwrite_b=True,\n check_finite=False)\n else:\n # Find derivative values at each x[i] by solving a tridiagonal\n # system.\n A = np.zeros((3, n)) # This is a banded matrix representation.\n b = np.empty((n,) + y.shape[1:], dtype=y.dtype)\n\n # Filling the system for i=1..n-2\n # (x[i-1] - x[i]) * s[i-1] +\\\n # 2 * ((x[i] - x[i-1]) + (x[i+1] - x[i])) * s[i] +\\\n # (x[i] - x[i-1]) * s[i+1] =\\\n # 3 * ((x[i+1] - x[i])*(y[i] - y[i-1])/(x[i] - x[i-1]) +\\\n # (x[i] - x[i-1])*(y[i+1] - y[i])/(x[i+1] - x[i]))\n\n A[1, 1:-1] = 2 * (dx[:-1] + dx[1:]) # The diagonal\n A[0, 2:] = dx[:-1] # The upper diagonal\n A[-1, :-2] = dx[1:] # The lower diagonal\n\n b[1:-1] = 3 * (dxr[1:] * slope[:-1] + dxr[:-1] * slope[1:])\n\n bc_start, bc_end = bc\n\n if bc_start == 'periodic':\n # Due to the periodicity, and because y[-1] = y[0], the linear\n # system has (n-1) unknowns/equations instead of n:\n A = A[:, 0:-1]\n A[1, 0] = 2 * (dx[-1] + dx[0])\n A[0, 1] = dx[-1]\n\n b = b[:-1]\n\n # Also, due to the periodicity, the system is not tri-diagonal.\n # We need to compute a \"condensed\" matrix of shape (n-2, n-2).\n # See https://web.archive.org/web/20151220180652/http://www.cfm.brown.edu/people/gk/chap6/node14.html\n # for more explanations.\n # The condensed matrix is obtained by removing the last column\n # and last row of the (n-1, n-1) system matrix. 
The removed\n # values are saved in scalar variables with the (n-1, n-1)\n # system matrix indices forming their names:\n a_m1_0 = dx[-2] # lower left corner value: A[-1, 0]\n a_m1_m2 = dx[-1]\n a_m1_m1 = 2 * (dx[-1] + dx[-2])\n a_m2_m1 = dx[-2]\n a_0_m1 = dx[0]\n\n b[0] = 3 * (dxr[0] * slope[-1] + dxr[-1] * slope[0])\n b[-1] = 3 * (dxr[-1] * slope[-2] + dxr[-2] * slope[-1])\n\n Ac = A[:, :-1]\n b1 = b[:-1]\n b2 = np.zeros_like(b1)\n b2[0] = -a_0_m1\n b2[-1] = -a_m2_m1\n\n # s1 and s2 are the solutions of (n-2, n-2) system\n s1 = solve_banded((1, 1), Ac, b1, overwrite_ab=False,\n overwrite_b=False, check_finite=False)\n\n s2 = solve_banded((1, 1), Ac, b2, overwrite_ab=False,\n overwrite_b=False, check_finite=False)\n\n # computing the s[n-2] solution:\n s_m1 = ((b[-1] - a_m1_0 * s1[0] - a_m1_m2 * s1[-1]) /\n (a_m1_m1 + a_m1_0 * s2[0] + a_m1_m2 * s2[-1]))\n\n # s is the solution of the (n, n) system:\n s = np.empty((n,) + y.shape[1:], dtype=y.dtype)\n s[:-2] = s1 + s_m1 * s2\n s[-2] = s_m1\n s[-1] = s[0]\n else:\n if bc_start == 'not-a-knot':\n A[1, 0] = dx[1]\n A[0, 1] = x[2] - x[0]\n d = x[2] - x[0]\n b[0] = ((dxr[0] + 2*d) * dxr[1] * slope[0] +\n dxr[0]**2 * slope[1]) / d\n elif bc_start[0] == 1:\n A[1, 0] = 1\n A[0, 1] = 0\n b[0] = bc_start[1]\n elif bc_start[0] == 2:\n A[1, 0] = 2 * dx[0]\n A[0, 1] = dx[0]\n b[0] = -0.5 * bc_start[1] * dx[0]**2 + 3 * (y[1] - y[0])\n\n if bc_end == 'not-a-knot':\n A[1, -1] = dx[-2]\n A[-1, -2] = x[-1] - x[-3]\n d = x[-1] - x[-3]\n b[-1] = ((dxr[-1]**2*slope[-2] +\n (2*d + dxr[-1])*dxr[-2]*slope[-1]) / d)\n elif bc_end[0] == 1:\n A[1, -1] = 1\n A[-1, -2] = 0\n b[-1] = bc_end[1]\n elif bc_end[0] == 2:\n A[1, -1] = 2 * dx[-1]\n A[-1, -2] = dx[-1]\n b[-1] = 0.5 * bc_end[1] * dx[-1]**2 + 3 * (y[-1] - y[-2])\n\n s = solve_banded((1, 1), A, b, overwrite_ab=True,\n overwrite_b=True, check_finite=False)\n\n super(CubicSpline, self).__init__(x, y, s, axis=0,\n extrapolate=extrapolate)\n self.axis = axis\n\n @staticmethod\n def _validate_bc(bc_type, y, expected_deriv_shape, axis):\n \"\"\"Validate and prepare boundary conditions.\n\n Returns\n -------\n validated_bc : 2-tuple\n Boundary conditions for a curve start and end.\n y : ndarray\n y casted to complex dtype if one of the boundary conditions has\n complex dtype.\n \"\"\"\n if isinstance(bc_type, string_types):\n if bc_type == 'periodic':\n if not np.allclose(y[0], y[-1], rtol=1e-15, atol=1e-15):\n raise ValueError(\n \"The first and last `y` point along axis {} must \"\n \"be identical (within machine precision) when \"\n \"bc_type='periodic'.\".format(axis))\n\n bc_type = (bc_type, bc_type)\n\n else:\n if len(bc_type) != 2:\n raise ValueError(\"`bc_type` must contain 2 elements to \"\n \"specify start and end conditions.\")\n\n if 'periodic' in bc_type:\n raise ValueError(\"'periodic' `bc_type` is defined for both \"\n \"curve ends and cannot be used with other \"\n \"boundary conditions.\")\n\n validated_bc = []\n for bc in bc_type:\n if isinstance(bc, string_types):\n if bc == 'clamped':\n validated_bc.append((1, np.zeros(expected_deriv_shape)))\n elif bc == 'natural':\n validated_bc.append((2, np.zeros(expected_deriv_shape)))\n elif bc in ['not-a-knot', 'periodic']:\n validated_bc.append(bc)\n else:\n raise ValueError(\"bc_type={} is not allowed.\".format(bc))\n else:\n try:\n deriv_order, deriv_value = bc\n except Exception:\n raise ValueError(\"A specified derivative value must be \"\n \"given in the form (order, value).\")\n\n if deriv_order not in [1, 2]:\n raise ValueError(\"The specified 
derivative order must \"\n \"be 1 or 2.\")\n\n deriv_value = np.asarray(deriv_value)\n if deriv_value.shape != expected_deriv_shape:\n raise ValueError(\n \"`deriv_value` shape {} is not the expected one {}.\"\n .format(deriv_value.shape, expected_deriv_shape))\n\n if np.issubdtype(deriv_value.dtype, np.complexfloating):\n y = y.astype(complex, copy=False)\n\n validated_bc.append((deriv_order, deriv_value))\n\n return validated_bc, y\n",
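The three interpolator classes collected above differ mainly in how they choose derivatives at the breakpoints: PCHIP picks them to preserve monotonicity, Akima uses a local rule that limits overshoot near outliers, and CubicSpline enforces C2 continuity subject to the selected boundary condition. A minimal sketch comparing the three on the same step-like data (assuming only that SciPy is importable; the sample values are illustrative and not taken from the source above):

import numpy as np
from scipy.interpolate import PchipInterpolator, Akima1DInterpolator, CubicSpline

# Monotone data with a flat run -- a simple stress test for overshoot.
x = np.array([0., 1., 2., 3., 4., 5.])
y = np.array([0., 0., 0., 1., 1., 1.])
xs = np.linspace(0., 5., 11)

pchip = PchipInterpolator(x, y)                 # C1, monotonicity-preserving
akima = Akima1DInterpolator(x, y)               # C1, local, little overshoot
spline = CubicSpline(x, y, bc_type='natural')   # C2, global, may overshoot

print(pchip(xs))    # stays inside [0, 1]
print(akima(xs))
print(spline(xs))   # can leave [0, 1] between breakpoints

On such data the PCHIP values never leave the range of y, which is the property the _find_derivatives/_edge_case logic above is designed to guarantee; the C2 spline trades that guarantee for smoother second derivatives.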
"# Copyright (C) 2003-2005 Peter J. Verveer\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n#\n# 3. The name of the author may not be used to endorse or promote\n# products derived from this software without specific prior\n# written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS\n# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE\n# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom __future__ import division, print_function, absolute_import\n\nimport numpy\nimport numpy as np\nfrom . import _ni_support\nfrom . import _ni_label\nfrom . import _nd_image\nfrom . import morphology\n\n__all__ = ['label', 'find_objects', 'labeled_comprehension', 'sum', 'mean',\n 'variance', 'standard_deviation', 'minimum', 'maximum', 'median',\n 'minimum_position', 'maximum_position', 'extrema', 'center_of_mass',\n 'histogram', 'watershed_ift']\n\n\ndef label(input, structure=None, output=None):\n \"\"\"\n Label features in an array.\n\n Parameters\n ----------\n input : array_like\n An array-like object to be labeled. Any non-zero values in `input` are\n counted as features and zero values are considered the background.\n structure : array_like, optional\n A structuring element that defines feature connections.\n `structure` must be centrosymmetric\n (see Notes).\n If no structuring element is provided,\n one is automatically generated with a squared connectivity equal to\n one. That is, for a 2-D `input` array, the default structuring element\n is::\n\n [[0,1,0],\n [1,1,1],\n [0,1,0]]\n\n output : (None, data-type, array_like), optional\n If `output` is a data type, it specifies the type of the resulting\n labeled feature array.\n If `output` is an array-like object, then `output` will be updated\n with the labeled features from this function. 
This function can\n operate in-place, by passing output=input.\n Note that the output must be able to store the largest label, or this\n function will raise an Exception.\n\n Returns\n -------\n label : ndarray or int\n An integer ndarray where each unique feature in `input` has a unique\n label in the returned array.\n num_features : int\n How many objects were found.\n\n If `output` is None, this function returns a tuple of\n (`labeled_array`, `num_features`).\n\n If `output` is a ndarray, then it will be updated with values in\n `labeled_array` and only `num_features` will be returned by this\n function.\n\n See Also\n --------\n find_objects : generate a list of slices for the labeled features (or\n objects); useful for finding features' position or\n dimensions\n\n Notes\n -----\n A centrosymmetric matrix is a matrix that is symmetric about the center.\n See [1]_ for more information.\n\n The `structure` matrix must be centrosymmetric to ensure\n two-way connections.\n For instance, if the `structure` matrix is not centrosymmetric\n and is defined as::\n\n [[0,1,0],\n [1,1,0],\n [0,0,0]]\n\n and the `input` is::\n\n [[1,2],\n [0,3]]\n\n then the structure matrix would indicate the\n entry 2 in the input is connected to 1,\n but 1 is not connected to 2.\n\n Examples\n --------\n Create an image with some features, then label it using the default\n (cross-shaped) structuring element:\n\n >>> from scipy.ndimage import label, generate_binary_structure\n >>> a = np.array([[0,0,1,1,0,0],\n ... [0,0,0,1,0,0],\n ... [1,1,0,0,1,0],\n ... [0,0,0,1,0,0]])\n >>> labeled_array, num_features = label(a)\n\n Each of the 4 features are labeled with a different integer:\n\n >>> num_features\n 4\n >>> labeled_array\n array([[0, 0, 1, 1, 0, 0],\n [0, 0, 0, 1, 0, 0],\n [2, 2, 0, 0, 3, 0],\n [0, 0, 0, 4, 0, 0]])\n\n Generate a structuring element that will consider features connected even\n if they touch diagonally:\n\n >>> s = generate_binary_structure(2,2)\n\n or,\n\n >>> s = [[1,1,1],\n ... [1,1,1],\n ... [1,1,1]]\n\n Label the image using the new structuring element:\n\n >>> labeled_array, num_features = label(a, structure=s)\n\n Show the 2 labeled features (note that features 1, 3, and 4 from above are\n now considered a single feature):\n\n >>> num_features\n 2\n >>> labeled_array\n array([[0, 0, 1, 1, 0, 0],\n [0, 0, 0, 1, 0, 0],\n [2, 2, 0, 0, 1, 0],\n [0, 0, 0, 1, 0, 0]])\n\n References\n ----------\n\n .. [1] James R. 
Weaver, \"Centrosymmetric (cross-symmetric)\n matrices, their basic properties, eigenvalues, and\n eigenvectors.\" The American Mathematical Monthly 92.10\n (1985): 711-717.\n\n \"\"\"\n input = numpy.asarray(input)\n if numpy.iscomplexobj(input):\n raise TypeError('Complex type not supported')\n if structure is None:\n structure = morphology.generate_binary_structure(input.ndim, 1)\n structure = numpy.asarray(structure, dtype=bool)\n if structure.ndim != input.ndim:\n raise RuntimeError('structure and input must have equal rank')\n for ii in structure.shape:\n if ii != 3:\n raise ValueError('structure dimensions must be equal to 3')\n\n # Use 32 bits if it's large enough for this image.\n # _ni_label.label() needs two entries for background and\n # foreground tracking\n need_64bits = input.size >= (2**31 - 2)\n\n if isinstance(output, numpy.ndarray):\n if output.shape != input.shape:\n raise ValueError(\"output shape not correct\")\n caller_provided_output = True\n else:\n caller_provided_output = False\n if output is None:\n output = np.empty(input.shape, np.intp if need_64bits else np.int32)\n else:\n output = np.empty(input.shape, output)\n\n # handle scalars, 0-dim arrays\n if input.ndim == 0 or input.size == 0:\n if input.ndim == 0:\n # scalar\n maxlabel = 1 if (input != 0) else 0\n output[...] = maxlabel\n else:\n # 0-dim\n maxlabel = 0\n if caller_provided_output:\n return maxlabel\n else:\n return output, maxlabel\n\n try:\n max_label = _ni_label._label(input, structure, output)\n except _ni_label.NeedMoreBits:\n # Make another attempt with enough bits, then try to cast to the\n # new type.\n tmp_output = np.empty(input.shape, np.intp if need_64bits else np.int32)\n max_label = _ni_label._label(input, structure, tmp_output)\n output[...] = tmp_output[...]\n if not np.all(output == tmp_output):\n # refuse to return bad results\n raise RuntimeError(\"insufficient bit-depth in requested output type\")\n\n if caller_provided_output:\n # result was written in-place\n return max_label\n else:\n return output, max_label\n\n\ndef find_objects(input, max_label=0):\n \"\"\"\n Find objects in a labeled array.\n\n Parameters\n ----------\n input : ndarray of ints\n Array containing objects defined by different labels. Labels with\n value 0 are ignored.\n max_label : int, optional\n Maximum label to be searched for in `input`. If max_label is not\n given, the positions of all objects are returned.\n\n Returns\n -------\n object_slices : list of tuples\n A list of tuples, with each tuple containing N slices (with N the\n dimension of the input array). Slices correspond to the minimal\n parallelepiped that contains the object. 
If a number is missing,\n None is returned instead of a slice.\n\n See Also\n --------\n label, center_of_mass\n\n Notes\n -----\n This function is very useful for isolating a volume of interest inside\n a 3-D array, that cannot be \"seen through\".\n\n Examples\n --------\n >>> from scipy import ndimage\n >>> a = np.zeros((6,6), dtype=int)\n >>> a[2:4, 2:4] = 1\n >>> a[4, 4] = 1\n >>> a[:2, :3] = 2\n >>> a[0, 5] = 3\n >>> a\n array([[2, 2, 2, 0, 0, 3],\n [2, 2, 2, 0, 0, 0],\n [0, 0, 1, 1, 0, 0],\n [0, 0, 1, 1, 0, 0],\n [0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0]])\n >>> ndimage.find_objects(a)\n [(slice(2, 5, None), slice(2, 5, None)), (slice(0, 2, None), slice(0, 3, None)), (slice(0, 1, None), slice(5, 6, None))]\n >>> ndimage.find_objects(a, max_label=2)\n [(slice(2, 5, None), slice(2, 5, None)), (slice(0, 2, None), slice(0, 3, None))]\n >>> ndimage.find_objects(a == 1, max_label=2)\n [(slice(2, 5, None), slice(2, 5, None)), None]\n\n >>> loc = ndimage.find_objects(a)[0]\n >>> a[loc]\n array([[1, 1, 0],\n [1, 1, 0],\n [0, 0, 1]])\n\n \"\"\"\n input = numpy.asarray(input)\n if numpy.iscomplexobj(input):\n raise TypeError('Complex type not supported')\n\n if max_label < 1:\n max_label = input.max()\n\n return _nd_image.find_objects(input, max_label)\n\n\ndef labeled_comprehension(input, labels, index, func, out_dtype, default, pass_positions=False):\n \"\"\"\n Roughly equivalent to [func(input[labels == i]) for i in index].\n\n Sequentially applies an arbitrary function (that works on array_like input)\n to subsets of an n-D image array specified by `labels` and `index`.\n The option exists to provide the function with positional parameters as the\n second argument.\n\n Parameters\n ----------\n input : array_like\n Data from which to select `labels` to process.\n labels : array_like or None\n Labels to objects in `input`.\n If not None, array must be same shape as `input`.\n If None, `func` is applied to raveled `input`.\n index : int, sequence of ints or None\n Subset of `labels` to which to apply `func`.\n If a scalar, a single value is returned.\n If None, `func` is applied to all non-zero values of `labels`.\n func : callable\n Python function to apply to `labels` from `input`.\n out_dtype : dtype\n Dtype to use for `result`.\n default : int, float or None\n Default return value when a element of `index` does not exist\n in `labels`.\n pass_positions : bool, optional\n If True, pass linear indices to `func` as a second argument.\n Default is False.\n\n Returns\n -------\n result : ndarray\n Result of applying `func` to each of `labels` to `input` in `index`.\n\n Examples\n --------\n >>> a = np.array([[1, 2, 0, 0],\n ... [5, 3, 0, 4],\n ... [0, 0, 0, 7],\n ... [9, 3, 0, 0]])\n >>> from scipy import ndimage\n >>> lbl, nlbl = ndimage.label(a)\n >>> lbls = np.arange(1, nlbl+1)\n >>> ndimage.labeled_comprehension(a, lbl, lbls, np.mean, float, 0)\n array([ 2.75, 5.5 , 6. ])\n\n Falling back to `default`:\n\n >>> lbls = np.arange(1, nlbl+2)\n >>> ndimage.labeled_comprehension(a, lbl, lbls, np.mean, float, -1)\n array([ 2.75, 5.5 , 6. , -1. ])\n\n Passing positions:\n\n >>> def fn(val, pos):\n ... print(\"fn says: %s : %s\" % (val, pos))\n ... 
return (val.sum()) if (pos.sum() % 2 == 0) else (-val.sum())\n ...\n >>> ndimage.labeled_comprehension(a, lbl, lbls, fn, float, 0, True)\n fn says: [1 2 5 3] : [0 1 4 5]\n fn says: [4 7] : [ 7 11]\n fn says: [9 3] : [12 13]\n array([ 11., 11., -12., 0.])\n\n \"\"\"\n\n as_scalar = numpy.isscalar(index)\n input = numpy.asarray(input)\n\n if pass_positions:\n positions = numpy.arange(input.size).reshape(input.shape)\n\n if labels is None:\n if index is not None:\n raise ValueError(\"index without defined labels\")\n if not pass_positions:\n return func(input.ravel())\n else:\n return func(input.ravel(), positions.ravel())\n\n try:\n input, labels = numpy.broadcast_arrays(input, labels)\n except ValueError:\n raise ValueError(\"input and labels must have the same shape \"\n \"(excepting dimensions with width 1)\")\n\n if index is None:\n if not pass_positions:\n return func(input[labels > 0])\n else:\n return func(input[labels > 0], positions[labels > 0])\n\n index = numpy.atleast_1d(index)\n if np.any(index.astype(labels.dtype).astype(index.dtype) != index):\n raise ValueError(\"Cannot convert index values from <%s> to <%s> \"\n \"(labels' type) without loss of precision\" %\n (index.dtype, labels.dtype))\n\n index = index.astype(labels.dtype)\n\n # optimization: find min/max in index, and select those parts of labels, input, and positions\n lo = index.min()\n hi = index.max()\n mask = (labels >= lo) & (labels <= hi)\n\n # this also ravels the arrays\n labels = labels[mask]\n input = input[mask]\n if pass_positions:\n positions = positions[mask]\n\n # sort everything by labels\n label_order = labels.argsort()\n labels = labels[label_order]\n input = input[label_order]\n if pass_positions:\n positions = positions[label_order]\n\n index_order = index.argsort()\n sorted_index = index[index_order]\n\n def do_map(inputs, output):\n \"\"\"labels must be sorted\"\"\"\n nidx = sorted_index.size\n\n # Find boundaries for each stretch of constant labels\n # This could be faster, but we already paid N log N to sort labels.\n lo = numpy.searchsorted(labels, sorted_index, side='left')\n hi = numpy.searchsorted(labels, sorted_index, side='right')\n\n for i, l, h in zip(range(nidx), lo, hi):\n if l == h:\n continue\n output[i] = func(*[inp[l:h] for inp in inputs])\n\n temp = numpy.empty(index.shape, out_dtype)\n temp[:] = default\n if not pass_positions:\n do_map([input], temp)\n else:\n do_map([input, positions], temp)\n\n output = numpy.zeros(index.shape, out_dtype)\n output[index_order] = temp\n if as_scalar:\n output = output[0]\n\n return output\n\n\ndef _safely_castable_to_int(dt):\n \"\"\"Test whether the numpy data type `dt` can be safely cast to an int.\"\"\"\n int_size = np.dtype(int).itemsize\n safe = ((np.issubdtype(dt, np.signedinteger) and dt.itemsize <= int_size) or\n (np.issubdtype(dt, np.unsignedinteger) and dt.itemsize < int_size))\n return safe\n\n\ndef _stats(input, labels=None, index=None, centered=False):\n \"\"\"Count, sum, and optionally compute (sum - centre)^2 of input by label\n\n Parameters\n ----------\n input : array_like, n-dimensional\n The input data to be analyzed.\n labels : array_like (n-dimensional), optional\n The labels of the data in `input`. 
This array must be broadcast\n compatible with `input`; typically it is the same shape as `input`.\n If `labels` is None, all nonzero values in `input` are treated as\n the single labeled group.\n index : label or sequence of labels, optional\n These are the labels of the groups for which the stats are computed.\n If `index` is None, the stats are computed for the single group where\n `labels` is greater than 0.\n centered : bool, optional\n If True, the centered sum of squares for each labeled group is\n also returned. Default is False.\n\n Returns\n -------\n counts : int or ndarray of ints\n The number of elements in each labeled group.\n sums : scalar or ndarray of scalars\n The sums of the values in each labeled group.\n sums_c : scalar or ndarray of scalars, optional\n The sums of mean-centered squares of the values in each labeled group.\n This is only returned if `centered` is True.\n\n \"\"\"\n def single_group(vals):\n if centered:\n vals_c = vals - vals.mean()\n return vals.size, vals.sum(), (vals_c * vals_c.conjugate()).sum()\n else:\n return vals.size, vals.sum()\n\n if labels is None:\n return single_group(input)\n\n # ensure input and labels match sizes\n input, labels = numpy.broadcast_arrays(input, labels)\n\n if index is None:\n return single_group(input[labels > 0])\n\n if numpy.isscalar(index):\n return single_group(input[labels == index])\n\n def _sum_centered(labels):\n # `labels` is expected to be an ndarray with the same shape as `input`.\n # It must contain the label indices (which are not necessarily the labels\n # themselves).\n means = sums / counts\n centered_input = input - means[labels]\n # bincount expects 1d inputs, so we ravel the arguments.\n bc = numpy.bincount(labels.ravel(),\n weights=(centered_input *\n centered_input.conjugate()).ravel())\n return bc\n\n # Remap labels to unique integers if necessary, or if the largest\n # label is larger than the number of values.\n\n if (not _safely_castable_to_int(labels.dtype) or\n labels.min() < 0 or labels.max() > labels.size):\n # Use numpy.unique to generate the label indices. 
`new_labels` will\n # be 1-d, but it should be interpreted as the flattened n-d array of\n # label indices.\n unique_labels, new_labels = numpy.unique(labels, return_inverse=True)\n counts = numpy.bincount(new_labels)\n sums = numpy.bincount(new_labels, weights=input.ravel())\n if centered:\n # Compute the sum of the mean-centered squares.\n # We must reshape new_labels to the n-d shape of `input` before\n # passing it _sum_centered.\n sums_c = _sum_centered(new_labels.reshape(labels.shape))\n idxs = numpy.searchsorted(unique_labels, index)\n # make all of idxs valid\n idxs[idxs >= unique_labels.size] = 0\n found = (unique_labels[idxs] == index)\n else:\n # labels are an integer type allowed by bincount, and there aren't too\n # many, so call bincount directly.\n counts = numpy.bincount(labels.ravel())\n sums = numpy.bincount(labels.ravel(), weights=input.ravel())\n if centered:\n sums_c = _sum_centered(labels)\n # make sure all index values are valid\n idxs = numpy.asanyarray(index, numpy.int).copy()\n found = (idxs >= 0) & (idxs < counts.size)\n idxs[~found] = 0\n\n counts = counts[idxs]\n counts[~found] = 0\n sums = sums[idxs]\n sums[~found] = 0\n\n if not centered:\n return (counts, sums)\n else:\n sums_c = sums_c[idxs]\n sums_c[~found] = 0\n return (counts, sums, sums_c)\n\n\ndef sum(input, labels=None, index=None):\n \"\"\"\n Calculate the sum of the values of the array.\n\n Parameters\n ----------\n input : array_like\n Values of `input` inside the regions defined by `labels`\n are summed together.\n labels : array_like of ints, optional\n Assign labels to the values of the array. Has to have the same shape as\n `input`.\n index : array_like, optional\n A single label number or a sequence of label numbers of\n the objects to be measured.\n\n Returns\n -------\n sum : ndarray or scalar\n An array of the sums of values of `input` inside the regions defined\n by `labels` with the same shape as `index`. If 'index' is None or scalar,\n a scalar is returned.\n\n See also\n --------\n mean, median\n\n Examples\n --------\n >>> from scipy import ndimage\n >>> input = [0,1,2,3]\n >>> labels = [1,1,2,2]\n >>> ndimage.sum(input, labels, index=[1,2])\n [1.0, 5.0]\n >>> ndimage.sum(input, labels, index=1)\n 1\n >>> ndimage.sum(input, labels)\n 6\n\n\n \"\"\"\n count, sum = _stats(input, labels, index)\n return sum\n\n\ndef mean(input, labels=None, index=None):\n \"\"\"\n Calculate the mean of the values of an array at labels.\n\n Parameters\n ----------\n input : array_like\n Array on which to compute the mean of elements over distinct\n regions.\n labels : array_like, optional\n Array of labels of same shape, or broadcastable to the same shape as\n `input`. 
All elements sharing the same label form one region over\n which the mean of the elements is computed.\n index : int or sequence of ints, optional\n Labels of the objects over which the mean is to be computed.\n Default is None, in which case the mean for all values where label is\n greater than 0 is calculated.\n\n Returns\n -------\n out : list\n Sequence of same length as `index`, with the mean of the different\n regions labeled by the labels in `index`.\n\n See also\n --------\n variance, standard_deviation, minimum, maximum, sum, label\n\n Examples\n --------\n >>> from scipy import ndimage\n >>> a = np.arange(25).reshape((5,5))\n >>> labels = np.zeros_like(a)\n >>> labels[3:5,3:5] = 1\n >>> index = np.unique(labels)\n >>> labels\n array([[0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 1, 1],\n [0, 0, 0, 1, 1]])\n >>> index\n array([0, 1])\n >>> ndimage.mean(a, labels=labels, index=index)\n [10.285714285714286, 21.0]\n\n \"\"\"\n\n count, sum = _stats(input, labels, index)\n return sum / numpy.asanyarray(count).astype(numpy.float)\n\n\ndef variance(input, labels=None, index=None):\n \"\"\"\n Calculate the variance of the values of an n-D image array, optionally at\n specified sub-regions.\n\n Parameters\n ----------\n input : array_like\n Nd-image data to process.\n labels : array_like, optional\n Labels defining sub-regions in `input`.\n If not None, must be same shape as `input`.\n index : int or sequence of ints, optional\n `labels` to include in output. If None (default), all values where\n `labels` is non-zero are used.\n\n Returns\n -------\n variance : float or ndarray\n Values of variance, for each sub-region if `labels` and `index` are\n specified.\n\n See Also\n --------\n label, standard_deviation, maximum, minimum, extrema\n\n Examples\n --------\n >>> a = np.array([[1, 2, 0, 0],\n ... [5, 3, 0, 4],\n ... [0, 0, 0, 7],\n ... [9, 3, 0, 0]])\n >>> from scipy import ndimage\n >>> ndimage.variance(a)\n 7.609375\n\n Features to process can be specified using `labels` and `index`:\n\n >>> lbl, nlbl = ndimage.label(a)\n >>> ndimage.variance(a, lbl, index=np.arange(1, nlbl+1))\n array([ 2.1875, 2.25 , 9. ])\n\n If no index is given, all non-zero `labels` are processed:\n\n >>> ndimage.variance(a, lbl)\n 6.1875\n\n \"\"\"\n count, sum, sum_c_sq = _stats(input, labels, index, centered=True)\n return sum_c_sq / np.asanyarray(count).astype(float)\n\n\ndef standard_deviation(input, labels=None, index=None):\n \"\"\"\n Calculate the standard deviation of the values of an n-D image array,\n optionally at specified sub-regions.\n\n Parameters\n ----------\n input : array_like\n Nd-image data to process.\n labels : array_like, optional\n Labels to identify sub-regions in `input`.\n If not None, must be same shape as `input`.\n index : int or sequence of ints, optional\n `labels` to include in output. If None (default), all values where\n `labels` is non-zero are used.\n\n Returns\n -------\n standard_deviation : float or ndarray\n Values of standard deviation, for each sub-region if `labels` and\n `index` are specified.\n\n See Also\n --------\n label, variance, maximum, minimum, extrema\n\n Examples\n --------\n >>> a = np.array([[1, 2, 0, 0],\n ... [5, 3, 0, 4],\n ... [0, 0, 0, 7],\n ... 
[9, 3, 0, 0]])\n >>> from scipy import ndimage\n >>> ndimage.standard_deviation(a)\n 2.7585095613392387\n\n Features to process can be specified using `labels` and `index`:\n\n >>> lbl, nlbl = ndimage.label(a)\n >>> ndimage.standard_deviation(a, lbl, index=np.arange(1, nlbl+1))\n array([ 1.479, 1.5 , 3. ])\n\n If no index is given, non-zero `labels` are processed:\n\n >>> ndimage.standard_deviation(a, lbl)\n 2.4874685927665499\n\n \"\"\"\n return numpy.sqrt(variance(input, labels, index))\n\n\ndef _select(input, labels=None, index=None, find_min=False, find_max=False,\n find_min_positions=False, find_max_positions=False,\n find_median=False):\n \"\"\"Returns min, max, or both, plus their positions (if requested), and\n median.\"\"\"\n\n input = numpy.asanyarray(input)\n\n find_positions = find_min_positions or find_max_positions\n positions = None\n if find_positions:\n positions = numpy.arange(input.size).reshape(input.shape)\n\n def single_group(vals, positions):\n result = []\n if find_min:\n result += [vals.min()]\n if find_min_positions:\n result += [positions[vals == vals.min()][0]]\n if find_max:\n result += [vals.max()]\n if find_max_positions:\n result += [positions[vals == vals.max()][0]]\n if find_median:\n result += [numpy.median(vals)]\n return result\n\n if labels is None:\n return single_group(input, positions)\n\n # ensure input and labels match sizes\n input, labels = numpy.broadcast_arrays(input, labels)\n\n if index is None:\n mask = (labels > 0)\n masked_positions = None\n if find_positions:\n masked_positions = positions[mask]\n return single_group(input[mask], masked_positions)\n\n if numpy.isscalar(index):\n mask = (labels == index)\n masked_positions = None\n if find_positions:\n masked_positions = positions[mask]\n return single_group(input[mask], masked_positions)\n\n # remap labels to unique integers if necessary, or if the largest\n # label is larger than the number of values.\n if (not _safely_castable_to_int(labels.dtype) or\n labels.min() < 0 or labels.max() > labels.size):\n # remap labels, and indexes\n unique_labels, labels = numpy.unique(labels, return_inverse=True)\n idxs = numpy.searchsorted(unique_labels, index)\n\n # make all of idxs valid\n idxs[idxs >= unique_labels.size] = 0\n found = (unique_labels[idxs] == index)\n else:\n # labels are an integer type, and there aren't too many.\n idxs = numpy.asanyarray(index, numpy.int).copy()\n found = (idxs >= 0) & (idxs <= labels.max())\n\n idxs[~ found] = labels.max() + 1\n\n if find_median:\n order = numpy.lexsort((input.ravel(), labels.ravel()))\n else:\n order = input.ravel().argsort()\n input = input.ravel()[order]\n labels = labels.ravel()[order]\n if find_positions:\n positions = positions.ravel()[order]\n\n result = []\n if find_min:\n mins = numpy.zeros(labels.max() + 2, input.dtype)\n mins[labels[::-1]] = input[::-1]\n result += [mins[idxs]]\n if find_min_positions:\n minpos = numpy.zeros(labels.max() + 2, int)\n minpos[labels[::-1]] = positions[::-1]\n result += [minpos[idxs]]\n if find_max:\n maxs = numpy.zeros(labels.max() + 2, input.dtype)\n maxs[labels] = input\n result += [maxs[idxs]]\n if find_max_positions:\n maxpos = numpy.zeros(labels.max() + 2, int)\n maxpos[labels] = positions\n result += [maxpos[idxs]]\n if find_median:\n locs = numpy.arange(len(labels))\n lo = numpy.zeros(labels.max() + 2, numpy.int)\n lo[labels[::-1]] = locs[::-1]\n hi = numpy.zeros(labels.max() + 2, numpy.int)\n hi[labels] = locs\n lo = lo[idxs]\n hi = hi[idxs]\n # lo is an index to the lowest value in input for 
each label,\n # hi is an index to the largest value.\n # move them to be either the same ((hi - lo) % 2 == 0) or next\n # to each other ((hi - lo) % 2 == 1), then average.\n step = (hi - lo) // 2\n lo += step\n hi -= step\n result += [(input[lo] + input[hi]) / 2.0]\n\n return result\n\n\ndef minimum(input, labels=None, index=None):\n \"\"\"\n Calculate the minimum of the values of an array over labeled regions.\n\n Parameters\n ----------\n input : array_like\n Array_like of values. For each region specified by `labels`, the\n minimal values of `input` over the region is computed.\n labels : array_like, optional\n An array_like of integers marking different regions over which the\n minimum value of `input` is to be computed. `labels` must have the\n same shape as `input`. If `labels` is not specified, the minimum\n over the whole array is returned.\n index : array_like, optional\n A list of region labels that are taken into account for computing the\n minima. If index is None, the minimum over all elements where `labels`\n is non-zero is returned.\n\n Returns\n -------\n minimum : float or list of floats\n List of minima of `input` over the regions determined by `labels` and\n whose index is in `index`. If `index` or `labels` are not specified, a\n float is returned: the minimal value of `input` if `labels` is None,\n and the minimal value of elements where `labels` is greater than zero\n if `index` is None.\n\n See also\n --------\n label, maximum, median, minimum_position, extrema, sum, mean, variance,\n standard_deviation\n\n Notes\n -----\n The function returns a Python list and not a NumPy array, use\n `np.array` to convert the list to an array.\n\n Examples\n --------\n >>> from scipy import ndimage\n >>> a = np.array([[1, 2, 0, 0],\n ... [5, 3, 0, 4],\n ... [0, 0, 0, 7],\n ... [9, 3, 0, 0]])\n >>> labels, labels_nb = ndimage.label(a)\n >>> labels\n array([[1, 1, 0, 0],\n [1, 1, 0, 2],\n [0, 0, 0, 2],\n [3, 3, 0, 0]])\n >>> ndimage.minimum(a, labels=labels, index=np.arange(1, labels_nb + 1))\n [1.0, 4.0, 3.0]\n >>> ndimage.minimum(a)\n 0.0\n >>> ndimage.minimum(a, labels=labels)\n 1.0\n\n \"\"\"\n return _select(input, labels, index, find_min=True)[0]\n\n\ndef maximum(input, labels=None, index=None):\n \"\"\"\n Calculate the maximum of the values of an array over labeled regions.\n\n Parameters\n ----------\n input : array_like\n Array_like of values. For each region specified by `labels`, the\n maximal values of `input` over the region is computed.\n labels : array_like, optional\n An array of integers marking different regions over which the\n maximum value of `input` is to be computed. `labels` must have the\n same shape as `input`. If `labels` is not specified, the maximum\n over the whole array is returned.\n index : array_like, optional\n A list of region labels that are taken into account for computing the\n maxima. If index is None, the maximum over all elements where `labels`\n is non-zero is returned.\n\n Returns\n -------\n output : float or list of floats\n List of maxima of `input` over the regions determined by `labels` and\n whose index is in `index`. 
If `index` or `labels` are not specified, a\n float is returned: the maximal value of `input` if `labels` is None,\n and the maximal value of elements where `labels` is greater than zero\n if `index` is None.\n\n See also\n --------\n label, minimum, median, maximum_position, extrema, sum, mean, variance,\n standard_deviation\n\n Notes\n -----\n The function returns a Python list and not a NumPy array, use\n `np.array` to convert the list to an array.\n\n Examples\n --------\n >>> a = np.arange(16).reshape((4,4))\n >>> a\n array([[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11],\n [12, 13, 14, 15]])\n >>> labels = np.zeros_like(a)\n >>> labels[:2,:2] = 1\n >>> labels[2:, 1:3] = 2\n >>> labels\n array([[1, 1, 0, 0],\n [1, 1, 0, 0],\n [0, 2, 2, 0],\n [0, 2, 2, 0]])\n >>> from scipy import ndimage\n >>> ndimage.maximum(a)\n 15.0\n >>> ndimage.maximum(a, labels=labels, index=[1,2])\n [5.0, 14.0]\n >>> ndimage.maximum(a, labels=labels)\n 14.0\n\n >>> b = np.array([[1, 2, 0, 0],\n ... [5, 3, 0, 4],\n ... [0, 0, 0, 7],\n ... [9, 3, 0, 0]])\n >>> labels, labels_nb = ndimage.label(b)\n >>> labels\n array([[1, 1, 0, 0],\n [1, 1, 0, 2],\n [0, 0, 0, 2],\n [3, 3, 0, 0]])\n >>> ndimage.maximum(b, labels=labels, index=np.arange(1, labels_nb + 1))\n [5.0, 7.0, 9.0]\n\n \"\"\"\n return _select(input, labels, index, find_max=True)[0]\n\n\ndef median(input, labels=None, index=None):\n \"\"\"\n Calculate the median of the values of an array over labeled regions.\n\n Parameters\n ----------\n input : array_like\n Array_like of values. For each region specified by `labels`, the\n median value of `input` over the region is computed.\n labels : array_like, optional\n An array_like of integers marking different regions over which the\n median value of `input` is to be computed. `labels` must have the\n same shape as `input`. If `labels` is not specified, the median\n over the whole array is returned.\n index : array_like, optional\n A list of region labels that are taken into account for computing the\n medians. If index is None, the median over all elements where `labels`\n is non-zero is returned.\n\n Returns\n -------\n median : float or list of floats\n List of medians of `input` over the regions determined by `labels` and\n whose index is in `index`. If `index` or `labels` are not specified, a\n float is returned: the median value of `input` if `labels` is None,\n and the median value of elements where `labels` is greater than zero\n if `index` is None.\n\n See also\n --------\n label, minimum, maximum, extrema, sum, mean, variance, standard_deviation\n\n Notes\n -----\n The function returns a Python list and not a NumPy array, use\n `np.array` to convert the list to an array.\n\n Examples\n --------\n >>> from scipy import ndimage\n >>> a = np.array([[1, 2, 0, 1],\n ... [5, 3, 0, 4],\n ... [0, 0, 0, 7],\n ... 
[9, 3, 0, 0]])\n >>> labels, labels_nb = ndimage.label(a)\n >>> labels\n array([[1, 1, 0, 2],\n [1, 1, 0, 2],\n [0, 0, 0, 2],\n [3, 3, 0, 0]])\n >>> ndimage.median(a, labels=labels, index=np.arange(1, labels_nb + 1))\n [2.5, 4.0, 6.0]\n >>> ndimage.median(a)\n 1.0\n >>> ndimage.median(a, labels=labels)\n 3.0\n\n \"\"\"\n return _select(input, labels, index, find_median=True)[0]\n\n\ndef minimum_position(input, labels=None, index=None):\n \"\"\"\n Find the positions of the minimums of the values of an array at labels.\n\n Parameters\n ----------\n input : array_like\n Array_like of values.\n labels : array_like, optional\n An array of integers marking different regions over which the\n position of the minimum value of `input` is to be computed.\n `labels` must have the same shape as `input`. If `labels` is not\n specified, the location of the first minimum over the whole\n array is returned.\n\n The `labels` argument only works when `index` is specified.\n index : array_like, optional\n A list of region labels that are taken into account for finding the\n location of the minima. If `index` is None, the ``first`` minimum\n over all elements where `labels` is non-zero is returned.\n\n The `index` argument only works when `labels` is specified.\n\n Returns\n -------\n output : list of tuples of ints\n Tuple of ints or list of tuples of ints that specify the location\n of minima of `input` over the regions determined by `labels` and\n whose index is in `index`.\n\n If `index` or `labels` are not specified, a tuple of ints is\n returned specifying the location of the first minimal value of `input`.\n\n See also\n --------\n label, minimum, median, maximum_position, extrema, sum, mean, variance,\n standard_deviation\n\n Examples\n --------\n >>> a = np.array([[10, 20, 30],\n ... [40, 80, 100],\n ... [1, 100, 200]])\n >>> b = np.array([[1, 2, 0, 1],\n ... [5, 3, 0, 4],\n ... [0, 0, 0, 7],\n ... [9, 3, 0, 0]])\n\n >>> from scipy import ndimage\n\n >>> ndimage.minimum_position(a)\n (2, 0)\n >>> ndimage.minimum_position(b)\n (0, 2)\n\n Features to process can be specified using `labels` and `index`:\n\n >>> label, pos = ndimage.label(a)\n >>> ndimage.minimum_position(a, label, index=np.arange(1, pos+1))\n [(2, 0)]\n\n >>> label, pos = ndimage.label(b)\n >>> ndimage.minimum_position(b, label, index=np.arange(1, pos+1))\n [(0, 0), (0, 3), (3, 1)]\n\n \"\"\"\n dims = numpy.array(numpy.asarray(input).shape)\n # see numpy.unravel_index to understand this line.\n dim_prod = numpy.cumprod([1] + list(dims[:0:-1]))[::-1]\n\n result = _select(input, labels, index, find_min_positions=True)[0]\n\n if numpy.isscalar(result):\n return tuple((result // dim_prod) % dims)\n\n return [tuple(v) for v in (result.reshape(-1, 1) // dim_prod) % dims]\n\n\ndef maximum_position(input, labels=None, index=None):\n \"\"\"\n Find the positions of the maximums of the values of an array at labels.\n\n For each region specified by `labels`, the position of the maximum\n value of `input` within the region is returned.\n\n Parameters\n ----------\n input : array_like\n Array_like of values.\n labels : array_like, optional\n An array of integers marking different regions over which the\n position of the maximum value of `input` is to be computed.\n `labels` must have the same shape as `input`. 
If `labels` is not\n specified, the location of the first maximum over the whole\n array is returned.\n\n The `labels` argument only works when `index` is specified.\n index : array_like, optional\n A list of region labels that are taken into account for finding the\n location of the maxima. If `index` is None, the first maximum\n over all elements where `labels` is non-zero is returned.\n\n The `index` argument only works when `labels` is specified.\n\n Returns\n -------\n output : list of tuples of ints\n List of tuples of ints that specify the location of maxima of\n `input` over the regions determined by `labels` and whose index\n is in `index`.\n\n If `index` or `labels` are not specified, a tuple of ints is\n returned specifying the location of the ``first`` maximal value\n of `input`.\n\n See also\n --------\n label, minimum, median, maximum_position, extrema, sum, mean, variance,\n standard_deviation\n\n \"\"\"\n dims = numpy.array(numpy.asarray(input).shape)\n # see numpy.unravel_index to understand this line.\n dim_prod = numpy.cumprod([1] + list(dims[:0:-1]))[::-1]\n\n result = _select(input, labels, index, find_max_positions=True)[0]\n\n if numpy.isscalar(result):\n return tuple((result // dim_prod) % dims)\n\n return [tuple(v) for v in (result.reshape(-1, 1) // dim_prod) % dims]\n\n\ndef extrema(input, labels=None, index=None):\n \"\"\"\n Calculate the minimums and maximums of the values of an array\n at labels, along with their positions.\n\n Parameters\n ----------\n input : ndarray\n Nd-image data to process.\n labels : ndarray, optional\n Labels of features in input.\n If not None, must be same shape as `input`.\n index : int or sequence of ints, optional\n Labels to include in output. If None (default), all values where\n non-zero `labels` are used.\n\n Returns\n -------\n minimums, maximums : int or ndarray\n Values of minimums and maximums in each feature.\n min_positions, max_positions : tuple or list of tuples\n Each tuple gives the n-D coordinates of the corresponding minimum\n or maximum.\n\n See Also\n --------\n maximum, minimum, maximum_position, minimum_position, center_of_mass\n\n Examples\n --------\n >>> a = np.array([[1, 2, 0, 0],\n ... [5, 3, 0, 4],\n ... [0, 0, 0, 7],\n ... 
[9, 3, 0, 0]])\n >>> from scipy import ndimage\n >>> ndimage.extrema(a)\n (0, 9, (0, 2), (3, 0))\n\n Features to process can be specified using `labels` and `index`:\n\n >>> lbl, nlbl = ndimage.label(a)\n >>> ndimage.extrema(a, lbl, index=np.arange(1, nlbl+1))\n (array([1, 4, 3]),\n array([5, 7, 9]),\n [(0, 0), (1, 3), (3, 1)],\n [(1, 0), (2, 3), (3, 0)])\n\n If no index is given, non-zero `labels` are processed:\n\n >>> ndimage.extrema(a, lbl)\n (1, 9, (0, 0), (3, 0))\n\n \"\"\"\n dims = numpy.array(numpy.asarray(input).shape)\n # see numpy.unravel_index to understand this line.\n dim_prod = numpy.cumprod([1] + list(dims[:0:-1]))[::-1]\n\n minimums, min_positions, maximums, max_positions = _select(input, labels,\n index,\n find_min=True,\n find_max=True,\n find_min_positions=True,\n find_max_positions=True)\n\n if numpy.isscalar(minimums):\n return (minimums, maximums, tuple((min_positions // dim_prod) % dims),\n tuple((max_positions // dim_prod) % dims))\n\n min_positions = [tuple(v) for v in (min_positions.reshape(-1, 1) // dim_prod) % dims]\n max_positions = [tuple(v) for v in (max_positions.reshape(-1, 1) // dim_prod) % dims]\n\n return minimums, maximums, min_positions, max_positions\n\n\ndef center_of_mass(input, labels=None, index=None):\n \"\"\"\n Calculate the center of mass of the values of an array at labels.\n\n Parameters\n ----------\n input : ndarray\n Data from which to calculate center-of-mass. The masses can either\n be positive or negative.\n labels : ndarray, optional\n Labels for objects in `input`, as generated by `ndimage.label`.\n Only used with `index`. Dimensions must be the same as `input`.\n index : int or sequence of ints, optional\n Labels for which to calculate centers-of-mass. If not specified,\n all labels greater than zero are used. Only used with `labels`.\n\n Returns\n -------\n center_of_mass : tuple, or list of tuples\n Coordinates of centers-of-mass.\n\n Examples\n --------\n >>> a = np.array(([0,0,0,0],\n ... [0,1,1,0],\n ... [0,1,1,0],\n ... [0,1,1,0]))\n >>> from scipy import ndimage\n >>> ndimage.measurements.center_of_mass(a)\n (2.0, 1.5)\n\n Calculation of multiple objects in an image\n\n >>> b = np.array(([0,1,1,0],\n ... [0,1,0,0],\n ... [0,0,0,0],\n ... [0,0,1,1],\n ... [0,0,1,1]))\n >>> lbl = ndimage.label(b)[0]\n >>> ndimage.measurements.center_of_mass(b, lbl, [1,2])\n [(0.33333333333333331, 1.3333333333333333), (3.5, 2.5)]\n\n Negative masses are also accepted, which can occur for example when\n bias is removed from measured data due to random noise.\n\n >>> c = np.array(([-1,0,0,0],\n ... [0,-1,-1,0],\n ... [0,1,-1,0],\n ... [0,1,1,0]))\n >>> ndimage.measurements.center_of_mass(c)\n (-4.0, 1.0)\n\n If there are division by zero issues, the function does not raise an\n error but rather issues a RuntimeWarning before returning inf and/or NaN.\n\n >>> d = np.array([-1, 1])\n >>> ndimage.measurements.center_of_mass(d)\n (inf,)\n \"\"\"\n normalizer = sum(input, labels, index)\n grids = numpy.ogrid[[slice(0, i) for i in input.shape]]\n\n results = [sum(input * grids[dir].astype(float), labels, index) / normalizer\n for dir in range(input.ndim)]\n\n if numpy.isscalar(results[0]):\n return tuple(results)\n\n return [tuple(v) for v in numpy.array(results).T]\n\n\ndef histogram(input, min, max, bins, labels=None, index=None):\n \"\"\"\n Calculate the histogram of the values of an array, optionally at labels.\n\n Histogram calculates the frequency of values in an array within bins\n determined by `min`, `max`, and `bins`. 
The `labels` and `index`\n keywords can limit the scope of the histogram to specified sub-regions\n within the array.\n\n Parameters\n ----------\n input : array_like\n Data for which to calculate histogram.\n min, max : int\n Minimum and maximum values of range of histogram bins.\n bins : int\n Number of bins.\n labels : array_like, optional\n Labels for objects in `input`.\n If not None, must be same shape as `input`.\n index : int or sequence of ints, optional\n Label or labels for which to calculate histogram. If None, all values\n where label is greater than zero are used\n\n Returns\n -------\n hist : ndarray\n Histogram counts.\n\n Examples\n --------\n >>> a = np.array([[ 0. , 0.2146, 0.5962, 0. ],\n ... [ 0. , 0.7778, 0. , 0. ],\n ... [ 0. , 0. , 0. , 0. ],\n ... [ 0. , 0. , 0.7181, 0.2787],\n ... [ 0. , 0. , 0.6573, 0.3094]])\n >>> from scipy import ndimage\n >>> ndimage.measurements.histogram(a, 0, 1, 10)\n array([13, 0, 2, 1, 0, 1, 1, 2, 0, 0])\n\n With labels and no indices, non-zero elements are counted:\n\n >>> lbl, nlbl = ndimage.label(a)\n >>> ndimage.measurements.histogram(a, 0, 1, 10, lbl)\n array([0, 0, 2, 1, 0, 1, 1, 2, 0, 0])\n\n Indices can be used to count only certain objects:\n\n >>> ndimage.measurements.histogram(a, 0, 1, 10, lbl, 2)\n array([0, 0, 1, 1, 0, 0, 1, 1, 0, 0])\n\n \"\"\"\n _bins = numpy.linspace(min, max, bins + 1)\n\n def _hist(vals):\n return numpy.histogram(vals, _bins)[0]\n\n return labeled_comprehension(input, labels, index, _hist, object, None,\n pass_positions=False)\n\n\ndef watershed_ift(input, markers, structure=None, output=None):\n \"\"\"\n Apply watershed from markers using image foresting transform algorithm.\n\n Parameters\n ----------\n input : array_like\n Input.\n markers : array_like\n Markers are points within each watershed that form the beginning\n of the process. Negative markers are considered background markers\n which are processed after the other markers.\n structure : structure element, optional\n A structuring element defining the connectivity of the object can be\n provided. If None, an element is generated with a squared\n connectivity equal to one.\n output : ndarray, optional\n An output array can optionally be provided. The same shape as input.\n\n Returns\n -------\n watershed_ift : ndarray\n Output. Same shape as `input`.\n\n References\n ----------\n .. [1] A.X. Falcao, J. Stolfi and R. de Alencar Lotufo, \"The image\n foresting transform: theory, algorithms, and applications\",\n Pattern Analysis and Machine Intelligence, vol. 26, pp. 
19-29, 2004.\n\n \"\"\"\n input = numpy.asarray(input)\n if input.dtype.type not in [numpy.uint8, numpy.uint16]:\n raise TypeError('only 8 and 16 unsigned inputs are supported')\n\n if structure is None:\n structure = morphology.generate_binary_structure(input.ndim, 1)\n structure = numpy.asarray(structure, dtype=bool)\n if structure.ndim != input.ndim:\n raise RuntimeError('structure and input must have equal rank')\n for ii in structure.shape:\n if ii != 3:\n raise RuntimeError('structure dimensions must be equal to 3')\n\n if not structure.flags.contiguous:\n structure = structure.copy()\n markers = numpy.asarray(markers)\n if input.shape != markers.shape:\n raise RuntimeError('input and markers must have equal shape')\n\n integral_types = [numpy.int0,\n numpy.int8,\n numpy.int16,\n numpy.int32,\n numpy.int_,\n numpy.int64,\n numpy.intc,\n numpy.intp]\n\n if markers.dtype.type not in integral_types:\n raise RuntimeError('marker should be of integer type')\n\n if isinstance(output, numpy.ndarray):\n if output.dtype.type not in integral_types:\n raise RuntimeError('output should be of integer type')\n else:\n output = markers.dtype\n\n output = _ni_support._get_output(output, input)\n _nd_image.watershed_ift(input, markers, structure, output)\n return output\n",
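Illustrative sketch only, not part of the corpus entry above: it exercises the scipy.ndimage measurement helpers (label, center_of_mass, extrema, histogram) whose docstrings appear in that entry; the sample array and the label/index choices are arbitrary assumptions.

# Sketch: per-label measurements with scipy.ndimage (sample data is assumed)
import numpy as np
from scipy import ndimage

a = np.array([[0, 0, 0, 0],
              [0, 1, 1, 0],
              [0, 5, 1, 0],
              [0, 0, 9, 0]])

# Label connected non-zero regions, then measure each labelled feature.
lbl, nlbl = ndimage.label(a)
index = np.arange(1, nlbl + 1)

print(ndimage.center_of_mass(a, lbl, index))       # per-label centres of mass
print(ndimage.extrema(a, lbl, index))              # min/max values and their positions
print(ndimage.histogram(a, 0, 10, 5, lbl, index))  # per-label histograms over [0, 10)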
"from __future__ import division, absolute_import, print_function\n\nfrom functools import reduce\n\nimport numpy as np\nimport numpy.core.umath as umath\nimport numpy.core.fromnumeric as fromnumeric\nfrom numpy.testing import (\n assert_, assert_raises, assert_equal,\n )\nfrom numpy.ma import (\n MaskType, MaskedArray, absolute, add, all, allclose, allequal, alltrue,\n arange, arccos, arcsin, arctan, arctan2, array, average, choose,\n concatenate, conjugate, cos, cosh, count, divide, equal, exp, filled,\n getmask, greater, greater_equal, inner, isMaskedArray, less,\n less_equal, log, log10, make_mask, masked, masked_array, masked_equal,\n masked_greater, masked_greater_equal, masked_inside, masked_less,\n masked_less_equal, masked_not_equal, masked_outside,\n masked_print_option, masked_values, masked_where, maximum, minimum,\n multiply, nomask, nonzero, not_equal, ones, outer, product, put, ravel,\n repeat, resize, shape, sin, sinh, sometrue, sort, sqrt, subtract, sum,\n take, tan, tanh, transpose, where, zeros,\n )\nfrom numpy.compat import pickle\n\npi = np.pi\n\n\ndef eq(v, w, msg=''):\n result = allclose(v, w)\n if not result:\n print(\"Not eq:%s\\n%s\\n----%s\" % (msg, str(v), str(w)))\n return result\n\n\nclass TestMa(object):\n\n def setup(self):\n x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])\n y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])\n a10 = 10.\n m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]\n m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]\n xm = array(x, mask=m1)\n ym = array(y, mask=m2)\n z = np.array([-.5, 0., .5, .8])\n zm = array(z, mask=[0, 1, 0, 0])\n xf = np.where(m1, 1e+20, x)\n s = x.shape\n xm.set_fill_value(1e+20)\n self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf, s)\n\n def test_testBasic1d(self):\n # Test of basic array creation and properties in 1 dimension.\n (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d\n assert_(not isMaskedArray(x))\n assert_(isMaskedArray(xm))\n assert_equal(shape(xm), s)\n assert_equal(xm.shape, s)\n assert_equal(xm.dtype, x.dtype)\n assert_equal(xm.size, reduce(lambda x, y:x * y, s))\n assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1))\n assert_(eq(xm, xf))\n assert_(eq(filled(xm, 1.e20), xf))\n assert_(eq(x, xm))\n\n def test_testBasic2d(self):\n # Test of basic array creation and properties in 2 dimensions.\n for s in [(4, 3), (6, 2)]:\n (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d\n x.shape = s\n y.shape = s\n xm.shape = s\n ym.shape = s\n xf.shape = s\n\n assert_(not isMaskedArray(x))\n assert_(isMaskedArray(xm))\n assert_equal(shape(xm), s)\n assert_equal(xm.shape, s)\n assert_equal(xm.size, reduce(lambda x, y:x * y, s))\n assert_equal(count(xm),\n len(m1) - reduce(lambda x, y:x + y, m1))\n assert_(eq(xm, xf))\n assert_(eq(filled(xm, 1.e20), xf))\n assert_(eq(x, xm))\n self.setup()\n\n def test_testArithmetic(self):\n # Test of basic arithmetic.\n (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d\n a2d = array([[1, 2], [0, 4]])\n a2dm = masked_array(a2d, [[0, 0], [1, 0]])\n assert_(eq(a2d * a2d, a2d * a2dm))\n assert_(eq(a2d + a2d, a2d + a2dm))\n assert_(eq(a2d - a2d, a2d - a2dm))\n for s in [(12,), (4, 3), (2, 6)]:\n x = x.reshape(s)\n y = y.reshape(s)\n xm = xm.reshape(s)\n ym = ym.reshape(s)\n xf = xf.reshape(s)\n assert_(eq(-x, -xm))\n assert_(eq(x + y, xm + ym))\n assert_(eq(x - y, xm - ym))\n assert_(eq(x * y, xm * ym))\n with np.errstate(divide='ignore', invalid='ignore'):\n assert_(eq(x / y, xm / ym))\n assert_(eq(a10 + y, a10 + ym))\n assert_(eq(a10 - y, a10 
- ym))\n assert_(eq(a10 * y, a10 * ym))\n with np.errstate(divide='ignore', invalid='ignore'):\n assert_(eq(a10 / y, a10 / ym))\n assert_(eq(x + a10, xm + a10))\n assert_(eq(x - a10, xm - a10))\n assert_(eq(x * a10, xm * a10))\n assert_(eq(x / a10, xm / a10))\n assert_(eq(x ** 2, xm ** 2))\n assert_(eq(abs(x) ** 2.5, abs(xm) ** 2.5))\n assert_(eq(x ** y, xm ** ym))\n assert_(eq(np.add(x, y), add(xm, ym)))\n assert_(eq(np.subtract(x, y), subtract(xm, ym)))\n assert_(eq(np.multiply(x, y), multiply(xm, ym)))\n with np.errstate(divide='ignore', invalid='ignore'):\n assert_(eq(np.divide(x, y), divide(xm, ym)))\n\n def test_testMixedArithmetic(self):\n na = np.array([1])\n ma = array([1])\n assert_(isinstance(na + ma, MaskedArray))\n assert_(isinstance(ma + na, MaskedArray))\n\n def test_testUfuncs1(self):\n # Test various functions such as sin, cos.\n (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d\n assert_(eq(np.cos(x), cos(xm)))\n assert_(eq(np.cosh(x), cosh(xm)))\n assert_(eq(np.sin(x), sin(xm)))\n assert_(eq(np.sinh(x), sinh(xm)))\n assert_(eq(np.tan(x), tan(xm)))\n assert_(eq(np.tanh(x), tanh(xm)))\n with np.errstate(divide='ignore', invalid='ignore'):\n assert_(eq(np.sqrt(abs(x)), sqrt(xm)))\n assert_(eq(np.log(abs(x)), log(xm)))\n assert_(eq(np.log10(abs(x)), log10(xm)))\n assert_(eq(np.exp(x), exp(xm)))\n assert_(eq(np.arcsin(z), arcsin(zm)))\n assert_(eq(np.arccos(z), arccos(zm)))\n assert_(eq(np.arctan(z), arctan(zm)))\n assert_(eq(np.arctan2(x, y), arctan2(xm, ym)))\n assert_(eq(np.absolute(x), absolute(xm)))\n assert_(eq(np.equal(x, y), equal(xm, ym)))\n assert_(eq(np.not_equal(x, y), not_equal(xm, ym)))\n assert_(eq(np.less(x, y), less(xm, ym)))\n assert_(eq(np.greater(x, y), greater(xm, ym)))\n assert_(eq(np.less_equal(x, y), less_equal(xm, ym)))\n assert_(eq(np.greater_equal(x, y), greater_equal(xm, ym)))\n assert_(eq(np.conjugate(x), conjugate(xm)))\n assert_(eq(np.concatenate((x, y)), concatenate((xm, ym))))\n assert_(eq(np.concatenate((x, y)), concatenate((x, y))))\n assert_(eq(np.concatenate((x, y)), concatenate((xm, y))))\n assert_(eq(np.concatenate((x, y, x)), concatenate((x, ym, x))))\n\n def test_xtestCount(self):\n # Test count\n ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])\n assert_(count(ott).dtype.type is np.intp)\n assert_equal(3, count(ott))\n assert_equal(1, count(1))\n assert_(eq(0, array(1, mask=[1])))\n ott = ott.reshape((2, 2))\n assert_(count(ott).dtype.type is np.intp)\n assert_(isinstance(count(ott, 0), np.ndarray))\n assert_(count(ott).dtype.type is np.intp)\n assert_(eq(3, count(ott)))\n assert_(getmask(count(ott, 0)) is nomask)\n assert_(eq([1, 2], count(ott, 0)))\n\n def test_testMinMax(self):\n # Test minimum and maximum.\n (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d\n xr = np.ravel(x) # max doesn't work if shaped\n xmr = ravel(xm)\n\n # true because of careful selection of data\n assert_(eq(max(xr), maximum.reduce(xmr)))\n assert_(eq(min(xr), minimum.reduce(xmr)))\n\n def test_testAddSumProd(self):\n # Test add, sum, product.\n (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d\n assert_(eq(np.add.reduce(x), add.reduce(x)))\n assert_(eq(np.add.accumulate(x), add.accumulate(x)))\n assert_(eq(4, sum(array(4), axis=0)))\n assert_(eq(4, sum(array(4), axis=0)))\n assert_(eq(np.sum(x, axis=0), sum(x, axis=0)))\n assert_(eq(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0)))\n assert_(eq(np.sum(x, 0), sum(x, 0)))\n assert_(eq(np.product(x, axis=0), product(x, axis=0)))\n assert_(eq(np.product(x, 0), product(x, 0)))\n 
assert_(eq(np.product(filled(xm, 1), axis=0),\n product(xm, axis=0)))\n if len(s) > 1:\n assert_(eq(np.concatenate((x, y), 1),\n concatenate((xm, ym), 1)))\n assert_(eq(np.add.reduce(x, 1), add.reduce(x, 1)))\n assert_(eq(np.sum(x, 1), sum(x, 1)))\n assert_(eq(np.product(x, 1), product(x, 1)))\n\n def test_testCI(self):\n # Test of conversions and indexing\n x1 = np.array([1, 2, 4, 3])\n x2 = array(x1, mask=[1, 0, 0, 0])\n x3 = array(x1, mask=[0, 1, 0, 1])\n x4 = array(x1)\n # test conversion to strings\n str(x2) # raises?\n repr(x2) # raises?\n assert_(eq(np.sort(x1), sort(x2, fill_value=0)))\n # tests of indexing\n assert_(type(x2[1]) is type(x1[1]))\n assert_(x1[1] == x2[1])\n assert_(x2[0] is masked)\n assert_(eq(x1[2], x2[2]))\n assert_(eq(x1[2:5], x2[2:5]))\n assert_(eq(x1[:], x2[:]))\n assert_(eq(x1[1:], x3[1:]))\n x1[2] = 9\n x2[2] = 9\n assert_(eq(x1, x2))\n x1[1:3] = 99\n x2[1:3] = 99\n assert_(eq(x1, x2))\n x2[1] = masked\n assert_(eq(x1, x2))\n x2[1:3] = masked\n assert_(eq(x1, x2))\n x2[:] = x1\n x2[1] = masked\n assert_(allequal(getmask(x2), array([0, 1, 0, 0])))\n x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])\n assert_(allequal(getmask(x3), array([0, 1, 1, 0])))\n x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])\n assert_(allequal(getmask(x4), array([0, 1, 1, 0])))\n assert_(allequal(x4, array([1, 2, 3, 4])))\n x1 = np.arange(5) * 1.0\n x2 = masked_values(x1, 3.0)\n assert_(eq(x1, x2))\n assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask))\n assert_(eq(3.0, x2.fill_value))\n x1 = array([1, 'hello', 2, 3], object)\n x2 = np.array([1, 'hello', 2, 3], object)\n s1 = x1[1]\n s2 = x2[1]\n assert_equal(type(s2), str)\n assert_equal(type(s1), str)\n assert_equal(s1, s2)\n assert_(x1[1:1].shape == (0,))\n\n def test_testCopySize(self):\n # Tests of some subtle points of copying and sizing.\n n = [0, 0, 1, 0, 0]\n m = make_mask(n)\n m2 = make_mask(m)\n assert_(m is m2)\n m3 = make_mask(m, copy=1)\n assert_(m is not m3)\n\n x1 = np.arange(5)\n y1 = array(x1, mask=m)\n assert_(y1._data is not x1)\n assert_(allequal(x1, y1._data))\n assert_(y1._mask is m)\n\n y1a = array(y1, copy=0)\n # For copy=False, one might expect that the array would just\n # passed on, i.e., that it would be \"is\" instead of \"==\".\n # See gh-4043 for discussion.\n assert_(y1a._mask.__array_interface__ ==\n y1._mask.__array_interface__)\n\n y2 = array(x1, mask=m3, copy=0)\n assert_(y2._mask is m3)\n assert_(y2[2] is masked)\n y2[2] = 9\n assert_(y2[2] is not masked)\n assert_(y2._mask is m3)\n assert_(allequal(y2.mask, 0))\n\n y2a = array(x1, mask=m, copy=1)\n assert_(y2a._mask is not m)\n assert_(y2a[2] is masked)\n y2a[2] = 9\n assert_(y2a[2] is not masked)\n assert_(y2a._mask is not m)\n assert_(allequal(y2a.mask, 0))\n\n y3 = array(x1 * 1.0, mask=m)\n assert_(filled(y3).dtype is (x1 * 1.0).dtype)\n\n x4 = arange(4)\n x4[2] = masked\n y4 = resize(x4, (8,))\n assert_(eq(concatenate([x4, x4]), y4))\n assert_(eq(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]))\n y5 = repeat(x4, (2, 2, 2, 2), axis=0)\n assert_(eq(y5, [0, 0, 1, 1, 2, 2, 3, 3]))\n y6 = repeat(x4, 2, axis=0)\n assert_(eq(y5, y6))\n\n def test_testPut(self):\n # Test of put\n d = arange(5)\n n = [0, 0, 0, 1, 1]\n m = make_mask(n)\n m2 = m.copy()\n x = array(d, mask=m)\n assert_(x[3] is masked)\n assert_(x[4] is masked)\n x[[1, 4]] = [10, 40]\n assert_(x._mask is m)\n assert_(x[3] is masked)\n assert_(x[4] is not masked)\n assert_(eq(x, [0, 10, 2, -1, 40]))\n\n x = array(d, mask=m2, copy=True)\n x.put([0, 1, 2], [-1, 100, 200])\n assert_(x._mask is 
not m2)\n assert_(x[3] is masked)\n assert_(x[4] is masked)\n assert_(eq(x, [-1, 100, 200, 0, 0]))\n\n def test_testPut2(self):\n # Test of put\n d = arange(5)\n x = array(d, mask=[0, 0, 0, 0, 0])\n z = array([10, 40], mask=[1, 0])\n assert_(x[2] is not masked)\n assert_(x[3] is not masked)\n x[2:4] = z\n assert_(x[2] is masked)\n assert_(x[3] is not masked)\n assert_(eq(x, [0, 1, 10, 40, 4]))\n\n d = arange(5)\n x = array(d, mask=[0, 0, 0, 0, 0])\n y = x[2:4]\n z = array([10, 40], mask=[1, 0])\n assert_(x[2] is not masked)\n assert_(x[3] is not masked)\n y[:] = z\n assert_(y[0] is masked)\n assert_(y[1] is not masked)\n assert_(eq(y, [10, 40]))\n assert_(x[2] is masked)\n assert_(x[3] is not masked)\n assert_(eq(x, [0, 1, 10, 40, 4]))\n\n def test_testMaPut(self):\n (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d\n m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1]\n i = np.nonzero(m)[0]\n put(ym, i, zm)\n assert_(all(take(ym, i, axis=0) == zm))\n\n def test_testOddFeatures(self):\n # Test of other odd features\n x = arange(20)\n x = x.reshape(4, 5)\n x.flat[5] = 12\n assert_(x[1, 0] == 12)\n z = x + 10j * x\n assert_(eq(z.real, x))\n assert_(eq(z.imag, 10 * x))\n assert_(eq((z * conjugate(z)).real, 101 * x * x))\n z.imag[...] = 0.0\n\n x = arange(10)\n x[3] = masked\n assert_(str(x[3]) == str(masked))\n c = x >= 8\n assert_(count(where(c, masked, masked)) == 0)\n assert_(shape(where(c, masked, masked)) == c.shape)\n z = where(c, x, masked)\n assert_(z.dtype is x.dtype)\n assert_(z[3] is masked)\n assert_(z[4] is masked)\n assert_(z[7] is masked)\n assert_(z[8] is not masked)\n assert_(z[9] is not masked)\n assert_(eq(x, z))\n z = where(c, masked, x)\n assert_(z.dtype is x.dtype)\n assert_(z[3] is masked)\n assert_(z[4] is not masked)\n assert_(z[7] is not masked)\n assert_(z[8] is masked)\n assert_(z[9] is masked)\n z = masked_where(c, x)\n assert_(z.dtype is x.dtype)\n assert_(z[3] is masked)\n assert_(z[4] is not masked)\n assert_(z[7] is not masked)\n assert_(z[8] is masked)\n assert_(z[9] is masked)\n assert_(eq(x, z))\n x = array([1., 2., 3., 4., 5.])\n c = array([1, 1, 1, 0, 0])\n x[2] = masked\n z = where(c, x, -x)\n assert_(eq(z, [1., 2., 0., -4., -5]))\n c[0] = masked\n z = where(c, x, -x)\n assert_(eq(z, [1., 2., 0., -4., -5]))\n assert_(z[0] is masked)\n assert_(z[1] is not masked)\n assert_(z[2] is masked)\n assert_(eq(masked_where(greater(x, 2), x), masked_greater(x, 2)))\n assert_(eq(masked_where(greater_equal(x, 2), x),\n masked_greater_equal(x, 2)))\n assert_(eq(masked_where(less(x, 2), x), masked_less(x, 2)))\n assert_(eq(masked_where(less_equal(x, 2), x), masked_less_equal(x, 2)))\n assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))\n assert_(eq(masked_where(equal(x, 2), x), masked_equal(x, 2)))\n assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))\n assert_(eq(masked_inside(list(range(5)), 1, 3), [0, 199, 199, 199, 4]))\n assert_(eq(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199]))\n assert_(eq(masked_inside(array(list(range(5)),\n mask=[1, 0, 0, 0, 0]), 1, 3).mask,\n [1, 1, 1, 1, 0]))\n assert_(eq(masked_outside(array(list(range(5)),\n mask=[0, 1, 0, 0, 0]), 1, 3).mask,\n [1, 1, 0, 0, 1]))\n assert_(eq(masked_equal(array(list(range(5)),\n mask=[1, 0, 0, 0, 0]), 2).mask,\n [1, 0, 1, 0, 0]))\n assert_(eq(masked_not_equal(array([2, 2, 1, 2, 1],\n mask=[1, 0, 0, 0, 0]), 2).mask,\n [1, 0, 1, 0, 1]))\n assert_(eq(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]),\n [99, 99, 3, 4, 5]))\n atest = ones((10, 10, 10), dtype=np.float32)\n 
btest = zeros(atest.shape, MaskType)\n ctest = masked_where(btest, atest)\n assert_(eq(atest, ctest))\n z = choose(c, (-x, x))\n assert_(eq(z, [1., 2., 0., -4., -5]))\n assert_(z[0] is masked)\n assert_(z[1] is not masked)\n assert_(z[2] is masked)\n x = arange(6)\n x[5] = masked\n y = arange(6) * 10\n y[2] = masked\n c = array([1, 1, 1, 0, 0, 0], mask=[1, 0, 0, 0, 0, 0])\n cm = c.filled(1)\n z = where(c, x, y)\n zm = where(cm, x, y)\n assert_(eq(z, zm))\n assert_(getmask(zm) is nomask)\n assert_(eq(zm, [0, 1, 2, 30, 40, 50]))\n z = where(c, masked, 1)\n assert_(eq(z, [99, 99, 99, 1, 1, 1]))\n z = where(c, 1, masked)\n assert_(eq(z, [99, 1, 1, 99, 99, 99]))\n\n def test_testMinMax2(self):\n # Test of minimum, maximum.\n assert_(eq(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3]))\n assert_(eq(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9]))\n x = arange(5)\n y = arange(5) - 2\n x[3] = masked\n y[0] = masked\n assert_(eq(minimum(x, y), where(less(x, y), x, y)))\n assert_(eq(maximum(x, y), where(greater(x, y), x, y)))\n assert_(minimum.reduce(x) == 0)\n assert_(maximum.reduce(x) == 4)\n\n def test_testTakeTransposeInnerOuter(self):\n # Test of take, transpose, inner, outer products\n x = arange(24)\n y = np.arange(24)\n x[5:6] = masked\n x = x.reshape(2, 3, 4)\n y = y.reshape(2, 3, 4)\n assert_(eq(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1))))\n assert_(eq(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1)))\n assert_(eq(np.inner(filled(x, 0), filled(y, 0)),\n inner(x, y)))\n assert_(eq(np.outer(filled(x, 0), filled(y, 0)),\n outer(x, y)))\n y = array(['abc', 1, 'def', 2, 3], object)\n y[2] = masked\n t = take(y, [0, 3, 4])\n assert_(t[0] == 'abc')\n assert_(t[1] == 2)\n assert_(t[2] == 3)\n\n def test_testInplace(self):\n # Test of inplace operations and rich comparisons\n y = arange(10)\n\n x = arange(10)\n xm = arange(10)\n xm[2] = masked\n x += 1\n assert_(eq(x, y + 1))\n xm += 1\n assert_(eq(x, y + 1))\n\n x = arange(10)\n xm = arange(10)\n xm[2] = masked\n x -= 1\n assert_(eq(x, y - 1))\n xm -= 1\n assert_(eq(xm, y - 1))\n\n x = arange(10) * 1.0\n xm = arange(10) * 1.0\n xm[2] = masked\n x *= 2.0\n assert_(eq(x, y * 2))\n xm *= 2.0\n assert_(eq(xm, y * 2))\n\n x = arange(10) * 2\n xm = arange(10)\n xm[2] = masked\n x //= 2\n assert_(eq(x, y))\n xm //= 2\n assert_(eq(x, y))\n\n x = arange(10) * 1.0\n xm = arange(10) * 1.0\n xm[2] = masked\n x /= 2.0\n assert_(eq(x, y / 2.0))\n xm /= arange(10)\n assert_(eq(xm, ones((10,))))\n\n x = arange(10).astype(np.float32)\n xm = arange(10)\n xm[2] = masked\n x += 1.\n assert_(eq(x, y + 1.))\n\n def test_testPickle(self):\n # Test of pickling\n x = arange(12)\n x[4:10:2] = masked\n x = x.reshape(4, 3)\n for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):\n s = pickle.dumps(x, protocol=proto)\n y = pickle.loads(s)\n assert_(eq(x, y))\n\n def test_testMasked(self):\n # Test of masked element\n xx = arange(6)\n xx[1] = masked\n assert_(str(masked) == '--')\n assert_(xx[1] is masked)\n assert_equal(filled(xx[1], 0), 0)\n\n def test_testAverage1(self):\n # Test of average.\n ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])\n assert_(eq(2.0, average(ott, axis=0)))\n assert_(eq(2.0, average(ott, weights=[1., 1., 2., 1.])))\n result, wts = average(ott, weights=[1., 1., 2., 1.], returned=1)\n assert_(eq(2.0, result))\n assert_(wts == 4.0)\n ott[:] = masked\n assert_(average(ott, axis=0) is masked)\n ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])\n ott = ott.reshape(2, 2)\n ott[:, 1] = masked\n assert_(eq(average(ott, axis=0), [2.0, 0.0]))\n assert_(average(ott, 
axis=1)[0] is masked)\n assert_(eq([2., 0.], average(ott, axis=0)))\n result, wts = average(ott, axis=0, returned=1)\n assert_(eq(wts, [1., 0.]))\n\n def test_testAverage2(self):\n # More tests of average.\n w1 = [0, 1, 1, 1, 1, 0]\n w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]]\n x = arange(6)\n assert_(allclose(average(x, axis=0), 2.5))\n assert_(allclose(average(x, axis=0, weights=w1), 2.5))\n y = array([arange(6), 2.0 * arange(6)])\n assert_(allclose(average(y, None),\n np.add.reduce(np.arange(6)) * 3. / 12.))\n assert_(allclose(average(y, axis=0), np.arange(6) * 3. / 2.))\n assert_(allclose(average(y, axis=1),\n [average(x, axis=0), average(x, axis=0)*2.0]))\n assert_(allclose(average(y, None, weights=w2), 20. / 6.))\n assert_(allclose(average(y, axis=0, weights=w2),\n [0., 1., 2., 3., 4., 10.]))\n assert_(allclose(average(y, axis=1),\n [average(x, axis=0), average(x, axis=0)*2.0]))\n m1 = zeros(6)\n m2 = [0, 0, 1, 1, 0, 0]\n m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]]\n m4 = ones(6)\n m5 = [0, 1, 1, 1, 1, 1]\n assert_(allclose(average(masked_array(x, m1), axis=0), 2.5))\n assert_(allclose(average(masked_array(x, m2), axis=0), 2.5))\n assert_(average(masked_array(x, m4), axis=0) is masked)\n assert_equal(average(masked_array(x, m5), axis=0), 0.0)\n assert_equal(count(average(masked_array(x, m4), axis=0)), 0)\n z = masked_array(y, m3)\n assert_(allclose(average(z, None), 20. / 6.))\n assert_(allclose(average(z, axis=0),\n [0., 1., 99., 99., 4.0, 7.5]))\n assert_(allclose(average(z, axis=1), [2.5, 5.0]))\n assert_(allclose(average(z, axis=0, weights=w2),\n [0., 1., 99., 99., 4.0, 10.0]))\n\n a = arange(6)\n b = arange(6) * 3\n r1, w1 = average([[a, b], [b, a]], axis=1, returned=1)\n assert_equal(shape(r1), shape(w1))\n assert_equal(r1.shape, w1.shape)\n r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=1)\n assert_equal(shape(w2), shape(r2))\n r2, w2 = average(ones((2, 2, 3)), returned=1)\n assert_equal(shape(w2), shape(r2))\n r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=1)\n assert_(shape(w2) == shape(r2))\n a2d = array([[1, 2], [0, 4]], float)\n a2dm = masked_array(a2d, [[0, 0], [1, 0]])\n a2da = average(a2d, axis=0)\n assert_(eq(a2da, [0.5, 3.0]))\n a2dma = average(a2dm, axis=0)\n assert_(eq(a2dma, [1.0, 3.0]))\n a2dma = average(a2dm, axis=None)\n assert_(eq(a2dma, 7. 
/ 3.))\n a2dma = average(a2dm, axis=1)\n assert_(eq(a2dma, [1.5, 4.0]))\n\n def test_testToPython(self):\n assert_equal(1, int(array(1)))\n assert_equal(1.0, float(array(1)))\n assert_equal(1, int(array([[[1]]])))\n assert_equal(1.0, float(array([[1]])))\n assert_raises(TypeError, float, array([1, 1]))\n assert_raises(ValueError, bool, array([0, 1]))\n assert_raises(ValueError, bool, array([0, 0], mask=[0, 1]))\n\n def test_testScalarArithmetic(self):\n xm = array(0, mask=1)\n #TODO FIXME: Find out what the following raises a warning in r8247\n with np.errstate(divide='ignore'):\n assert_((1 / array(0)).mask)\n assert_((1 + xm).mask)\n assert_((-xm).mask)\n assert_((-xm).mask)\n assert_(maximum(xm, xm).mask)\n assert_(minimum(xm, xm).mask)\n assert_(xm.filled().dtype is xm._data.dtype)\n x = array(0, mask=0)\n assert_(x.filled() == x._data)\n assert_equal(str(xm), str(masked_print_option))\n\n def test_testArrayMethods(self):\n a = array([1, 3, 2])\n assert_(eq(a.any(), a._data.any()))\n assert_(eq(a.all(), a._data.all()))\n assert_(eq(a.argmax(), a._data.argmax()))\n assert_(eq(a.argmin(), a._data.argmin()))\n assert_(eq(a.choose(0, 1, 2, 3, 4),\n a._data.choose(0, 1, 2, 3, 4)))\n assert_(eq(a.compress([1, 0, 1]), a._data.compress([1, 0, 1])))\n assert_(eq(a.conj(), a._data.conj()))\n assert_(eq(a.conjugate(), a._data.conjugate()))\n m = array([[1, 2], [3, 4]])\n assert_(eq(m.diagonal(), m._data.diagonal()))\n assert_(eq(a.sum(), a._data.sum()))\n assert_(eq(a.take([1, 2]), a._data.take([1, 2])))\n assert_(eq(m.transpose(), m._data.transpose()))\n\n def test_testArrayAttributes(self):\n a = array([1, 3, 2])\n assert_equal(a.ndim, 1)\n\n def test_testAPI(self):\n assert_(not [m for m in dir(np.ndarray)\n if m not in dir(MaskedArray) and\n not m.startswith('_')])\n\n def test_testSingleElementSubscript(self):\n a = array([1, 3, 2])\n b = array([1, 3, 2], mask=[1, 0, 1])\n assert_equal(a[0].shape, ())\n assert_equal(b[0].shape, ())\n assert_equal(b[1].shape, ())\n\n\nclass TestUfuncs(object):\n def setup(self):\n self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6),\n array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),)\n\n def test_testUfuncRegression(self):\n f_invalid_ignore = [\n 'sqrt', 'arctanh', 'arcsin', 'arccos',\n 'arccosh', 'arctanh', 'log', 'log10', 'divide',\n 'true_divide', 'floor_divide', 'remainder', 'fmod']\n for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',\n 'sin', 'cos', 'tan',\n 'arcsin', 'arccos', 'arctan',\n 'sinh', 'cosh', 'tanh',\n 'arcsinh',\n 'arccosh',\n 'arctanh',\n 'absolute', 'fabs', 'negative',\n 'floor', 'ceil',\n 'logical_not',\n 'add', 'subtract', 'multiply',\n 'divide', 'true_divide', 'floor_divide',\n 'remainder', 'fmod', 'hypot', 'arctan2',\n 'equal', 'not_equal', 'less_equal', 'greater_equal',\n 'less', 'greater',\n 'logical_and', 'logical_or', 'logical_xor']:\n try:\n uf = getattr(umath, f)\n except AttributeError:\n uf = getattr(fromnumeric, f)\n mf = getattr(np.ma, f)\n args = self.d[:uf.nin]\n with np.errstate():\n if f in f_invalid_ignore:\n np.seterr(invalid='ignore')\n if f in ['arctanh', 'log', 'log10']:\n np.seterr(divide='ignore')\n ur = uf(*args)\n mr = mf(*args)\n assert_(eq(ur.filled(0), mr.filled(0), f))\n assert_(eqmask(ur.mask, mr.mask))\n\n def test_reduce(self):\n a = self.d[0]\n assert_(not alltrue(a, axis=0))\n assert_(sometrue(a, axis=0))\n assert_equal(sum(a[:3], axis=0), 0)\n assert_equal(product(a, axis=0), 0)\n\n def test_minmax(self):\n a = arange(1, 13).reshape(3, 4)\n amask = masked_where(a < 5, a)\n 
assert_equal(amask.max(), a.max())\n assert_equal(amask.min(), 5)\n assert_((amask.max(0) == a.max(0)).all())\n assert_((amask.min(0) == [5, 6, 7, 8]).all())\n assert_(amask.max(1)[0].mask)\n assert_(amask.min(1)[0].mask)\n\n def test_nonzero(self):\n for t in \"?bhilqpBHILQPfdgFDGO\":\n x = array([1, 0, 2, 0], mask=[0, 0, 1, 1])\n assert_(eq(nonzero(x), [0]))\n\n\nclass TestArrayMethods(object):\n\n def setup(self):\n x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,\n 8.43, 7.78, 9.865, 5.878, 8.979, 4.732,\n 3.012, 6.022, 5.095, 3.116, 5.238, 3.957,\n 6.04, 9.63, 7.712, 3.382, 4.489, 6.479,\n 7.189, 9.645, 5.395, 4.961, 9.894, 2.893,\n 7.357, 9.828, 6.272, 3.758, 6.693, 0.993])\n X = x.reshape(6, 6)\n XX = x.reshape(3, 2, 2, 3)\n\n m = np.array([0, 1, 0, 1, 0, 0,\n 1, 0, 1, 1, 0, 1,\n 0, 0, 0, 1, 0, 1,\n 0, 0, 0, 1, 1, 1,\n 1, 0, 0, 1, 0, 0,\n 0, 0, 1, 0, 1, 0])\n mx = array(data=x, mask=m)\n mX = array(data=X, mask=m.reshape(X.shape))\n mXX = array(data=XX, mask=m.reshape(XX.shape))\n\n self.d = (x, X, XX, m, mx, mX, mXX)\n\n def test_trace(self):\n (x, X, XX, m, mx, mX, mXX,) = self.d\n mXdiag = mX.diagonal()\n assert_equal(mX.trace(), mX.diagonal().compressed().sum())\n assert_(eq(mX.trace(),\n X.trace() - sum(mXdiag.mask * X.diagonal(),\n axis=0)))\n\n def test_clip(self):\n (x, X, XX, m, mx, mX, mXX,) = self.d\n clipped = mx.clip(2, 8)\n assert_(eq(clipped.mask, mx.mask))\n assert_(eq(clipped._data, x.clip(2, 8)))\n assert_(eq(clipped._data, mx._data.clip(2, 8)))\n\n def test_ptp(self):\n (x, X, XX, m, mx, mX, mXX,) = self.d\n (n, m) = X.shape\n assert_equal(mx.ptp(), mx.compressed().ptp())\n rows = np.zeros(n, np.float_)\n cols = np.zeros(m, np.float_)\n for k in range(m):\n cols[k] = mX[:, k].compressed().ptp()\n for k in range(n):\n rows[k] = mX[k].compressed().ptp()\n assert_(eq(mX.ptp(0), cols))\n assert_(eq(mX.ptp(1), rows))\n\n def test_swapaxes(self):\n (x, X, XX, m, mx, mX, mXX,) = self.d\n mXswapped = mX.swapaxes(0, 1)\n assert_(eq(mXswapped[-1], mX[:, -1]))\n mXXswapped = mXX.swapaxes(0, 2)\n assert_equal(mXXswapped.shape, (2, 2, 3, 3))\n\n def test_cumprod(self):\n (x, X, XX, m, mx, mX, mXX,) = self.d\n mXcp = mX.cumprod(0)\n assert_(eq(mXcp._data, mX.filled(1).cumprod(0)))\n mXcp = mX.cumprod(1)\n assert_(eq(mXcp._data, mX.filled(1).cumprod(1)))\n\n def test_cumsum(self):\n (x, X, XX, m, mx, mX, mXX,) = self.d\n mXcp = mX.cumsum(0)\n assert_(eq(mXcp._data, mX.filled(0).cumsum(0)))\n mXcp = mX.cumsum(1)\n assert_(eq(mXcp._data, mX.filled(0).cumsum(1)))\n\n def test_varstd(self):\n (x, X, XX, m, mx, mX, mXX,) = self.d\n assert_(eq(mX.var(axis=None), mX.compressed().var()))\n assert_(eq(mX.std(axis=None), mX.compressed().std()))\n assert_(eq(mXX.var(axis=3).shape, XX.var(axis=3).shape))\n assert_(eq(mX.var().shape, X.var().shape))\n (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))\n for k in range(6):\n assert_(eq(mXvar1[k], mX[k].compressed().var()))\n assert_(eq(mXvar0[k], mX[:, k].compressed().var()))\n assert_(eq(np.sqrt(mXvar0[k]),\n mX[:, k].compressed().std()))\n\n\ndef eqmask(m1, m2):\n if m1 is nomask:\n return m2 is nomask\n if m2 is nomask:\n return m1 is nomask\n return (m1 == m2).all()\n",
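A minimal sketch (added for illustration, under assumed toy inputs) of the numpy.ma behaviour the test suite above exercises: masks propagate through arithmetic, reductions skip masked entries, and filled() substitutes a fill value.

# Sketch: basic masked-array semantics checked by the tests above
import numpy as np
import numpy.ma as ma

x = ma.array([1.0, 2.0, 3.0, 4.0], mask=[0, 1, 0, 0])
y = ma.masked_where(np.arange(4) >= 3, np.arange(4, dtype=float))

print(x + y)          # masked wherever either operand is masked
print(x.sum())        # reductions skip masked entries: 1 + 3 + 4 = 8.0
print(x.filled(-1))   # [ 1. -1.  3.  4.]
print(ma.count(x))    # 3 unmasked elements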
"\"\"\"\n\n=============================================================\nOnline Latent Dirichlet Allocation with variational inference\n=============================================================\n\nThis implementation is modified from Matthew D. Hoffman's onlineldavb code\nLink: https://github.com/blei-lab/onlineldavb\n\"\"\"\n\n# Author: Chyi-Kwei Yau\n# Author: Matthew D. Hoffman (original onlineldavb implementation)\n\nimport numpy as np\nimport scipy.sparse as sp\nfrom scipy.special import gammaln\n\nfrom ..base import BaseEstimator, TransformerMixin\nfrom ..utils import (check_random_state, check_array,\n gen_batches, gen_even_slices)\nfrom ..utils.fixes import logsumexp\nfrom ..utils.validation import check_non_negative\nfrom ..utils._joblib import Parallel, delayed, effective_n_jobs\nfrom ..exceptions import NotFittedError\n\nfrom ._online_lda import (mean_change, _dirichlet_expectation_1d,\n _dirichlet_expectation_2d)\n\nEPS = np.finfo(np.float).eps\n\n\ndef _update_doc_distribution(X, exp_topic_word_distr, doc_topic_prior,\n max_iters,\n mean_change_tol, cal_sstats, random_state):\n \"\"\"E-step: update document-topic distribution.\n\n Parameters\n ----------\n X : array-like or sparse matrix, shape=(n_samples, n_features)\n Document word matrix.\n\n exp_topic_word_distr : dense matrix, shape=(n_topics, n_features)\n Exponential value of expectation of log topic word distribution.\n In the literature, this is `exp(E[log(beta)])`.\n\n doc_topic_prior : float\n Prior of document topic distribution `theta`.\n\n max_iters : int\n Max number of iterations for updating document topic distribution in\n the E-step.\n\n mean_change_tol : float\n Stopping tolerance for updating document topic distribution in E-setp.\n\n cal_sstats : boolean\n Parameter that indicate to calculate sufficient statistics or not.\n Set `cal_sstats` to `True` when we need to run M-step.\n\n random_state : RandomState instance or None\n Parameter that indicate how to initialize document topic distribution.\n Set `random_state` to None will initialize document topic distribution\n to a constant number.\n\n Returns\n -------\n (doc_topic_distr, suff_stats) :\n `doc_topic_distr` is unnormalized topic distribution for each document.\n In the literature, this is `gamma`. 
we can calculate `E[log(theta)]`\n from it.\n `suff_stats` is expected sufficient statistics for the M-step.\n When `cal_sstats == False`, this will be None.\n\n \"\"\"\n is_sparse_x = sp.issparse(X)\n n_samples, n_features = X.shape\n n_topics = exp_topic_word_distr.shape[0]\n\n if random_state:\n doc_topic_distr = random_state.gamma(100., 0.01, (n_samples, n_topics))\n else:\n doc_topic_distr = np.ones((n_samples, n_topics))\n\n # In the literature, this is `exp(E[log(theta)])`\n exp_doc_topic = np.exp(_dirichlet_expectation_2d(doc_topic_distr))\n\n # diff on `component_` (only calculate it when `cal_diff` is True)\n suff_stats = np.zeros(exp_topic_word_distr.shape) if cal_sstats else None\n\n if is_sparse_x:\n X_data = X.data\n X_indices = X.indices\n X_indptr = X.indptr\n\n for idx_d in range(n_samples):\n if is_sparse_x:\n ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]\n cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]\n else:\n ids = np.nonzero(X[idx_d, :])[0]\n cnts = X[idx_d, ids]\n\n doc_topic_d = doc_topic_distr[idx_d, :]\n # The next one is a copy, since the inner loop overwrites it.\n exp_doc_topic_d = exp_doc_topic[idx_d, :].copy()\n exp_topic_word_d = exp_topic_word_distr[:, ids]\n\n # Iterate between `doc_topic_d` and `norm_phi` until convergence\n for _ in range(0, max_iters):\n last_d = doc_topic_d\n\n # The optimal phi_{dwk} is proportional to\n # exp(E[log(theta_{dk})]) * exp(E[log(beta_{dw})]).\n norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + EPS\n\n doc_topic_d = (exp_doc_topic_d *\n np.dot(cnts / norm_phi, exp_topic_word_d.T))\n # Note: adds doc_topic_prior to doc_topic_d, in-place.\n _dirichlet_expectation_1d(doc_topic_d, doc_topic_prior,\n exp_doc_topic_d)\n\n if mean_change(last_d, doc_topic_d) < mean_change_tol:\n break\n doc_topic_distr[idx_d, :] = doc_topic_d\n\n # Contribution of document d to the expected sufficient\n # statistics for the M step.\n if cal_sstats:\n norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + EPS\n suff_stats[:, ids] += np.outer(exp_doc_topic_d, cnts / norm_phi)\n\n return (doc_topic_distr, suff_stats)\n\n\nclass LatentDirichletAllocation(BaseEstimator, TransformerMixin):\n \"\"\"Latent Dirichlet Allocation with online variational Bayes algorithm\n\n .. versionadded:: 0.17\n\n Read more in the :ref:`User Guide <LatentDirichletAllocation>`.\n\n Parameters\n ----------\n n_components : int, optional (default=10)\n Number of topics.\n\n doc_topic_prior : float, optional (default=None)\n Prior of document topic distribution `theta`. If the value is None,\n defaults to `1 / n_components`.\n In [1]_, this is called `alpha`.\n\n topic_word_prior : float, optional (default=None)\n Prior of topic word distribution `beta`. If the value is None, defaults\n to `1 / n_components`.\n In [1]_, this is called `eta`.\n\n learning_method : 'batch' | 'online', default='batch'\n Method used to update `_component`. Only used in `fit` method.\n In general, if the data size is large, the online update will be much\n faster than the batch update.\n\n Valid options::\n\n 'batch': Batch variational Bayes method. Use all training data in\n each EM update.\n Old `components_` will be overwritten in each iteration.\n 'online': Online variational Bayes method. In each EM update, use\n mini-batch of training data to update the ``components_``\n variable incrementally. The learning rate is controlled by the\n ``learning_decay`` and the ``learning_offset`` parameters.\n\n .. 
versionchanged:: 0.20\n The default learning method is now ``\"batch\"``.\n\n learning_decay : float, optional (default=0.7)\n It is a parameter that control learning rate in the online learning\n method. The value should be set between (0.5, 1.0] to guarantee\n asymptotic convergence. When the value is 0.0 and batch_size is\n ``n_samples``, the update method is same as batch learning. In the\n literature, this is called kappa.\n\n learning_offset : float, optional (default=10.)\n A (positive) parameter that downweights early iterations in online\n learning. It should be greater than 1.0. In the literature, this is\n called tau_0.\n\n max_iter : integer, optional (default=10)\n The maximum number of iterations.\n\n batch_size : int, optional (default=128)\n Number of documents to use in each EM iteration. Only used in online\n learning.\n\n evaluate_every : int, optional (default=0)\n How often to evaluate perplexity. Only used in `fit` method.\n set it to 0 or negative number to not evalute perplexity in\n training at all. Evaluating perplexity can help you check convergence\n in training process, but it will also increase total training time.\n Evaluating perplexity in every iteration might increase training time\n up to two-fold.\n\n total_samples : int, optional (default=1e6)\n Total number of documents. Only used in the `partial_fit` method.\n\n perp_tol : float, optional (default=1e-1)\n Perplexity tolerance in batch learning. Only used when\n ``evaluate_every`` is greater than 0.\n\n mean_change_tol : float, optional (default=1e-3)\n Stopping tolerance for updating document topic distribution in E-step.\n\n max_doc_update_iter : int (default=100)\n Max number of iterations for updating document topic distribution in\n the E-step.\n\n n_jobs : int or None, optional (default=None)\n The number of jobs to use in the E-step.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n for more details.\n\n verbose : int, optional (default=0)\n Verbosity level.\n\n random_state : int, RandomState instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`.\n\n Attributes\n ----------\n components_ : array, [n_components, n_features]\n Variational parameters for topic word distribution. Since the complete\n conditional for topic word distribution is a Dirichlet,\n ``components_[i, j]`` can be viewed as pseudocount that represents the\n number of times word `j` was assigned to topic `i`.\n It can also be viewed as distribution over the words for each topic\n after normalization:\n ``model.components_ / model.components_.sum(axis=1)[:, np.newaxis]``.\n\n n_batch_iter_ : int\n Number of iterations of the EM step.\n\n n_iter_ : int\n Number of passes over the dataset.\n\n Examples\n --------\n >>> from sklearn.decomposition import LatentDirichletAllocation\n >>> from sklearn.datasets import make_multilabel_classification\n >>> # This produces a feature matrix of token counts, similar to what\n >>> # CountVectorizer would produce on text.\n >>> X, _ = make_multilabel_classification(random_state=0)\n >>> lda = LatentDirichletAllocation(n_components=5,\n ... 
random_state=0)\n >>> lda.fit(X) # doctest: +ELLIPSIS\n LatentDirichletAllocation(...)\n >>> # get topics for some given samples:\n >>> lda.transform(X[-2:])\n array([[0.00360392, 0.25499205, 0.0036211 , 0.64236448, 0.09541846],\n [0.15297572, 0.00362644, 0.44412786, 0.39568399, 0.003586 ]])\n\n References\n ----------\n [1] \"Online Learning for Latent Dirichlet Allocation\", Matthew D. Hoffman,\n David M. Blei, Francis Bach, 2010\n\n [2] \"Stochastic Variational Inference\", Matthew D. Hoffman, David M. Blei,\n Chong Wang, John Paisley, 2013\n\n [3] Matthew D. Hoffman's onlineldavb code. Link:\n https://github.com/blei-lab/onlineldavb\n\n \"\"\"\n\n def __init__(self, n_components=10, doc_topic_prior=None,\n topic_word_prior=None, learning_method='batch',\n learning_decay=.7, learning_offset=10., max_iter=10,\n batch_size=128, evaluate_every=-1, total_samples=1e6,\n perp_tol=1e-1, mean_change_tol=1e-3, max_doc_update_iter=100,\n n_jobs=None, verbose=0, random_state=None):\n self.n_components = n_components\n self.doc_topic_prior = doc_topic_prior\n self.topic_word_prior = topic_word_prior\n self.learning_method = learning_method\n self.learning_decay = learning_decay\n self.learning_offset = learning_offset\n self.max_iter = max_iter\n self.batch_size = batch_size\n self.evaluate_every = evaluate_every\n self.total_samples = total_samples\n self.perp_tol = perp_tol\n self.mean_change_tol = mean_change_tol\n self.max_doc_update_iter = max_doc_update_iter\n self.n_jobs = n_jobs\n self.verbose = verbose\n self.random_state = random_state\n\n def _check_params(self):\n \"\"\"Check model parameters.\"\"\"\n if self.n_components <= 0:\n raise ValueError(\"Invalid 'n_components' parameter: %r\"\n % self.n_components)\n\n if self.total_samples <= 0:\n raise ValueError(\"Invalid 'total_samples' parameter: %r\"\n % self.total_samples)\n\n if self.learning_offset < 0:\n raise ValueError(\"Invalid 'learning_offset' parameter: %r\"\n % self.learning_offset)\n\n if self.learning_method not in (\"batch\", \"online\"):\n raise ValueError(\"Invalid 'learning_method' parameter: %r\"\n % self.learning_method)\n\n def _init_latent_vars(self, n_features):\n \"\"\"Initialize latent variables.\"\"\"\n\n self.random_state_ = check_random_state(self.random_state)\n self.n_batch_iter_ = 1\n self.n_iter_ = 0\n\n if self.doc_topic_prior is None:\n self.doc_topic_prior_ = 1. / self.n_components\n else:\n self.doc_topic_prior_ = self.doc_topic_prior\n\n if self.topic_word_prior is None:\n self.topic_word_prior_ = 1. / self.n_components\n else:\n self.topic_word_prior_ = self.topic_word_prior\n\n init_gamma = 100.\n init_var = 1. / init_gamma\n # In the literature, this is called `lambda`\n self.components_ = self.random_state_.gamma(\n init_gamma, init_var, (self.n_components, n_features))\n\n # In the literature, this is `exp(E[log(beta)])`\n self.exp_dirichlet_component_ = np.exp(\n _dirichlet_expectation_2d(self.components_))\n\n def _e_step(self, X, cal_sstats, random_init, parallel=None):\n \"\"\"E-step in EM update.\n\n Parameters\n ----------\n X : array-like or sparse matrix, shape=(n_samples, n_features)\n Document word matrix.\n\n cal_sstats : boolean\n Parameter that indicate whether to calculate sufficient statistics\n or not. Set ``cal_sstats`` to True when we need to run M-step.\n\n random_init : boolean\n Parameter that indicate whether to initialize document topic\n distribution randomly in the E-step. 
Set it to True in training\n steps.\n\n parallel : joblib.Parallel (optional)\n Pre-initialized instance of joblib.Parallel.\n\n Returns\n -------\n (doc_topic_distr, suff_stats) :\n `doc_topic_distr` is unnormalized topic distribution for each\n document. In the literature, this is called `gamma`.\n `suff_stats` is expected sufficient statistics for the M-step.\n When `cal_sstats == False`, it will be None.\n\n \"\"\"\n\n # Run e-step in parallel\n random_state = self.random_state_ if random_init else None\n\n # TODO: make Parallel._effective_n_jobs public instead?\n n_jobs = effective_n_jobs(self.n_jobs)\n if parallel is None:\n parallel = Parallel(n_jobs=n_jobs, verbose=max(0,\n self.verbose - 1))\n results = parallel(\n delayed(_update_doc_distribution)(X[idx_slice, :],\n self.exp_dirichlet_component_,\n self.doc_topic_prior_,\n self.max_doc_update_iter,\n self.mean_change_tol, cal_sstats,\n random_state)\n for idx_slice in gen_even_slices(X.shape[0], n_jobs))\n\n # merge result\n doc_topics, sstats_list = zip(*results)\n doc_topic_distr = np.vstack(doc_topics)\n\n if cal_sstats:\n # This step finishes computing the sufficient statistics for the\n # M-step.\n suff_stats = np.zeros(self.components_.shape)\n for sstats in sstats_list:\n suff_stats += sstats\n suff_stats *= self.exp_dirichlet_component_\n else:\n suff_stats = None\n\n return (doc_topic_distr, suff_stats)\n\n def _em_step(self, X, total_samples, batch_update, parallel=None):\n \"\"\"EM update for 1 iteration.\n\n update `_component` by batch VB or online VB.\n\n Parameters\n ----------\n X : array-like or sparse matrix, shape=(n_samples, n_features)\n Document word matrix.\n\n total_samples : integer\n Total number of documents. It is only used when\n batch_update is `False`.\n\n batch_update : boolean\n Parameter that controls updating method.\n `True` for batch learning, `False` for online learning.\n\n parallel : joblib.Parallel\n Pre-initialized instance of joblib.Parallel\n\n Returns\n -------\n doc_topic_distr : array, shape=(n_samples, n_components)\n Unnormalized document topic distribution.\n \"\"\"\n\n # E-step\n _, suff_stats = self._e_step(X, cal_sstats=True, random_init=True,\n parallel=parallel)\n\n # M-step\n if batch_update:\n self.components_ = self.topic_word_prior_ + suff_stats\n else:\n # online update\n # In the literature, the weight is `rho`\n weight = np.power(self.learning_offset + self.n_batch_iter_,\n -self.learning_decay)\n doc_ratio = float(total_samples) / X.shape[0]\n self.components_ *= (1 - weight)\n self.components_ += (weight * (self.topic_word_prior_\n + doc_ratio * suff_stats))\n\n # update `component_` related variables\n self.exp_dirichlet_component_ = np.exp(\n _dirichlet_expectation_2d(self.components_))\n self.n_batch_iter_ += 1\n return\n\n def _check_non_neg_array(self, X, whom):\n \"\"\"check X format\n\n check X format and make sure no negative value in X.\n\n Parameters\n ----------\n X : array-like or sparse matrix\n\n \"\"\"\n X = check_array(X, accept_sparse='csr')\n check_non_negative(X, whom)\n return X\n\n def partial_fit(self, X, y=None):\n \"\"\"Online VB with Mini-Batch update.\n\n Parameters\n ----------\n X : array-like or sparse matrix, shape=(n_samples, n_features)\n Document word matrix.\n\n y : Ignored\n\n Returns\n -------\n self\n \"\"\"\n self._check_params()\n X = self._check_non_neg_array(X,\n \"LatentDirichletAllocation.partial_fit\")\n n_samples, n_features = X.shape\n batch_size = self.batch_size\n\n # initialize parameters or check\n if not 
hasattr(self, 'components_'):\n self._init_latent_vars(n_features)\n\n if n_features != self.components_.shape[1]:\n raise ValueError(\n \"The provided data has %d dimensions while \"\n \"the model was trained with feature size %d.\" %\n (n_features, self.components_.shape[1]))\n\n n_jobs = effective_n_jobs(self.n_jobs)\n with Parallel(n_jobs=n_jobs, verbose=max(0,\n self.verbose - 1)) as parallel:\n for idx_slice in gen_batches(n_samples, batch_size):\n self._em_step(X[idx_slice, :],\n total_samples=self.total_samples,\n batch_update=False,\n parallel=parallel)\n\n return self\n\n def fit(self, X, y=None):\n \"\"\"Learn model for the data X with variational Bayes method.\n\n When `learning_method` is 'online', use mini-batch update.\n Otherwise, use batch update.\n\n Parameters\n ----------\n X : array-like or sparse matrix, shape=(n_samples, n_features)\n Document word matrix.\n\n y : Ignored\n\n Returns\n -------\n self\n \"\"\"\n self._check_params()\n X = self._check_non_neg_array(X, \"LatentDirichletAllocation.fit\")\n n_samples, n_features = X.shape\n max_iter = self.max_iter\n evaluate_every = self.evaluate_every\n learning_method = self.learning_method\n\n batch_size = self.batch_size\n\n # initialize parameters\n self._init_latent_vars(n_features)\n # change to perplexity later\n last_bound = None\n n_jobs = effective_n_jobs(self.n_jobs)\n with Parallel(n_jobs=n_jobs, verbose=max(0,\n self.verbose - 1)) as parallel:\n for i in range(max_iter):\n if learning_method == 'online':\n for idx_slice in gen_batches(n_samples, batch_size):\n self._em_step(X[idx_slice, :], total_samples=n_samples,\n batch_update=False, parallel=parallel)\n else:\n # batch update\n self._em_step(X, total_samples=n_samples,\n batch_update=True, parallel=parallel)\n\n # check perplexity\n if evaluate_every > 0 and (i + 1) % evaluate_every == 0:\n doc_topics_distr, _ = self._e_step(X, cal_sstats=False,\n random_init=False,\n parallel=parallel)\n bound = self._perplexity_precomp_distr(X, doc_topics_distr,\n sub_sampling=False)\n if self.verbose:\n print('iteration: %d of max_iter: %d, perplexity: %.4f'\n % (i + 1, max_iter, bound))\n\n if last_bound and abs(last_bound - bound) < self.perp_tol:\n break\n last_bound = bound\n\n elif self.verbose:\n print('iteration: %d of max_iter: %d' % (i + 1, max_iter))\n self.n_iter_ += 1\n\n # calculate final perplexity value on train set\n doc_topics_distr, _ = self._e_step(X, cal_sstats=False,\n random_init=False,\n parallel=parallel)\n self.bound_ = self._perplexity_precomp_distr(X, doc_topics_distr,\n sub_sampling=False)\n\n return self\n\n def _unnormalized_transform(self, X):\n \"\"\"Transform data X according to fitted model.\n\n Parameters\n ----------\n X : array-like or sparse matrix, shape=(n_samples, n_features)\n Document word matrix.\n\n Returns\n -------\n doc_topic_distr : shape=(n_samples, n_components)\n Document topic distribution for X.\n \"\"\"\n if not hasattr(self, 'components_'):\n raise NotFittedError(\"no 'components_' attribute in model.\"\n \" Please fit model first.\")\n\n # make sure feature size is the same in fitted model and in X\n X = self._check_non_neg_array(X, \"LatentDirichletAllocation.transform\")\n n_samples, n_features = X.shape\n if n_features != self.components_.shape[1]:\n raise ValueError(\n \"The provided data has %d dimensions while \"\n \"the model was trained with feature size %d.\" %\n (n_features, self.components_.shape[1]))\n\n doc_topic_distr, _ = self._e_step(X, cal_sstats=False,\n random_init=False)\n\n return 
doc_topic_distr\n\n def transform(self, X):\n \"\"\"Transform data X according to the fitted model.\n\n .. versionchanged:: 0.18\n *doc_topic_distr* is now normalized\n\n Parameters\n ----------\n X : array-like or sparse matrix, shape=(n_samples, n_features)\n Document word matrix.\n\n Returns\n -------\n doc_topic_distr : shape=(n_samples, n_components)\n Document topic distribution for X.\n \"\"\"\n doc_topic_distr = self._unnormalized_transform(X)\n doc_topic_distr /= doc_topic_distr.sum(axis=1)[:, np.newaxis]\n return doc_topic_distr\n\n def _approx_bound(self, X, doc_topic_distr, sub_sampling):\n \"\"\"Estimate the variational bound.\n\n Estimate the variational bound over \"all documents\" using only the\n documents passed in as X. Since log-likelihood of each word cannot\n be computed directly, we use this bound to estimate it.\n\n Parameters\n ----------\n X : array-like or sparse matrix, shape=(n_samples, n_features)\n Document word matrix.\n\n doc_topic_distr : array, shape=(n_samples, n_components)\n Document topic distribution. In the literature, this is called\n gamma.\n\n sub_sampling : boolean, optional, (default=False)\n Compensate for subsampling of documents.\n It is used in calculate bound in online learning.\n\n Returns\n -------\n score : float\n\n \"\"\"\n\n def _loglikelihood(prior, distr, dirichlet_distr, size):\n # calculate log-likelihood\n score = np.sum((prior - distr) * dirichlet_distr)\n score += np.sum(gammaln(distr) - gammaln(prior))\n score += np.sum(gammaln(prior * size) - gammaln(np.sum(distr, 1)))\n return score\n\n is_sparse_x = sp.issparse(X)\n n_samples, n_components = doc_topic_distr.shape\n n_features = self.components_.shape[1]\n score = 0\n\n dirichlet_doc_topic = _dirichlet_expectation_2d(doc_topic_distr)\n dirichlet_component_ = _dirichlet_expectation_2d(self.components_)\n doc_topic_prior = self.doc_topic_prior_\n topic_word_prior = self.topic_word_prior_\n\n if is_sparse_x:\n X_data = X.data\n X_indices = X.indices\n X_indptr = X.indptr\n\n # E[log p(docs | theta, beta)]\n for idx_d in range(0, n_samples):\n if is_sparse_x:\n ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]\n cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]\n else:\n ids = np.nonzero(X[idx_d, :])[0]\n cnts = X[idx_d, ids]\n temp = (dirichlet_doc_topic[idx_d, :, np.newaxis]\n + dirichlet_component_[:, ids])\n norm_phi = logsumexp(temp, axis=0)\n score += np.dot(cnts, norm_phi)\n\n # compute E[log p(theta | alpha) - log q(theta | gamma)]\n score += _loglikelihood(doc_topic_prior, doc_topic_distr,\n dirichlet_doc_topic, self.n_components)\n\n # Compensate for the subsampling of the population of documents\n if sub_sampling:\n doc_ratio = float(self.total_samples) / n_samples\n score *= doc_ratio\n\n # E[log p(beta | eta) - log q (beta | lambda)]\n score += _loglikelihood(topic_word_prior, self.components_,\n dirichlet_component_, n_features)\n\n return score\n\n def score(self, X, y=None):\n \"\"\"Calculate approximate log-likelihood as score.\n\n Parameters\n ----------\n X : array-like or sparse matrix, shape=(n_samples, n_features)\n Document word matrix.\n\n y : Ignored\n\n Returns\n -------\n score : float\n Use approximate bound as score.\n \"\"\"\n X = self._check_non_neg_array(X, \"LatentDirichletAllocation.score\")\n\n doc_topic_distr = self._unnormalized_transform(X)\n score = self._approx_bound(X, doc_topic_distr, sub_sampling=False)\n return score\n\n def _perplexity_precomp_distr(self, X, doc_topic_distr=None,\n sub_sampling=False):\n \"\"\"Calculate 
approximate perplexity for data X with ability to accept\n precomputed doc_topic_distr\n\n Perplexity is defined as exp(-1. * log-likelihood per word)\n\n Parameters\n ----------\n X : array-like or sparse matrix, [n_samples, n_features]\n Document word matrix.\n\n doc_topic_distr : None or array, shape=(n_samples, n_components)\n Document topic distribution.\n If it is None, it will be generated by applying transform on X.\n\n Returns\n -------\n score : float\n Perplexity score.\n \"\"\"\n if not hasattr(self, 'components_'):\n raise NotFittedError(\"no 'components_' attribute in model.\"\n \" Please fit model first.\")\n\n X = self._check_non_neg_array(X,\n \"LatentDirichletAllocation.perplexity\")\n\n if doc_topic_distr is None:\n doc_topic_distr = self._unnormalized_transform(X)\n else:\n n_samples, n_components = doc_topic_distr.shape\n if n_samples != X.shape[0]:\n raise ValueError(\"Number of samples in X and doc_topic_distr\"\n \" do not match.\")\n\n if n_components != self.n_components:\n raise ValueError(\"Number of topics does not match.\")\n\n current_samples = X.shape[0]\n bound = self._approx_bound(X, doc_topic_distr, sub_sampling)\n\n if sub_sampling:\n word_cnt = X.sum() * (float(self.total_samples) / current_samples)\n else:\n word_cnt = X.sum()\n perword_bound = bound / word_cnt\n\n return np.exp(-1.0 * perword_bound)\n\n def perplexity(self, X, sub_sampling=False):\n \"\"\"Calculate approximate perplexity for data X.\n\n Perplexity is defined as exp(-1. * log-likelihood per word)\n\n .. versionchanged:: 0.19\n *doc_topic_distr* argument has been deprecated and is ignored\n because user no longer has access to unnormalized distribution\n\n Parameters\n ----------\n X : array-like or sparse matrix, [n_samples, n_features]\n Document word matrix.\n\n sub_sampling : bool\n Do sub-sampling or not.\n\n Returns\n -------\n score : float\n Perplexity score.\n \"\"\"\n return self._perplexity_precomp_distr(X, sub_sampling=sub_sampling)\n",
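Illustrative sketch of the online variational Bayes workflow implemented by the LatentDirichletAllocation class in the entry above: partial_fit on mini-batches, then transform and perplexity. The random Poisson counts stand in for a real document-term matrix and are purely an assumption.

# Sketch: mini-batch LDA fitting (toy count matrix is assumed)
import numpy as np
from sklearn.decomposition import LatentDirichletAllocation

rng = np.random.RandomState(0)
X = rng.poisson(1.0, size=(200, 50))          # 200 "documents", 50 "words"

lda = LatentDirichletAllocation(n_components=5, learning_method='online',
                                batch_size=32, random_state=0)
for start in range(0, X.shape[0], 32):        # feed mini-batches to the online update
    lda.partial_fit(X[start:start + 32])

doc_topics = lda.transform(X[:3])             # normalized per-document topic mixtures
print(doc_topics.sum(axis=1))                 # each row sums to 1
print(lda.perplexity(X))                      # lower perplexity is better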
"from __future__ import division, print_function, absolute_import\n\nimport warnings\nfrom collections import namedtuple\nfrom . import _zeros\nimport numpy as np\n\n\n_iter = 100\n_xtol = 2e-12\n_rtol = 4 * np.finfo(float).eps\n\n__all__ = ['newton', 'bisect', 'ridder', 'brentq', 'brenth', 'toms748',\n 'RootResults']\n\n# Must agree with CONVERGED, SIGNERR, CONVERR, ... in zeros.h\n_ECONVERGED = 0\n_ESIGNERR = -1\n_ECONVERR = -2\n_EVALUEERR = -3\n_EINPROGRESS = 1\n\nCONVERGED = 'converged'\nSIGNERR = 'sign error'\nCONVERR = 'convergence error'\nVALUEERR = 'value error'\nINPROGRESS = 'No error'\n\n\nflag_map = {_ECONVERGED: CONVERGED, _ESIGNERR: SIGNERR, _ECONVERR: CONVERR,\n _EVALUEERR: VALUEERR, _EINPROGRESS: INPROGRESS}\n\n\nclass RootResults(object):\n \"\"\"Represents the root finding result.\n\n Attributes\n ----------\n root : float\n Estimated root location.\n iterations : int\n Number of iterations needed to find the root.\n function_calls : int\n Number of times the function was called.\n converged : bool\n True if the routine converged.\n flag : str\n Description of the cause of termination.\n\n \"\"\"\n\n def __init__(self, root, iterations, function_calls, flag):\n self.root = root\n self.iterations = iterations\n self.function_calls = function_calls\n self.converged = flag == _ECONVERGED\n self.flag = None\n try:\n self.flag = flag_map[flag]\n except KeyError:\n self.flag = 'unknown error %d' % (flag,)\n\n def __repr__(self):\n attrs = ['converged', 'flag', 'function_calls',\n 'iterations', 'root']\n m = max(map(len, attrs)) + 1\n return '\\n'.join([a.rjust(m) + ': ' + repr(getattr(self, a))\n for a in attrs])\n\n\ndef results_c(full_output, r):\n if full_output:\n x, funcalls, iterations, flag = r\n results = RootResults(root=x,\n iterations=iterations,\n function_calls=funcalls,\n flag=flag)\n return x, results\n else:\n return r\n\n\ndef _results_select(full_output, r):\n \"\"\"Select from a tuple of (root, funccalls, iterations, flag)\"\"\"\n x, funcalls, iterations, flag = r\n if full_output:\n results = RootResults(root=x,\n iterations=iterations,\n function_calls=funcalls,\n flag=flag)\n return x, results\n return x\n\n\ndef newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50,\n fprime2=None, x1=None, rtol=0.0,\n full_output=False, disp=True):\n \"\"\"\n Find a zero of a real or complex function using the Newton-Raphson\n (or secant or Halley's) method.\n\n Find a zero of the function `func` given a nearby starting point `x0`.\n The Newton-Raphson method is used if the derivative `fprime` of `func`\n is provided, otherwise the secant method is used. If the second order\n derivative `fprime2` of `func` is also provided, then Halley's method is\n used.\n\n If `x0` is a sequence with more than one item, then `newton` returns an\n array, and `func` must be vectorized and return a sequence or array of the\n same shape as its first argument. If `fprime` or `fprime2` is given then\n its return must also have the same shape.\n\n Parameters\n ----------\n func : callable\n The function whose zero is wanted. It must be a function of a\n single variable of the form ``f(x,a,b,c...)``, where ``a,b,c...``\n are extra arguments that can be passed in the `args` parameter.\n x0 : float, sequence, or ndarray\n An initial estimate of the zero that should be somewhere near the\n actual zero. 
If not scalar, then `func` must be vectorized and return\n a sequence or array of the same shape as its first argument.\n fprime : callable, optional\n The derivative of the function when available and convenient. If it\n is None (default), then the secant method is used.\n args : tuple, optional\n Extra arguments to be used in the function call.\n tol : float, optional\n The allowable error of the zero value. If `func` is complex-valued,\n a larger `tol` is recommended as both the real and imaginary parts\n of `x` contribute to ``|x - x0|``.\n maxiter : int, optional\n Maximum number of iterations.\n fprime2 : callable, optional\n The second order derivative of the function when available and\n convenient. If it is None (default), then the normal Newton-Raphson\n or the secant method is used. If it is not None, then Halley's method\n is used.\n x1 : float, optional\n Another estimate of the zero that should be somewhere near the\n actual zero. Used if `fprime` is not provided.\n rtol : float, optional\n Tolerance (relative) for termination.\n full_output : bool, optional\n If `full_output` is False (default), the root is returned.\n If True and `x0` is scalar, the return value is ``(x, r)``, where ``x``\n is the root and ``r`` is a `RootResults` object.\n If True and `x0` is non-scalar, the return value is ``(x, converged,\n zero_der)`` (see Returns section for details).\n disp : bool, optional\n If True, raise a RuntimeError if the algorithm didn't converge, with\n the error message containing the number of iterations and current\n function value. Otherwise the convergence status is recorded in a\n `RootResults` return object.\n Ignored if `x0` is not scalar.\n *Note: this has little to do with displaying, however\n the `disp` keyword cannot be renamed for backwards compatibility.*\n\n Returns\n -------\n root : float, sequence, or ndarray\n Estimated location where function is zero.\n r : `RootResults`, optional\n Present if ``full_output=True`` and `x0` is scalar.\n Object containing information about the convergence. In particular,\n ``r.converged`` is True if the routine converged.\n converged : ndarray of bool, optional\n Present if ``full_output=True`` and `x0` is non-scalar.\n For vector functions, indicates which elements converged successfully.\n zero_der : ndarray of bool, optional\n Present if ``full_output=True`` and `x0` is non-scalar.\n For vector functions, indicates which elements had a zero derivative.\n\n See Also\n --------\n brentq, brenth, ridder, bisect\n fsolve : find zeros in n dimensions.\n\n Notes\n -----\n The convergence rate of the Newton-Raphson method is quadratic,\n the Halley method is cubic, and the secant method is\n sub-quadratic. This means that if the function is well behaved\n the actual error in the estimated zero after the n-th iteration\n is approximately the square (cube for Halley) of the error\n after the (n-1)-th step. However, the stopping criterion used\n here is the step size and there is no guarantee that a zero\n has been found. Consequently the result should be verified.\n Safer algorithms are brentq, brenth, ridder, and bisect,\n but they all require that the root first be bracketed in an\n interval where the function changes sign. 
The brentq algorithm\n is recommended for general use in one dimensional problems\n when such an interval has been found.\n\n When `newton` is used with arrays, it is best suited for the following\n types of problems:\n\n * The initial guesses, `x0`, are all relatively the same distance from\n the roots.\n * Some or all of the extra arguments, `args`, are also arrays so that a\n class of similar problems can be solved together.\n * The size of the initial guesses, `x0`, is larger than O(100) elements.\n Otherwise, a naive loop may perform as well or better than a vector.\n\n Examples\n --------\n >>> from scipy import optimize\n >>> import matplotlib.pyplot as plt\n\n >>> def f(x):\n ... return (x**3 - 1) # only one real root at x = 1\n\n ``fprime`` is not provided, use the secant method:\n\n >>> root = optimize.newton(f, 1.5)\n >>> root\n 1.0000000000000016\n >>> root = optimize.newton(f, 1.5, fprime2=lambda x: 6 * x)\n >>> root\n 1.0000000000000016\n\n Only ``fprime`` is provided, use the Newton-Raphson method:\n\n >>> root = optimize.newton(f, 1.5, fprime=lambda x: 3 * x**2)\n >>> root\n 1.0\n\n Both ``fprime2`` and ``fprime`` are provided, use Halley's method:\n\n >>> root = optimize.newton(f, 1.5, fprime=lambda x: 3 * x**2,\n ... fprime2=lambda x: 6 * x)\n >>> root\n 1.0\n\n When we want to find zeros for a set of related starting values and/or\n function parameters, we can provide both of those as an array of inputs:\n\n >>> f = lambda x, a: x**3 - a\n >>> fder = lambda x, a: 3 * x**2\n >>> np.random.seed(4321)\n >>> x = np.random.randn(100)\n >>> a = np.arange(-50, 50)\n >>> vec_res = optimize.newton(f, x, fprime=fder, args=(a, ))\n\n The above is the equivalent of solving for each value in ``(x, a)``\n separately in a for-loop, just faster:\n\n >>> loop_res = [optimize.newton(f, x0, fprime=fder, args=(a0,))\n ... for x0, a0 in zip(x, a)]\n >>> np.allclose(vec_res, loop_res)\n True\n\n Plot the results found for all values of ``a``:\n\n >>> analytical_result = np.sign(a) * np.abs(a)**(1/3)\n >>> fig = plt.figure()\n >>> ax = fig.add_subplot(111)\n >>> ax.plot(a, analytical_result, 'o')\n >>> ax.plot(a, vec_res, '.')\n >>> ax.set_xlabel('$a$')\n >>> ax.set_ylabel('$x$ where $f(x, a)=0$')\n >>> plt.show()\n\n \"\"\"\n if tol <= 0:\n raise ValueError(\"tol too small (%g <= 0)\" % tol)\n if maxiter < 1:\n raise ValueError(\"maxiter must be greater than 0\")\n if np.size(x0) > 1:\n return _array_newton(func, x0, fprime, args, tol, maxiter, fprime2,\n full_output)\n\n # Convert to float (don't use float(x0); this works also for complex x0)\n p0 = 1.0 * x0\n funcalls = 0\n if fprime is not None:\n # Newton-Raphson method\n for itr in range(maxiter):\n # first evaluate fval\n fval = func(p0, *args)\n funcalls += 1\n # If fval is 0, a root has been found, then terminate\n if fval == 0:\n return _results_select(\n full_output, (p0, funcalls, itr, _ECONVERGED))\n fder = fprime(p0, *args)\n funcalls += 1\n if fder == 0:\n msg = \"Derivative was zero.\"\n if disp:\n msg += (\n \" Failed to converge after %d iterations, value is %s.\"\n % (itr + 1, p0))\n raise RuntimeError(msg)\n warnings.warn(msg, RuntimeWarning)\n return _results_select(\n full_output, (p0, funcalls, itr + 1, _ECONVERR))\n newton_step = fval / fder\n if fprime2:\n fder2 = fprime2(p0, *args)\n funcalls += 1\n # Halley's method:\n # newton_step /= (1.0 - 0.5 * newton_step * fder2 / fder)\n # Only do it if denominator stays close enough to 1\n # Rationale: If 1-adj < 0, then Halley sends x in the\n # opposite direction to Newton. 
Doesn't happen if x is close\n # enough to root.\n adj = newton_step * fder2 / fder / 2\n if np.abs(adj) < 1:\n newton_step /= 1.0 - adj\n p = p0 - newton_step\n if np.isclose(p, p0, rtol=rtol, atol=tol):\n return _results_select(\n full_output, (p, funcalls, itr + 1, _ECONVERGED))\n p0 = p\n else:\n # Secant method\n if x1 is not None:\n if x1 == x0:\n raise ValueError(\"x1 and x0 must be different\")\n p1 = x1\n else:\n eps = 1e-4\n p1 = x0 * (1 + eps)\n p1 += (eps if p1 >= 0 else -eps)\n q0 = func(p0, *args)\n funcalls += 1\n q1 = func(p1, *args)\n funcalls += 1\n if abs(q1) < abs(q0):\n p0, p1, q0, q1 = p1, p0, q1, q0\n for itr in range(maxiter):\n if q1 == q0:\n if p1 != p0:\n msg = \"Tolerance of %s reached.\" % (p1 - p0)\n if disp:\n msg += (\n \" Failed to converge after %d iterations, value is %s.\"\n % (itr + 1, p1))\n raise RuntimeError(msg)\n warnings.warn(msg, RuntimeWarning)\n p = (p1 + p0) / 2.0\n return _results_select(\n full_output, (p, funcalls, itr + 1, _ECONVERGED))\n else:\n if abs(q1) > abs(q0):\n p = (-q0 / q1 * p1 + p0) / (1 - q0 / q1)\n else:\n p = (-q1 / q0 * p0 + p1) / (1 - q1 / q0)\n if np.isclose(p, p1, rtol=rtol, atol=tol):\n return _results_select(\n full_output, (p, funcalls, itr + 1, _ECONVERGED))\n p0, q0 = p1, q1\n p1 = p\n q1 = func(p1, *args)\n funcalls += 1\n\n if disp:\n msg = (\"Failed to converge after %d iterations, value is %s.\"\n % (itr + 1, p))\n raise RuntimeError(msg)\n\n return _results_select(full_output, (p, funcalls, itr + 1, _ECONVERR))\n\n\ndef _array_newton(func, x0, fprime, args, tol, maxiter, fprime2, full_output):\n \"\"\"\n A vectorized version of Newton, Halley, and secant methods for arrays.\n\n Do not use this method directly. This method is called from `newton`\n when ``np.size(x0) > 1`` is ``True``. 
For docstring, see `newton`.\n \"\"\"\n # Explicitly copy `x0` as `p` will be modified inplace, but, the\n # user's array should not be altered.\n try:\n p = np.array(x0, copy=True, dtype=float)\n except TypeError:\n # can't convert complex to float\n p = np.array(x0, copy=True)\n\n failures = np.ones_like(p, dtype=bool)\n nz_der = np.ones_like(failures)\n if fprime is not None:\n # Newton-Raphson method\n for iteration in range(maxiter):\n # first evaluate fval\n fval = np.asarray(func(p, *args))\n # If all fval are 0, all roots have been found, then terminate\n if not fval.any():\n failures = fval.astype(bool)\n break\n fder = np.asarray(fprime(p, *args))\n nz_der = (fder != 0)\n # stop iterating if all derivatives are zero\n if not nz_der.any():\n break\n # Newton step\n dp = fval[nz_der] / fder[nz_der]\n if fprime2 is not None:\n fder2 = np.asarray(fprime2(p, *args))\n dp = dp / (1.0 - 0.5 * dp * fder2[nz_der] / fder[nz_der])\n # only update nonzero derivatives\n p[nz_der] -= dp\n failures[nz_der] = np.abs(dp) >= tol # items not yet converged\n # stop iterating if there aren't any failures, not incl zero der\n if not failures[nz_der].any():\n break\n else:\n # Secant method\n dx = np.finfo(float).eps**0.33\n p1 = p * (1 + dx) + np.where(p >= 0, dx, -dx)\n q0 = np.asarray(func(p, *args))\n q1 = np.asarray(func(p1, *args))\n active = np.ones_like(p, dtype=bool)\n for iteration in range(maxiter):\n nz_der = (q1 != q0)\n # stop iterating if all derivatives are zero\n if not nz_der.any():\n p = (p1 + p) / 2.0\n break\n # Secant Step\n dp = (q1 * (p1 - p))[nz_der] / (q1 - q0)[nz_der]\n # only update nonzero derivatives\n p[nz_der] = p1[nz_der] - dp\n active_zero_der = ~nz_der & active\n p[active_zero_der] = (p1 + p)[active_zero_der] / 2.0\n active &= nz_der # don't assign zero derivatives again\n failures[nz_der] = np.abs(dp) >= tol # not yet converged\n # stop iterating if there aren't any failures, not incl zero der\n if not failures[nz_der].any():\n break\n p1, p = p, p1\n q0 = q1\n q1 = np.asarray(func(p1, *args))\n\n zero_der = ~nz_der & failures # don't include converged with zero-ders\n if zero_der.any():\n # Secant warnings\n if fprime is None:\n nonzero_dp = (p1 != p)\n # non-zero dp, but infinite newton step\n zero_der_nz_dp = (zero_der & nonzero_dp)\n if zero_der_nz_dp.any():\n rms = np.sqrt(\n sum((p1[zero_der_nz_dp] - p[zero_der_nz_dp]) ** 2)\n )\n warnings.warn(\n 'RMS of {:g} reached'.format(rms), RuntimeWarning)\n # Newton or Halley warnings\n else:\n all_or_some = 'all' if zero_der.all() else 'some'\n msg = '{:s} derivatives were zero'.format(all_or_some)\n warnings.warn(msg, RuntimeWarning)\n elif failures.any():\n all_or_some = 'all' if failures.all() else 'some'\n msg = '{0:s} failed to converge after {1:d} iterations'.format(\n all_or_some, maxiter\n )\n if failures.all():\n raise RuntimeError(msg)\n warnings.warn(msg, RuntimeWarning)\n\n if full_output:\n result = namedtuple('result', ('root', 'converged', 'zero_der'))\n p = result(p, ~failures, zero_der)\n\n return p\n\n\ndef bisect(f, a, b, args=(),\n xtol=_xtol, rtol=_rtol, maxiter=_iter,\n full_output=False, disp=True):\n \"\"\"\n Find root of a function within an interval using bisection.\n\n Basic bisection routine to find a zero of the function `f` between the\n arguments `a` and `b`. `f(a)` and `f(b)` cannot have the same signs.\n Slow but sure.\n\n Parameters\n ----------\n f : function\n Python function returning a number. 
`f` must be continuous, and\n f(a) and f(b) must have opposite signs.\n a : scalar\n One end of the bracketing interval [a,b].\n b : scalar\n The other end of the bracketing interval [a,b].\n xtol : number, optional\n The computed root ``x0`` will satisfy ``np.allclose(x, x0,\n atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The\n parameter must be nonnegative.\n rtol : number, optional\n The computed root ``x0`` will satisfy ``np.allclose(x, x0,\n atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The\n parameter cannot be smaller than its default value of\n ``4*np.finfo(float).eps``.\n maxiter : int, optional\n if convergence is not achieved in `maxiter` iterations, an error is\n raised. Must be >= 0.\n args : tuple, optional\n containing extra arguments for the function `f`.\n `f` is called by ``apply(f, (x)+args)``.\n full_output : bool, optional\n If `full_output` is False, the root is returned. If `full_output` is\n True, the return value is ``(x, r)``, where x is the root, and r is\n a `RootResults` object.\n disp : bool, optional\n If True, raise RuntimeError if the algorithm didn't converge.\n Otherwise the convergence status is recorded in a `RootResults`\n return object.\n\n Returns\n -------\n x0 : float\n Zero of `f` between `a` and `b`.\n r : `RootResults` (present if ``full_output = True``)\n Object containing information about the convergence. In particular,\n ``r.converged`` is True if the routine converged.\n\n Examples\n --------\n\n >>> def f(x):\n ... return (x**2 - 1)\n\n >>> from scipy import optimize\n\n >>> root = optimize.bisect(f, 0, 2)\n >>> root\n 1.0\n\n >>> root = optimize.bisect(f, -2, 0)\n >>> root\n -1.0\n\n See Also\n --------\n brentq, brenth, bisect, newton\n fixed_point : scalar fixed-point finder\n fsolve : n-dimensional root-finding\n\n \"\"\"\n if not isinstance(args, tuple):\n args = (args,)\n if xtol <= 0:\n raise ValueError(\"xtol too small (%g <= 0)\" % xtol)\n if rtol < _rtol:\n raise ValueError(\"rtol too small (%g < %g)\" % (rtol, _rtol))\n r = _zeros._bisect(f, a, b, xtol, rtol, maxiter, args, full_output, disp)\n return results_c(full_output, r)\n\n\ndef ridder(f, a, b, args=(),\n xtol=_xtol, rtol=_rtol, maxiter=_iter,\n full_output=False, disp=True):\n \"\"\"\n Find a root of a function in an interval using Ridder's method.\n\n Parameters\n ----------\n f : function\n Python function returning a number. f must be continuous, and f(a) and\n f(b) must have opposite signs.\n a : scalar\n One end of the bracketing interval [a,b].\n b : scalar\n The other end of the bracketing interval [a,b].\n xtol : number, optional\n The computed root ``x0`` will satisfy ``np.allclose(x, x0,\n atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The\n parameter must be nonnegative.\n rtol : number, optional\n The computed root ``x0`` will satisfy ``np.allclose(x, x0,\n atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The\n parameter cannot be smaller than its default value of\n ``4*np.finfo(float).eps``.\n maxiter : int, optional\n if convergence is not achieved in `maxiter` iterations, an error is\n raised. Must be >= 0.\n args : tuple, optional\n containing extra arguments for the function `f`.\n `f` is called by ``apply(f, (x)+args)``.\n full_output : bool, optional\n If `full_output` is False, the root is returned. 
If `full_output` is\n True, the return value is ``(x, r)``, where `x` is the root, and `r` is\n a `RootResults` object.\n disp : bool, optional\n If True, raise RuntimeError if the algorithm didn't converge.\n Otherwise the convergence status is recorded in any `RootResults`\n return object.\n\n Returns\n -------\n x0 : float\n Zero of `f` between `a` and `b`.\n r : `RootResults` (present if ``full_output = True``)\n Object containing information about the convergence.\n In particular, ``r.converged`` is True if the routine converged.\n\n See Also\n --------\n brentq, brenth, bisect, newton : one-dimensional root-finding\n fixed_point : scalar fixed-point finder\n\n Notes\n -----\n Uses [Ridders1979]_ method to find a zero of the function `f` between the\n arguments `a` and `b`. Ridders' method is faster than bisection, but not\n generally as fast as the Brent routines. [Ridders1979]_ provides the\n classic description and source of the algorithm. A description can also be\n found in any recent edition of Numerical Recipes.\n\n The routine used here diverges slightly from standard presentations in\n order to be a bit more careful of tolerance.\n\n References\n ----------\n .. [Ridders1979]\n Ridders, C. F. J. \"A New Algorithm for Computing a\n Single Root of a Real Continuous Function.\"\n IEEE Trans. Circuits Systems 26, 979-980, 1979.\n\n Examples\n --------\n\n >>> def f(x):\n ... return (x**2 - 1)\n\n >>> from scipy import optimize\n\n >>> root = optimize.ridder(f, 0, 2)\n >>> root\n 1.0\n\n >>> root = optimize.ridder(f, -2, 0)\n >>> root\n -1.0\n \"\"\"\n if not isinstance(args, tuple):\n args = (args,)\n if xtol <= 0:\n raise ValueError(\"xtol too small (%g <= 0)\" % xtol)\n if rtol < _rtol:\n raise ValueError(\"rtol too small (%g < %g)\" % (rtol, _rtol))\n r = _zeros._ridder(f, a, b, xtol, rtol, maxiter, args, full_output, disp)\n return results_c(full_output, r)\n\n\ndef brentq(f, a, b, args=(),\n xtol=_xtol, rtol=_rtol, maxiter=_iter,\n full_output=False, disp=True):\n \"\"\"\n Find a root of a function in a bracketing interval using Brent's method.\n\n Uses the classic Brent's method to find a zero of the function `f` on\n the sign changing interval [a , b]. Generally considered the best of the\n rootfinding routines here. It is a safe version of the secant method that\n uses inverse quadratic extrapolation. Brent's method combines root\n bracketing, interval bisection, and inverse quadratic interpolation. It is\n sometimes known as the van Wijngaarden-Dekker-Brent method. Brent (1973)\n claims convergence is guaranteed for functions computable within [a,b].\n\n [Brent1973]_ provides the classic description of the algorithm. Another\n description can be found in a recent edition of Numerical Recipes, including\n [PressEtal1992]_. A third description is at\n http://mathworld.wolfram.com/BrentsMethod.html. It should be easy to\n understand the algorithm just by reading our code. Our code diverges a bit\n from standard presentations: we choose a different formula for the\n extrapolation step.\n\n Parameters\n ----------\n f : function\n Python function returning a number. The function :math:`f`\n must be continuous, and :math:`f(a)` and :math:`f(b)` must\n have opposite signs.\n a : scalar\n One end of the bracketing interval :math:`[a, b]`.\n b : scalar\n The other end of the bracketing interval :math:`[a, b]`.\n xtol : number, optional\n The computed root ``x0`` will satisfy ``np.allclose(x, x0,\n atol=xtol, rtol=rtol)``, where ``x`` is the exact root. 
The\n parameter must be nonnegative. For nice functions, Brent's\n method will often satisfy the above condition with ``xtol/2``\n and ``rtol/2``. [Brent1973]_\n rtol : number, optional\n The computed root ``x0`` will satisfy ``np.allclose(x, x0,\n atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The\n parameter cannot be smaller than its default value of\n ``4*np.finfo(float).eps``. For nice functions, Brent's\n method will often satisfy the above condition with ``xtol/2``\n and ``rtol/2``. [Brent1973]_\n maxiter : int, optional\n if convergence is not achieved in `maxiter` iterations, an error is\n raised. Must be >= 0.\n args : tuple, optional\n containing extra arguments for the function `f`.\n `f` is called by ``apply(f, (x)+args)``.\n full_output : bool, optional\n If `full_output` is False, the root is returned. If `full_output` is\n True, the return value is ``(x, r)``, where `x` is the root, and `r` is\n a `RootResults` object.\n disp : bool, optional\n If True, raise RuntimeError if the algorithm didn't converge.\n Otherwise the convergence status is recorded in any `RootResults`\n return object.\n\n Returns\n -------\n x0 : float\n Zero of `f` between `a` and `b`.\n r : `RootResults` (present if ``full_output = True``)\n Object containing information about the convergence. In particular,\n ``r.converged`` is True if the routine converged.\n\n Notes\n -----\n `f` must be continuous. f(a) and f(b) must have opposite signs.\n\n Related functions fall into several classes:\n\n multivariate local optimizers\n `fmin`, `fmin_powell`, `fmin_cg`, `fmin_bfgs`, `fmin_ncg`\n nonlinear least squares minimizer\n `leastsq`\n constrained multivariate optimizers\n `fmin_l_bfgs_b`, `fmin_tnc`, `fmin_cobyla`\n global optimizers\n `basinhopping`, `brute`, `differential_evolution`\n local scalar minimizers\n `fminbound`, `brent`, `golden`, `bracket`\n n-dimensional root-finding\n `fsolve`\n one-dimensional root-finding\n `brenth`, `ridder`, `bisect`, `newton`\n scalar fixed-point finder\n `fixed_point`\n\n References\n ----------\n .. [Brent1973]\n Brent, R. P.,\n *Algorithms for Minimization Without Derivatives*.\n Englewood Cliffs, NJ: Prentice-Hall, 1973. Ch. 3-4.\n\n .. [PressEtal1992]\n Press, W. H.; Flannery, B. P.; Teukolsky, S. A.; and Vetterling, W. T.\n *Numerical Recipes in FORTRAN: The Art of Scientific Computing*, 2nd ed.\n Cambridge, England: Cambridge University Press, pp. 352-355, 1992.\n Section 9.3: \"Van Wijngaarden-Dekker-Brent Method.\"\n\n Examples\n --------\n >>> def f(x):\n ... return (x**2 - 1)\n\n >>> from scipy import optimize\n\n >>> root = optimize.brentq(f, -2, 0)\n >>> root\n -1.0\n\n >>> root = optimize.brentq(f, 0, 2)\n >>> root\n 1.0\n \"\"\"\n if not isinstance(args, tuple):\n args = (args,)\n if xtol <= 0:\n raise ValueError(\"xtol too small (%g <= 0)\" % xtol)\n if rtol < _rtol:\n raise ValueError(\"rtol too small (%g < %g)\" % (rtol, _rtol))\n r = _zeros._brentq(f, a, b, xtol, rtol, maxiter, args, full_output, disp)\n return results_c(full_output, r)\n\n\ndef brenth(f, a, b, args=(),\n xtol=_xtol, rtol=_rtol, maxiter=_iter,\n full_output=False, disp=True):\n \"\"\"Find a root of a function in a bracketing interval using Brent's\n method with hyperbolic extrapolation.\n\n A variation on the classic Brent routine to find a zero of the function f\n between the arguments a and b that uses hyperbolic extrapolation instead of\n inverse quadratic extrapolation. There was a paper back in the 1980's ...\n f(a) and f(b) cannot have the same signs. 
Generally on a par with the\n brent routine, but not as heavily tested. It is a safe version of the\n secant method that uses hyperbolic extrapolation. The version here is by\n Chuck Harris.\n\n Parameters\n ----------\n f : function\n Python function returning a number. f must be continuous, and f(a) and\n f(b) must have opposite signs.\n a : scalar\n One end of the bracketing interval [a,b].\n b : scalar\n The other end of the bracketing interval [a,b].\n xtol : number, optional\n The computed root ``x0`` will satisfy ``np.allclose(x, x0,\n atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The\n parameter must be nonnegative. As with `brentq`, for nice\n functions the method will often satisfy the above condition\n with ``xtol/2`` and ``rtol/2``.\n rtol : number, optional\n The computed root ``x0`` will satisfy ``np.allclose(x, x0,\n atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The\n parameter cannot be smaller than its default value of\n ``4*np.finfo(float).eps``. As with `brentq`, for nice functions\n the method will often satisfy the above condition with\n ``xtol/2`` and ``rtol/2``.\n maxiter : int, optional\n if convergence is not achieved in `maxiter` iterations, an error is\n raised. Must be >= 0.\n args : tuple, optional\n containing extra arguments for the function `f`.\n `f` is called by ``apply(f, (x)+args)``.\n full_output : bool, optional\n If `full_output` is False, the root is returned. If `full_output` is\n True, the return value is ``(x, r)``, where `x` is the root, and `r` is\n a `RootResults` object.\n disp : bool, optional\n If True, raise RuntimeError if the algorithm didn't converge.\n Otherwise the convergence status is recorded in any `RootResults`\n return object.\n\n Returns\n -------\n x0 : float\n Zero of `f` between `a` and `b`.\n r : `RootResults` (present if ``full_output = True``)\n Object containing information about the convergence. In particular,\n ``r.converged`` is True if the routine converged.\n\n Examples\n --------\n >>> def f(x):\n ... return (x**2 - 1)\n\n >>> from scipy import optimize\n\n >>> root = optimize.brenth(f, -2, 0)\n >>> root\n -1.0\n\n >>> root = optimize.brenth(f, 0, 2)\n >>> root\n 1.0\n\n See Also\n --------\n fmin, fmin_powell, fmin_cg,\n fmin_bfgs, fmin_ncg : multivariate local optimizers\n\n leastsq : nonlinear least squares minimizer\n\n fmin_l_bfgs_b, fmin_tnc, fmin_cobyla : constrained multivariate optimizers\n\n basinhopping, differential_evolution, brute : global optimizers\n\n fminbound, brent, golden, bracket : local scalar minimizers\n\n fsolve : n-dimensional root-finding\n\n brentq, brenth, ridder, bisect, newton : one-dimensional root-finding\n\n fixed_point : scalar fixed-point finder\n\n \"\"\"\n if not isinstance(args, tuple):\n args = (args,)\n if xtol <= 0:\n raise ValueError(\"xtol too small (%g <= 0)\" % xtol)\n if rtol < _rtol:\n raise ValueError(\"rtol too small (%g < %g)\" % (rtol, _rtol))\n r = _zeros._brenth(f, a, b, xtol, rtol, maxiter, args, full_output, disp)\n return results_c(full_output, r)\n\n\n################################\n# TOMS \"Algorithm 748: Enclosing Zeros of Continuous Functions\", by\n# Alefeld, G. E. and Potra, F. A. 
and Shi, Yixun,\n# See [1]\n\n\ndef _within_tolerance(x, y, rtol, atol):\n diff = np.abs(x - y)\n z = np.abs(y)\n result = (diff <= (atol + rtol * z))\n return result\n\n\ndef _notclose(fs, rtol=_rtol, atol=_xtol):\n # Ensure not None, not 0, all finite, and not very close to each other\n notclosefvals = (\n all(fs) and all(np.isfinite(fs)) and\n not any(any(np.isclose(_f, fs[i + 1:], rtol=rtol, atol=atol))\n for i, _f in enumerate(fs[:-1])))\n return notclosefvals\n\n\ndef _secant(xvals, fvals):\n \"\"\"Perform a secant step, taking a little care\"\"\"\n # Secant has many \"mathematically\" equivalent formulations\n # x2 = x0 - (x1 - x0)/(f1 - f0) * f0\n # = x1 - (x1 - x0)/(f1 - f0) * f1\n # = (-x1 * f0 + x0 * f1) / (f1 - f0)\n # = (-f0 / f1 * x1 + x0) / (1 - f0 / f1)\n # = (-f1 / f0 * x0 + x1) / (1 - f1 / f0)\n x0, x1 = xvals[:2]\n f0, f1 = fvals[:2]\n if f0 == f1:\n return np.nan\n if np.abs(f1) > np.abs(f0):\n x2 = (-f0 / f1 * x1 + x0) / (1 - f0 / f1)\n else:\n x2 = (-f1 / f0 * x0 + x1) / (1 - f1 / f0)\n return x2\n\n\ndef _update_bracket(ab, fab, c, fc):\n \"\"\"Update a bracket given (c, fc), return the discarded endpoints.\"\"\"\n fa, fb = fab\n idx = (0 if np.sign(fa) * np.sign(fc) > 0 else 1)\n rx, rfx = ab[idx], fab[idx]\n fab[idx] = fc\n ab[idx] = c\n return rx, rfx\n\n\ndef _compute_divided_differences(xvals, fvals, N=None, full=True,\n forward=True):\n \"\"\"Return a matrix of divided differences for the xvals, fvals pairs\n\n DD[i, j] = f[x_{i-j}, ..., x_i] for 0 <= j <= i\n\n If full is False, just return the main diagonal(or last row):\n f[a], f[a, b] and f[a, b, c].\n If forward is False, return f[c], f[b, c], f[a, b, c].\"\"\"\n if full:\n if forward:\n xvals = np.asarray(xvals)\n else:\n xvals = np.array(xvals)[::-1]\n M = len(xvals)\n N = M if N is None else min(N, M)\n DD = np.zeros([M, N])\n DD[:, 0] = fvals[:]\n for i in range(1, N):\n DD[i:, i] = (np.diff(DD[i - 1:, i - 1]) /\n (xvals[i:] - xvals[:M - i]))\n return DD\n\n xvals = np.asarray(xvals)\n dd = np.array(fvals)\n row = np.array(fvals)\n idx2Use = (0 if forward else -1)\n dd[0] = fvals[idx2Use]\n for i in range(1, len(xvals)):\n denom = xvals[i:i + len(row) - 1] - xvals[:len(row) - 1]\n row = np.diff(row)[:] / denom\n dd[i] = row[idx2Use]\n return dd\n\n\ndef _interpolated_poly(xvals, fvals, x):\n \"\"\"Compute p(x) for the polynomial passing through the specified locations.\n\n Use Neville's algorithm to compute p(x) where p is the minimal degree\n polynomial passing through the points xvals, fvals\"\"\"\n xvals = np.asarray(xvals)\n N = len(xvals)\n Q = np.zeros([N, N])\n D = np.zeros([N, N])\n Q[:, 0] = fvals[:]\n D[:, 0] = fvals[:]\n for k in range(1, N):\n alpha = D[k:, k - 1] - Q[k - 1:N - 1, k - 1]\n diffik = xvals[0:N - k] - xvals[k:N]\n Q[k:, k] = (xvals[k:] - x) / diffik * alpha\n D[k:, k] = (xvals[:N - k] - x) / diffik * alpha\n # Expect Q[-1, 1:] to be small relative to Q[-1, 0] as x approaches a root\n return np.sum(Q[-1, 1:]) + Q[-1, 0]\n\n\ndef _inverse_poly_zero(a, b, c, d, fa, fb, fc, fd):\n \"\"\"Inverse cubic interpolation f-values -> x-values\n\n Given four points (fa, a), (fb, b), (fc, c), (fd, d) with\n fa, fb, fc, fd all distinct, find poly IP(y) through the 4 points\n and compute x=IP(0).\n \"\"\"\n return _interpolated_poly([fa, fb, fc, fd], [a, b, c, d], 0)\n\n\ndef _newton_quadratic(ab, fab, d, fd, k):\n \"\"\"Apply Newton-Raphson like steps, using divided differences to approximate f'\n\n ab is a real interval [a, b] containing a root,\n fab holds the real values of f(a), f(b)\n d 
is a real number outside [ab, b]\n k is the number of steps to apply\n \"\"\"\n a, b = ab\n fa, fb = fab\n _, B, A = _compute_divided_differences([a, b, d], [fa, fb, fd],\n forward=True, full=False)\n\n # _P is the quadratic polynomial through the 3 points\n def _P(x):\n # Horner evaluation of fa + B * (x - a) + A * (x - a) * (x - b)\n return (A * (x - b) + B) * (x - a) + fa\n\n if A == 0:\n r = a - fa / B\n else:\n r = (a if np.sign(A) * np.sign(fa) > 0 else b)\n # Apply k Newton-Raphson steps to _P(x), starting from x=r\n for i in range(k):\n r1 = r - _P(r) / (B + A * (2 * r - a - b))\n if not (ab[0] < r1 < ab[1]):\n if (ab[0] < r < ab[1]):\n return r\n r = sum(ab) / 2.0\n break\n r = r1\n\n return r\n\n\nclass TOMS748Solver(object):\n \"\"\"Solve f(x, *args) == 0 using Algorithm748 of Alefeld, Potro & Shi.\n \"\"\"\n _MU = 0.5\n _K_MIN = 1\n _K_MAX = 100 # A very high value for real usage. Expect 1, 2, maybe 3.\n\n def __init__(self):\n self.f = None\n self.args = None\n self.function_calls = 0\n self.iterations = 0\n self.k = 2\n # ab=[a,b] is a global interval containing a root\n self.ab = [np.nan, np.nan]\n # fab is function values at a, b\n self.fab = [np.nan, np.nan]\n self.d = None\n self.fd = None\n self.e = None\n self.fe = None\n self.disp = False\n self.xtol = _xtol\n self.rtol = _rtol\n self.maxiter = _iter\n\n def configure(self, xtol, rtol, maxiter, disp, k):\n self.disp = disp\n self.xtol = xtol\n self.rtol = rtol\n self.maxiter = maxiter\n # Silently replace a low value of k with 1\n self.k = max(k, self._K_MIN)\n # Noisily replace a high value of k with self._K_MAX\n if self.k > self._K_MAX:\n msg = \"toms748: Overriding k: ->%d\" % self._K_MAX\n warnings.warn(msg, RuntimeWarning)\n self.k = self._K_MAX\n\n def _callf(self, x, error=True):\n \"\"\"Call the user-supplied function, update book-keeping\"\"\"\n fx = self.f(x, *self.args)\n self.function_calls += 1\n if not np.isfinite(fx) and error:\n raise ValueError(\"Invalid function value: f(%f) -> %s \" % (x, fx))\n return fx\n\n def get_result(self, x, flag=_ECONVERGED):\n r\"\"\"Package the result and statistics into a tuple.\"\"\"\n return (x, self.function_calls, self.iterations, flag)\n\n def _update_bracket(self, c, fc):\n return _update_bracket(self.ab, self.fab, c, fc)\n\n def start(self, f, a, b, args=()):\n r\"\"\"Prepare for the iterations.\"\"\"\n self.function_calls = 0\n self.iterations = 0\n\n self.f = f\n self.args = args\n self.ab[:] = [a, b]\n if not np.isfinite(a) or np.imag(a) != 0:\n raise ValueError(\"Invalid x value: %s \" % (a))\n if not np.isfinite(b) or np.imag(b) != 0:\n raise ValueError(\"Invalid x value: %s \" % (b))\n\n fa = self._callf(a)\n if not np.isfinite(fa) or np.imag(fa) != 0:\n raise ValueError(\"Invalid function value: f(%f) -> %s \" % (a, fa))\n if fa == 0:\n return _ECONVERGED, a\n fb = self._callf(b)\n if not np.isfinite(fb) or np.imag(fb) != 0:\n raise ValueError(\"Invalid function value: f(%f) -> %s \" % (b, fb))\n if fb == 0:\n return _ECONVERGED, b\n\n if np.sign(fb) * np.sign(fa) > 0:\n raise ValueError(\"a, b must bracket a root f(%e)=%e, f(%e)=%e \" %\n (a, fa, b, fb))\n self.fab[:] = [fa, fb]\n\n return _EINPROGRESS, sum(self.ab) / 2.0\n\n def get_status(self):\n \"\"\"Determine the current status.\"\"\"\n a, b = self.ab[:2]\n if _within_tolerance(a, b, self.rtol, self.xtol):\n return _ECONVERGED, sum(self.ab) / 2.0\n if self.iterations >= self.maxiter:\n return _ECONVERR, sum(self.ab) / 2.0\n return _EINPROGRESS, sum(self.ab) / 2.0\n\n def iterate(self):\n 
\"\"\"Perform one step in the algorithm.\n\n Implements Algorithm 4.1(k=1) or 4.2(k=2) in [APS1995]\n \"\"\"\n self.iterations += 1\n eps = np.finfo(float).eps\n d, fd, e, fe = self.d, self.fd, self.e, self.fe\n ab_width = self.ab[1] - self.ab[0] # Need the start width below\n c = None\n\n for nsteps in range(2, self.k+2):\n # If the f-values are sufficiently separated, perform an inverse\n # polynomial interpolation step. Otherwise nsteps repeats of\n # an approximate Newton-Raphson step.\n if _notclose(self.fab + [fd, fe], rtol=0, atol=32*eps):\n c0 = _inverse_poly_zero(self.ab[0], self.ab[1], d, e,\n self.fab[0], self.fab[1], fd, fe)\n if self.ab[0] < c0 < self.ab[1]:\n c = c0\n if c is None:\n c = _newton_quadratic(self.ab, self.fab, d, fd, nsteps)\n\n fc = self._callf(c)\n if fc == 0:\n return _ECONVERGED, c\n\n # re-bracket\n e, fe = d, fd\n d, fd = self._update_bracket(c, fc)\n\n # u is the endpoint with the smallest f-value\n uix = (0 if np.abs(self.fab[0]) < np.abs(self.fab[1]) else 1)\n u, fu = self.ab[uix], self.fab[uix]\n\n _, A = _compute_divided_differences(self.ab, self.fab,\n forward=(uix == 0), full=False)\n c = u - 2 * fu / A\n if np.abs(c - u) > 0.5 * (self.ab[1] - self.ab[0]):\n c = sum(self.ab) / 2.0\n else:\n if np.isclose(c, u, rtol=eps, atol=0):\n # c didn't change (much).\n # Either because the f-values at the endpoints have vastly\n # differing magnitudes, or because the root is very close to\n # that endpoint\n frs = np.frexp(self.fab)[1]\n if frs[uix] < frs[1 - uix] - 50: # Differ by more than 2**50\n c = (31 * self.ab[uix] + self.ab[1 - uix]) / 32\n else:\n # Make a bigger adjustment, about the\n # size of the requested tolerance.\n mm = (1 if uix == 0 else -1)\n adj = mm * np.abs(c) * self.rtol + mm * self.xtol\n c = u + adj\n if not self.ab[0] < c < self.ab[1]:\n c = sum(self.ab) / 2.0\n\n fc = self._callf(c)\n if fc == 0:\n return _ECONVERGED, c\n\n e, fe = d, fd\n d, fd = self._update_bracket(c, fc)\n\n # If the width of the new interval did not decrease enough, bisect\n if self.ab[1] - self.ab[0] > self._MU * ab_width:\n e, fe = d, fd\n z = sum(self.ab) / 2.0\n fz = self._callf(z)\n if fz == 0:\n return _ECONVERGED, z\n d, fd = self._update_bracket(z, fz)\n\n # Record d and e for next iteration\n self.d, self.fd = d, fd\n self.e, self.fe = e, fe\n\n status, xn = self.get_status()\n return status, xn\n\n def solve(self, f, a, b, args=(),\n xtol=_xtol, rtol=_rtol, k=2, maxiter=_iter, disp=True):\n r\"\"\"Solve f(x) = 0 given an interval containing a zero.\"\"\"\n self.configure(xtol=xtol, rtol=rtol, maxiter=maxiter, disp=disp, k=k)\n status, xn = self.start(f, a, b, args)\n if status == _ECONVERGED:\n return self.get_result(xn)\n\n # The first step only has two x-values.\n c = _secant(self.ab, self.fab)\n if not self.ab[0] < c < self.ab[1]:\n c = sum(self.ab) / 2.0\n fc = self._callf(c)\n if fc == 0:\n return self.get_result(c)\n\n self.d, self.fd = self._update_bracket(c, fc)\n self.e, self.fe = None, None\n self.iterations += 1\n\n while True:\n status, xn = self.iterate()\n if status == _ECONVERGED:\n return self.get_result(xn)\n if status == _ECONVERR:\n fmt = \"Failed to converge after %d iterations, bracket is %s\"\n if disp:\n msg = fmt % (self.iterations + 1, self.ab)\n raise RuntimeError(msg)\n return self.get_result(xn, _ECONVERR)\n\n\ndef toms748(f, a, b, args=(), k=1,\n xtol=_xtol, rtol=_rtol, maxiter=_iter,\n full_output=False, disp=True):\n \"\"\"\n Find a zero using TOMS Algorithm 748 method.\n\n Implements the Algorithm 748 method of Alefeld, 
Potro and Shi to find a\n zero of the function `f` on the interval `[a , b]`, where `f(a)` and\n `f(b)` must have opposite signs.\n\n It uses a mixture of inverse cubic interpolation and\n \"Newton-quadratic\" steps. [APS1995].\n\n Parameters\n ----------\n f : function\n Python function returning a scalar. The function :math:`f`\n must be continuous, and :math:`f(a)` and :math:`f(b)`\n have opposite signs.\n a : scalar,\n lower boundary of the search interval\n b : scalar,\n upper boundary of the search interval\n args : tuple, optional\n containing extra arguments for the function `f`.\n `f` is called by ``f(x, *args)``.\n k : int, optional\n The number of Newton quadratic steps to perform each\n iteration. ``k>=1``.\n xtol : scalar, optional\n The computed root ``x0`` will satisfy ``np.allclose(x, x0,\n atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The\n parameter must be nonnegative.\n rtol : scalar, optional\n The computed root ``x0`` will satisfy ``np.allclose(x, x0,\n atol=xtol, rtol=rtol)``, where ``x`` is the exact root.\n maxiter : int, optional\n if convergence is not achieved in `maxiter` iterations, an error is\n raised. Must be >= 0.\n full_output : bool, optional\n If `full_output` is False, the root is returned. If `full_output` is\n True, the return value is ``(x, r)``, where `x` is the root, and `r` is\n a `RootResults` object.\n disp : bool, optional\n If True, raise RuntimeError if the algorithm didn't converge.\n Otherwise the convergence status is recorded in the `RootResults`\n return object.\n\n Returns\n -------\n x0 : float\n Approximate Zero of `f`\n r : `RootResults` (present if ``full_output = True``)\n Object containing information about the convergence. In particular,\n ``r.converged`` is True if the routine converged.\n\n See Also\n --------\n brentq, brenth, ridder, bisect, newton\n fsolve : find zeroes in n dimensions.\n\n Notes\n -----\n `f` must be continuous.\n Algorithm 748 with ``k=2`` is asymptotically the most efficient\n algorithm known for finding roots of a four times continuously\n differentiable function.\n In contrast with Brent's algorithm, which may only decrease the length of\n the enclosing bracket on the last step, Algorithm 748 decreases it each\n iteration with the same asymptotic efficiency as it finds the root.\n\n For easy statement of efficiency indices, assume that `f` has 4\n continuouous deriviatives.\n For ``k=1``, the convergence order is at least 2.7, and with about\n asymptotically 2 function evaluations per iteration, the efficiency\n index is approximately 1.65.\n For ``k=2``, the order is about 4.6 with asymptotically 3 function\n evaluations per iteration, and the efficiency index 1.66.\n For higher values of `k`, the efficiency index approaches\n the `k`-th root of ``(3k-2)``, hence ``k=1`` or ``k=2`` are\n usually appropriate.\n\n References\n ----------\n .. [APS1995]\n Alefeld, G. E. and Potra, F. A. and Shi, Yixun,\n *Algorithm 748: Enclosing Zeros of Continuous Functions*,\n ACM Trans. Math. Softw. Volume 221(1995)\n doi = {10.1145/210089.210111}\n\n Examples\n --------\n >>> def f(x):\n ... 
return (x**3 - 1) # only one real root at x = 1\n\n >>> from scipy import optimize\n >>> root, results = optimize.toms748(f, 0, 2, full_output=True)\n >>> root\n 1.0\n >>> results\n converged: True\n flag: 'converged'\n function_calls: 11\n iterations: 5\n root: 1.0\n \"\"\"\n if xtol <= 0:\n raise ValueError(\"xtol too small (%g <= 0)\" % xtol)\n if rtol < _rtol / 4:\n raise ValueError(\"rtol too small (%g < %g)\" % (rtol, _rtol))\n if maxiter < 1:\n raise ValueError(\"maxiter must be greater than 0\")\n if not np.isfinite(a):\n raise ValueError(\"a is not finite %s\" % a)\n if not np.isfinite(b):\n raise ValueError(\"b is not finite %s\" % b)\n if a >= b:\n raise ValueError(\"a and b are not an interval [%d, %d]\" % (a, b))\n if not k >= 1:\n raise ValueError(\"k too small (%s < 1)\" % k)\n\n if not isinstance(args, tuple):\n args = (args,)\n solver = TOMS748Solver()\n result = solver.solve(f, a, b, args=args, k=k, xtol=xtol, rtol=rtol,\n maxiter=maxiter, disp=disp)\n x, function_calls, iterations, flag = result\n return _results_select(full_output, (x, function_calls, iterations, flag))\n",
"\"\"\"Testing for bicluster metrics module\"\"\"\n\nimport numpy as np\n\nfrom sklearn.utils.testing import assert_equal, assert_almost_equal\n\nfrom sklearn.metrics.cluster.bicluster import _jaccard\nfrom sklearn.metrics import consensus_score\n\n\ndef test_jaccard():\n a1 = np.array([True, True, False, False])\n a2 = np.array([True, True, True, True])\n a3 = np.array([False, True, True, False])\n a4 = np.array([False, False, True, True])\n\n assert_equal(_jaccard(a1, a1, a1, a1), 1)\n assert_equal(_jaccard(a1, a1, a2, a2), 0.25)\n assert_equal(_jaccard(a1, a1, a3, a3), 1.0 / 7)\n assert_equal(_jaccard(a1, a1, a4, a4), 0)\n\n\ndef test_consensus_score():\n a = [[True, True, False, False],\n [False, False, True, True]]\n b = a[::-1]\n\n assert_equal(consensus_score((a, a), (a, a)), 1)\n assert_equal(consensus_score((a, a), (b, b)), 1)\n assert_equal(consensus_score((a, b), (a, b)), 1)\n assert_equal(consensus_score((a, b), (b, a)), 1)\n\n assert_equal(consensus_score((a, a), (b, a)), 0)\n assert_equal(consensus_score((a, a), (a, b)), 0)\n assert_equal(consensus_score((b, b), (a, b)), 0)\n assert_equal(consensus_score((b, b), (b, a)), 0)\n\n\ndef test_consensus_score_issue2445():\n ''' Different number of biclusters in A and B'''\n a_rows = np.array([[True, True, False, False],\n [False, False, True, True],\n [False, False, False, True]])\n a_cols = np.array([[True, True, False, False],\n [False, False, True, True],\n [False, False, False, True]])\n idx = [0, 2]\n s = consensus_score((a_rows, a_cols), (a_rows[idx], a_cols[idx]))\n # B contains 2 of the 3 biclusters in A, so score should be 2/3\n assert_almost_equal(s, 2.0/3.0)\n"
] |
[
[
"numpy._pytesttester.PytestTester"
],
[
"numpy.diag",
"numpy.dot",
"numpy.sqrt",
"scipy.linalg.cho_factor",
"numpy.asarray",
"numpy.eye",
"scipy.linalg.cho_solve",
"numpy.ones",
"scipy.linalg.eigh",
"scipy.linalg.cholesky",
"scipy.sparse.linalg.aslinearoperator",
"scipy.linalg.norm",
"numpy.savetxt",
"numpy.argsort",
"numpy.array",
"numpy.where",
"scipy.linalg.inv"
],
[
"matplotlib.ticker.ScalarFormatter.__init__"
],
[
"sklearn.utils.testing.assert_allclose",
"sklearn.utils.seq_dataset.ArrayDataset32",
"sklearn.utils.seq_dataset.CSRDataset32",
"numpy.arange",
"sklearn.datasets.load_iris",
"sklearn.utils.seq_dataset.CSRDataset64",
"scipy.sparse.csr_matrix",
"numpy.testing.assert_array_equal",
"sklearn.utils.seq_dataset.ArrayDataset64"
],
[
"numpy.testing.assert_equal",
"scipy.io.arff.arffread.read_header",
"numpy.dtype",
"scipy.io.arff.arffread.loadarff",
"numpy.testing.assert_array_equal",
"scipy._lib.six.xrange",
"numpy.testing.assert_",
"numpy.array",
"numpy.empty",
"numpy.testing.assert_array_almost_equal"
],
[
"matplotlib.rc_context",
"matplotlib.cbook._suppress_matplotlib_deprecation_warning",
"matplotlib.rcdefaults",
"matplotlib.get_data_path",
"matplotlib.rc_params_from_file",
"matplotlib.get_configdir"
],
[
"matplotlib.backend_bases.FigureCanvasBase.enter_notify_event",
"matplotlib.backend_bases.NavigationToolbar2.__init__",
"matplotlib.backend_bases.ToolContainerBase.__init__",
"matplotlib.widgets.SubplotTool",
"matplotlib.backend_bases.FigureCanvasBase.scroll_event",
"matplotlib.backend_tools.add_tools_to_manager",
"matplotlib._pylab_helpers.Gcf.destroy",
"matplotlib.backend_bases.FigureCanvasBase.button_release_event",
"matplotlib.is_interactive",
"matplotlib.backend_managers.ToolManager",
"matplotlib.backend_tools.ConfigureSubplotsBase.__init__",
"matplotlib.cbook.deprecated",
"matplotlib.backend_bases.FigureCanvasBase.key_press_event",
"matplotlib.backend_bases.FigureCanvasBase.leave_notify_event",
"matplotlib._pylab_helpers.Gcf.get_num_fig_managers",
"matplotlib.backend_bases.FigureCanvasBase.__init__",
"matplotlib.backend_tools.add_tools_to_container",
"matplotlib.figure.Figure",
"matplotlib.backend_bases.FigureCanvasBase.resize_event",
"matplotlib.backend_bases.FigureCanvasBase.motion_notify_event",
"matplotlib.backend_bases.FigureManagerBase.__init__",
"matplotlib.backend_bases.FigureCanvasBase.key_release_event",
"matplotlib.backend_bases.StatusbarBase.__init__",
"matplotlib.backend_bases.FigureCanvasBase.button_press_event",
"matplotlib.backend_bases.TimerBase._on_timer"
],
[
"numpy.random.lognormal",
"pandas.core.reshape.pivot.pivot_table",
"pandas.Series",
"pandas.PeriodIndex",
"numpy.asarray",
"pandas.MultiIndex.from_tuples",
"pandas.DataFrame",
"numpy.dtype",
"pandas.util.testing.assert_frame_equal",
"pandas.util.testing.assert_index_equal",
"numpy.random.randn",
"numpy.random.randint",
"pandas.IntervalIndex.from_arrays",
"pandas.crosstab",
"numpy.arange",
"numpy.eye",
"pandas.util.testing.assert_series_equal",
"pandas.DatetimeIndex",
"pandas.Index",
"pandas.Int64Index",
"pandas.core.reshape.pivot.crosstab",
"pandas.Categorical.from_codes",
"pandas.concat",
"pandas.MultiIndex",
"pandas.Categorical",
"pandas.MultiIndex.from_product",
"pandas.Interval",
"pandas.date_range",
"pandas.pivot",
"numpy.array",
"pandas.pivot_table",
"pandas.CategoricalIndex",
"pandas.api.types.CategoricalDtype",
"pandas.Grouper",
"pandas.MultiIndex.from_arrays",
"pandas.Period",
"pandas.Timestamp",
"numpy.empty"
],
[
"matplotlib.cbook._setattr_cm",
"matplotlib.colors.to_rgba",
"numpy.asarray",
"matplotlib.transforms.Affine2D",
"matplotlib.cbook.CallbackRegistry",
"numpy.any",
"matplotlib.textpath.TextToPath",
"matplotlib.cbook._make_keyword_only",
"numpy.clip",
"matplotlib.path.Path.hatch",
"matplotlib.tight_bbox.adjust_bbox",
"matplotlib._pylab_helpers.Gcf.destroy_fig",
"matplotlib.is_interactive",
"matplotlib._pylab_helpers.Gcf.get_all_fig_managers",
"matplotlib.cbook.deprecated",
"matplotlib.path.Path",
"matplotlib.cbook._topmost_artist",
"matplotlib.cbook.Stack",
"matplotlib.get_backend",
"numpy.deg2rad",
"matplotlib.collections.QuadMesh.convert_mesh_to_paths",
"numpy.isfinite",
"matplotlib.widgets.LockDraw",
"matplotlib.transforms.IdentityTransform",
"matplotlib.texmanager.TexManager",
"matplotlib.cbook.warn_deprecated",
"matplotlib._pylab_helpers.Gcf.get_active",
"matplotlib.cbook.strip_math"
],
[
"pandas.Series",
"numpy.linspace",
"numpy.asarray",
"pandas.MultiIndex.from_tuples",
"pandas.DataFrame",
"pandas.util.testing.assert_frame_equal",
"numpy.max",
"pandas.util.testing.assert_index_equal",
"numpy.random.randn",
"numpy.mean",
"numpy.random.randint",
"pandas.util.testing.assert_numpy_array_equal",
"pandas.util.testing.assert_categorical_equal",
"numpy.arange",
"pandas.Index",
"pandas.util.testing.assert_series_equal",
"pandas.cut",
"pandas.Categorical.from_codes",
"pandas.Categorical",
"pandas.util.testing.assert_equal",
"pandas.date_range",
"numpy.array",
"numpy.random.RandomState",
"numpy.sum",
"pandas.CategoricalIndex",
"pandas.MultiIndex.from_arrays",
"pandas.qcut",
"pandas.util.testing.assert_dict_equal"
],
[
"scipy.linalg._interpolative_backend.iddp_id",
"scipy.linalg._interpolative_backend.idz_diffsnorm",
"scipy.linalg._interpolative_backend.idd_reconid",
"scipy.linalg._interpolative_backend.idzr_svd",
"scipy.linalg._interpolative_backend.idz_copycols",
"scipy.linalg._interpolative_backend.idzr_aid",
"scipy.linalg._interpolative_backend.idz_reconint",
"scipy.linalg._interpolative_backend.idz_reconid",
"scipy.linalg._interpolative_backend.id_srando",
"scipy.linalg._interpolative_backend.iddr_rsvd",
"scipy.linalg._interpolative_backend.idzp_asvd",
"scipy.linalg._interpolative_backend.idd_reconint",
"scipy.linalg._interpolative_backend.idd_diffsnorm",
"scipy.linalg._interpolative_backend.iddr_aid",
"scipy.linalg._interpolative_backend.idd_copycols",
"scipy.sparse.linalg.aslinearoperator",
"scipy.linalg._interpolative_backend.idzp_aid",
"scipy.linalg._interpolative_backend.idd_estrank",
"scipy.linalg._interpolative_backend.idzp_id",
"scipy.linalg._interpolative_backend.idzp_rid",
"scipy.linalg._interpolative_backend.iddp_asvd",
"scipy.linalg._interpolative_backend.idzr_asvd",
"scipy.linalg._interpolative_backend.idzp_rsvd",
"scipy.linalg._interpolative_backend.idz_findrank",
"scipy.linalg._interpolative_backend.idzp_svd",
"scipy.linalg._interpolative_backend.iddr_asvd",
"scipy.linalg._interpolative_backend.iddp_rsvd",
"numpy.asfortranarray",
"scipy.linalg._interpolative_backend.idzr_id",
"numpy.random.rand",
"scipy.linalg._interpolative_backend.idz_id2svd",
"numpy.random.RandomState",
"scipy.linalg._interpolative_backend.iddp_aid",
"scipy.linalg._interpolative_backend.iddr_svd",
"scipy.linalg._interpolative_backend.id_srandi",
"scipy.linalg._interpolative_backend.idzr_rsvd",
"scipy.linalg._interpolative_backend.idd_id2svd",
"scipy.linalg._interpolative_backend.iddr_rid",
"scipy.linalg._interpolative_backend.idz_estrank",
"scipy.linalg._interpolative_backend.idz_snorm",
"scipy.linalg._interpolative_backend.iddp_svd",
"scipy.linalg._interpolative_backend.idd_snorm",
"scipy.linalg._interpolative_backend.idzr_rid",
"numpy.prod",
"scipy.linalg._interpolative_backend.idd_findrank",
"scipy.linalg._interpolative_backend.iddr_id",
"scipy.linalg._interpolative_backend.iddp_rid"
],
[
"numpy.rollaxis",
"numpy.abs",
"numpy.isfinite",
"numpy.allclose",
"numpy.asarray",
"numpy.issubdtype",
"numpy.sign",
"numpy.max",
"scipy.linalg.solve",
"numpy.diff",
"numpy.any",
"numpy.zeros_like",
"numpy.errstate",
"numpy.zeros",
"scipy.linalg.solve_banded",
"numpy.empty"
],
[
"numpy.linspace",
"numpy.asarray",
"numpy.issubdtype",
"numpy.dtype",
"numpy.all",
"numpy.searchsorted",
"numpy.iscomplexobj",
"numpy.histogram",
"numpy.unique",
"numpy.arange",
"numpy.atleast_1d",
"numpy.asanyarray",
"numpy.zeros",
"numpy.median",
"numpy.broadcast_arrays",
"numpy.array",
"numpy.bincount",
"numpy.isscalar",
"numpy.empty"
],
[
"numpy.sqrt",
"numpy.arctan2",
"numpy.ma.subtract",
"numpy.ma.greater",
"numpy.ma.masked_where",
"numpy.where",
"numpy.exp",
"numpy.sin",
"numpy.less_equal",
"numpy.ma.arctan",
"numpy.zeros",
"numpy.ma.put",
"numpy.multiply",
"numpy.ma.concatenate",
"numpy.ma.absolute",
"numpy.equal",
"numpy.array",
"numpy.tanh",
"numpy.sum",
"numpy.ma.masked_greater_equal",
"numpy.absolute",
"numpy.ma.make_mask",
"numpy.ma.minimum",
"numpy.ma.masked_values",
"numpy.ma.masked_array",
"numpy.ma.not_equal",
"numpy.add",
"numpy.compat.pickle.loads",
"numpy.arctan",
"numpy.ma.cosh",
"numpy.concatenate",
"numpy.seterr",
"numpy.ma.where",
"numpy.ma.getmask",
"numpy.ma.tanh",
"numpy.divide",
"numpy.arcsin",
"numpy.compat.pickle.dumps",
"numpy.less",
"numpy.ma.ravel",
"numpy.ma.masked_greater",
"numpy.greater_equal",
"numpy.ma.arange",
"numpy.ma.product",
"numpy.ma.isMaskedArray",
"numpy.cosh",
"numpy.ma.less",
"numpy.ma.log",
"numpy.arccos",
"numpy.ma.add",
"numpy.tan",
"numpy.testing.assert_",
"numpy.errstate",
"numpy.not_equal",
"numpy.ma.masked_less_equal",
"numpy.add.accumulate",
"numpy.ma.minimum.reduce",
"numpy.ma.filled",
"numpy.ma.add.reduce",
"numpy.ma.take",
"numpy.sinh",
"numpy.ma.arccos",
"numpy.ma.sinh",
"numpy.product",
"numpy.ma.masked_less",
"numpy.take",
"numpy.ma.transpose",
"numpy.ma.outer",
"numpy.conjugate",
"numpy.testing.assert_equal",
"numpy.ma.sum",
"numpy.ma.nonzero",
"numpy.greater",
"numpy.ma.allclose",
"numpy.add.reduce",
"numpy.ma.cos",
"numpy.ma.ones",
"numpy.ma.sort",
"numpy.ma.add.accumulate",
"numpy.nonzero",
"numpy.transpose",
"numpy.ma.tan",
"numpy.ma.arcsin",
"numpy.ma.less_equal",
"numpy.ma.masked_not_equal",
"numpy.ma.maximum",
"numpy.cos",
"numpy.ma.allequal",
"numpy.ma.exp",
"numpy.ma.alltrue",
"numpy.ma.count",
"numpy.ma.multiply",
"numpy.ma.divide",
"numpy.ma.array",
"numpy.ma.arctan2",
"numpy.ma.sin",
"numpy.ma.choose",
"numpy.arange",
"numpy.ma.maximum.reduce",
"numpy.ma.sometrue",
"numpy.ma.shape",
"numpy.subtract",
"numpy.ma.greater_equal",
"numpy.ravel",
"numpy.ma.conjugate",
"numpy.ma.zeros",
"numpy.ma.sqrt",
"numpy.ma.inner",
"numpy.ma.masked_equal",
"numpy.ma.equal",
"numpy.ma.average",
"numpy.ma.log10",
"numpy.sort",
"numpy.ma.resize",
"numpy.ma.repeat"
],
[
"numpy.dot",
"scipy.sparse.issparse",
"numpy.nonzero",
"numpy.power",
"numpy.ones",
"numpy.finfo",
"scipy.special.gammaln",
"numpy.outer",
"numpy.exp",
"numpy.zeros",
"numpy.sum",
"numpy.vstack"
],
[
"numpy.frexp",
"numpy.imag",
"numpy.ones_like",
"numpy.abs",
"numpy.isfinite",
"numpy.asarray",
"numpy.finfo",
"numpy.sign",
"numpy.size",
"numpy.where",
"numpy.diff",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.isclose"
],
[
"sklearn.metrics.consensus_score",
"numpy.array",
"sklearn.metrics.cluster.bicluster._jaccard",
"sklearn.utils.testing.assert_almost_equal"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.12",
"0.14",
"0.15"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.12",
"0.14",
"0.15"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.18",
"0.19"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
thunter009/thought
|
[
"c6ae1af13af5079186694968b2105192a6e26f3d"
] |
[
"src/thought/core.py"
] |
[
"\"\"\"Main module. If include_dataclasses_scaffolding is enabled, you will see Data Class scaffolding here\"\"\"\nfrom dataclasses import dataclass\nfrom datetime import datetime\nfrom typing import List\n\nimport pandas as pd\nfrom recordlinkage import Index, Compare\nfrom notion.collection import Collection\nfrom thought.client import get_client\nfrom thought.utils import default_field, now\n\n\n@dataclass\nclass Metadata:\n \"\"\"\n The Metadata object. Contains helper functions and generalized metadata\n \"\"\"\n run_time: datetime = default_field(now(), init=False, repr=False)\n\n@dataclass\nclass CollectionExtension:\n \"\"\"\n A Collection Extension object which wraps an existing notion-py Collection object and adds additional functionality.\n \"\"\"\n collection: Collection\n metadata: Metadata = default_field(Metadata(), init=False, repr=False)\n\n def dedupe(self, \n dataframe: pd.DataFrame = None, \n comparison_fields: List = None, \n keep_first: bool = True,\n **kwargs) -> pd.DataFrame:\n '''\n Function that dedupes an input dataframe\n\n Arguments\n ---------\n\n dataframe: A pandas DataFrame object to perform deduplication on. If a dataframe is not passed, \n comparison_fields: A List of string field names to perform the deduplication with. If not specified, defaults to all columns in the passed dataframe.\n\n Parameters\n ----------\n keep_first: Keeps the first instance of a duplicate record. If false, will keep the last instance of a record. Defaults to True.\n\n Returns\n -------\n A pandas dataframe with duplicated records removed\n '''\n # if dataframe argument not passed, use internal object records\n if not dataframe:\n dataframe = self.asdataframe()\n\n # if comparison fields defaults to all fields if not specified\n if not comparison_fields:\n comparison_fields = dataframe.columns.to_list()\n \n # Indexation step\n indexer = Index()\n # TODO: add flexability for different indexing strategies here\n indexer.full()\n candidate_links = indexer.index(dataframe)\n\n # Comparison step\n compare_cl = Compare()\n # TODO: add flexability for different comparison types here\n for field in comparison_fields:\n compare_cl.exact(field, field, label=field)\n features = compare_cl.compute(candidate_links, dataframe)\n\n # Classification step\n num_features = len(comparison_fields)\n matches = features[features.sum(axis=1) == num_features]\n index_to_drop = matches.index.get_level_values(0) if keep_first \\\n else matches.index.get_level_values(1)\n return dataframe.drop(index_to_drop).reset_index()\n \n def asdataframe(self) -> pd.DataFrame:\n '''\n Returns a Collection's Block rows as a pandas data frame using the get_all_properties function.\n '''\n holder = []\n rows = self.collection.get_rows()\n for block in rows:\n row = block.get_all_properties()\n holder.append(row)\n return pd.DataFrame(holder)\n"
] |
[
[
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
phorne-uncharted/d3m-primitives
|
[
"77d900b9dd6ab4b2b330f4e969dabcdc419c73e1"
] |
[
"tests/test_image_retrieval.py"
] |
[
"import os\nfrom glob import glob\nimport time\nimport json\n\nfrom PIL import Image\nimport pandas as pd\nimport numpy as np\nimport torchvision as tv\nfrom rsp.data import bilinear_upsample, BANDS\nfrom tifffile import imread as tiffread\nfrom d3m.container import DataFrame as d3m_DataFrame\nfrom d3m.metadata import base as metadata_base\n\nfrom kf_d3m_primitives.remote_sensing.featurizer.remote_sensing_pretrained import (\n RemoteSensingPretrainedPrimitive, \n Hyperparams as rs_hp\n)\nfrom kf_d3m_primitives.remote_sensing.image_retrieval.image_retrieval import (\n ImageRetrievalPrimitive, \n Hyperparams as ir_hp\n)\nfrom kf_d3m_primitives.remote_sensing.image_retrieval.image_retrieval_pipeline import ImageRetrievalPipeline\n\n\namdim_path = '/static_volumes/8946fea864c29ed785e00a9cbaa9a50295eb5a334b014f27ba20927104b07f46'\nmoco_path = '/static_volumes/fcc8a5a05fa7dbad8fc55584a77fc5d2c407e03a88610267860b45208e152f1f'\n\ndef load_nwpu(data_dir: str = '/NWPU-RESISC45', n_imgs = 200):\n paths = sorted(glob(os.path.join(data_dir, '*/*')))\n paths = [os.path.abspath(p) for p in paths]\n imgs = [Image.open(p) for p in paths[:n_imgs]]\n labels = [os.path.basename(os.path.dirname(p)) for p in paths[:n_imgs]]\n\n transform = tv.transforms.Compose([\n tv.transforms.ToTensor(),\n tv.transforms.Normalize(\n mean = (0.3680, 0.3810, 0.3436),\n std = (0.2034, 0.1854, 0.1848),\n )\n ])\n imgs = [transform(img) for img in imgs]\n\n imgs = d3m_DataFrame(pd.DataFrame({'imgs': imgs}))\n labels = np.array(labels)\n return imgs, labels\n\ndef load_patch(imname):\n patch = [\n tiffread(f'{imname}_{band}.tif')\n for band in BANDS\n ]\n patch = np.stack([bilinear_upsample(xx) for xx in patch]) \n return patch\n\ndef load_big_earthnet():\n fnames = sorted(glob('/test_data/bigearth-100-single/*/*.tif'))\n imnames = sorted(list(set(['_'.join(f.split('_')[:-1]) for f in fnames])))\n imgs = [\n load_patch(img_path).astype(np.float32) \n for img_path in imnames\n ]\n imgs_df = pd.DataFrame({'image_col': imgs, 'index': range(len(imgs))})\n imgs_df = d3m_DataFrame(imgs_df)\n imgs_df.metadata = imgs_df.metadata.add_semantic_type(\n (metadata_base.ALL_ELEMENTS, 1),\n 'https://metadata.datadrivendiscovery.org/types/PrimaryKey'\n )\n\n y = [i.split('/')[3] for i in imnames]\n\n return imgs_df, np.array(y)\n\ndef iterative_labeling(features, labels, seed_idx = 2, n_rounds = 5):\n\n # initial query image\n y = (labels == labels[seed_idx]).astype(np.int)\n annotations = np.zeros(features.shape[0]) - 1\n annotations[seed_idx] = 1\n\n sampler = ImageRetrievalPrimitive(\n hyperparams=ir_hp(\n ir_hp.defaults(),\n reduce_dimension=32\n )\n )\n\n n_pos, n_neg = 1, 0\n for i in range(n_rounds):\n \n # generate ranking by similarity\n sampler.set_training_data(\n inputs = features, \n outputs = d3m_DataFrame(pd.DataFrame({'annotations': annotations}))\n )\n sampler.fit()\n ranking_df = sampler.produce(inputs = features).value\n assert ranking_df.shape[0] == features.shape[0] - i - 1\n\n exc_labeled = ranking_df['index'].values\n inc_labeled = np.concatenate((sampler.pos_idxs, exc_labeled))\n\n # simulate human labeling\n next_idx = exc_labeled[0]\n next_label = y[next_idx]\n annotations[next_idx] = next_label\n\n if next_label == 1:\n n_pos += 1\n else:\n n_neg += 1\n\n # evaluate ranking against ground truth\n results = {\n 'round': i + 1,\n 'next_idx': int(next_idx),\n 'next_label': next_label,\n 'n_pos': n_pos,\n 'n_neg': n_neg,\n 'a_p': [\n float(y[inc_labeled[:k]].mean()) \n for k in 2 ** np.arange(11)\n ], # precision, including 
labeled\n 'u_p': [\n float(y[exc_labeled[:k]].mean()) \n for k in 2 ** np.arange(11)\n ], # precision, excluding labeled\n 'r_p': [\n float(y[inc_labeled[:k]].sum()/y.sum()) \n for k in 2**np.arange(11)\n ], # recall, including labeled\n }\n print()\n print(results)\n\n# def test_nwpu():\n# train_inputs, labels = load_nwpu()\n\n# featurizer = RemoteSensingPretrainedPrimitive(\n# hyperparams=rs_hp(\n# rs_hp.defaults(),\n# inference_model = 'moco',\n# use_columns = [0],\n# ),\n# volumes = {'amdim_weights': amdim_path, 'moco_weights': moco_path}\n# )\n# features = featurizer.produce(inputs = train_inputs).value\n# #features.to_pickle(\"dummy.pkl\")\n# #features = pd.read_pickle(\"dummy.pkl\")\n\n# iterative_labeling(features, labels)\n\ndef test_big_earthnet():\n\n train_inputs, labels = load_big_earthnet()\n\n featurizer = RemoteSensingPretrainedPrimitive(\n hyperparams=rs_hp(\n rs_hp.defaults(),\n inference_model = 'moco',\n use_columns = [0],\n ),\n volumes = {'amdim_weights': amdim_path, 'moco_weights': moco_path}\n )\n features = featurizer.produce(inputs = train_inputs).value\n features.to_pickle(\"dummy.pkl\")\n # features = pd.read_pickle(\"dummy.pkl\")\n\n iterative_labeling(features, labels)\n\ndef test_iterative_pipeline(\n dataset = 'LL1_bigearth_landuse_detection', \n n_rows = 2188,\n n_rounds = 2,\n):\n pipeline = ImageRetrievalPipeline(dataset = dataset)\n pipeline.write_pipeline()\n for i in range(n_rounds):\n print(f'Running round {i} pipeline...')\n pipeline.make_annotations_dataset(n_rows, round_num = i)\n pipeline.fit_produce()\n pipeline.delete_pipeline()\n pipeline.delete_annotations_dataset()\n\n\n"
] |
[
[
"numpy.arange",
"pandas.DataFrame",
"numpy.concatenate",
"numpy.array",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
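The `iterative_labeling` routine in the record above evaluates each ranking with precision and recall at power-of-two cutoffs (`2 ** np.arange(11)`). A minimal standalone sketch of that scoring step, assuming only a binary relevance vector and a ranked index array (the names below are illustrative, not taken from the record):

```python
import numpy as np

def precision_recall_at_k(relevance: np.ndarray, ranking: np.ndarray, max_pow: int = 7):
    """Precision@k and recall@k over power-of-two cutoffs, mirroring the
    'a_p'/'r_p' lists assembled in the record's results dict."""
    ks = 2 ** np.arange(max_pow)               # 1, 2, 4, ..., 2**(max_pow - 1)
    hits = relevance[ranking]                   # 1 where the ranked item is a true positive
    precision = [float(hits[:k].mean()) for k in ks]
    recall = [float(hits[:k].sum() / relevance.sum()) for k in ks]
    return ks, precision, recall

# Toy usage: 100 items, 10 of them relevant, scored against a random ranking.
rng = np.random.default_rng(0)
relevance = np.zeros(100, dtype=int)
relevance[rng.choice(100, size=10, replace=False)] = 1
ranking = rng.permutation(100)
print(precision_recall_at_k(relevance, ranking))
```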
yaoMYZ/ROP
|
[
"08418e43deefeacfe32da86a2bcc8f94c0105c7c"
] |
[
"DealData/__init__.py"
] |
[
"from DealData.FileOperation import FileOperation\nimport numpy as np\ndef test():\n a=np.random.uniform(0,1,size=10)\n b = np.random.uniform(0, 1, size=10)\n print(a)\n print(b)\n print(np.corrcoef(a,b))\n print(cal_corrcoef(a,b))\n pass\n\ndef cal_corrcoef(X,Y):\n # 均值\n Xmean = np.mean(X)\n Ymean = np.mean(Y)\n\n # 标准差\n Xsd = np.std(X)\n Ysd = np.std(Y)\n\n # z分数\n ZX = (X - Xmean) / Xsd\n ZY = (Y - Ymean) / Ysd\n\n # 相关系数\n r = np.sum(ZX * ZY) / len(X)\n return r\n\nif __name__=='__main__':\n fileOp=FileOperation()\n file_path='fdf/dfd.csv'\n print(fileOp.get_file_name(file_path))\n # test()"
] |
[
[
"numpy.std",
"numpy.mean",
"numpy.corrcoef",
"numpy.random.uniform",
"numpy.sum"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
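The `cal_corrcoef` helper in the record above computes Pearson's r from z-scores built with `np.std` (population standard deviation, `ddof=0`), which is why dividing the z-score product sum by `len(X)` matches `np.corrcoef`. A self-contained sketch of that equivalence check (the names below are illustrative):

```python
import numpy as np

def pearson_via_zscores(x: np.ndarray, y: np.ndarray) -> float:
    # np.std defaults to ddof=0, so dividing by len(x) (not len(x) - 1) is consistent.
    zx = (x - x.mean()) / x.std()
    zy = (y - y.mean()) / y.std()
    return float(np.sum(zx * zy) / len(x))

rng = np.random.default_rng(42)
a = rng.uniform(0, 1, size=10)
b = rng.uniform(0, 1, size=10)
assert np.isclose(pearson_via_zscores(a, b), np.corrcoef(a, b)[0, 1])
```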
midas-research/calling-out-bluff
|
[
"4de3c56b64edeeef9968288679c4e5b261e9949c",
"4de3c56b64edeeef9968288679c4e5b261e9949c"
] |
[
"Model5-MemoryNets/predict_adv-Copy1.py",
"Model5-MemoryNets/predict_adv-Copy5.py"
] |
[
"import tensorflow as tf\nimport data_utils\nfrom qwk import quadratic_weighted_kappa\nimport time\nimport os\nimport sys\nimport pandas as pd\nimport numpy as np\n\ngraph = tf.get_default_graph()\n\nessay_set_id = 1\nnum_tokens = 42\nembedding_size = 300\nnum_samples = 1\nis_regression = False\n\nearly_stop_count = 0\nmax_step_count = 10\nis_regression = False\ngated_addressing = False\n# essay_set_id = 1\nbatch_size = 15\nembedding_size = 300\nfeature_size = 100\nl2_lambda = 0.3\nhops = 3\nreader = 'bow' # gru may not work\nepochs = 200\nnum_samples = 1\nnum_tokens = 42\ntest_batch_size = batch_size\nrandom_state = 0\n\ntraining_path = 'training_set_rel3.tsv'\nessay_list, resolved_scores, essay_id = data_utils.load_training_data(training_path, essay_set_id)\n\nmax_score = max(resolved_scores)\nmin_score = min(resolved_scores)\nif essay_set_id == 7:\n min_score, max_score = 0, 30\nelif essay_set_id == 8:\n min_score, max_score = 0, 60\n\nprint(\"Max Score\", max_score)\nprint(\"Min Score\", min_score)\nscore_range = range(min_score, max_score+1)\n# load glove\nword_idx, word2vec = data_utils.load_glove(num_tokens, dim=embedding_size)\n\nvocab_size = len(word_idx) + 1\nprint(\"vocab size\", vocab_size)\n# stat info on data set\n\nsent_size_list = list(map(len, [essay for essay in essay_list]))\nmax_sent_size = max(sent_size_list)\nmean_sent_size = int(np.mean(sent_size_list))\nE = data_utils.vectorize_data(essay_list, word_idx, max_sent_size)\n\ntesting_path = 'aes_data/essay1/fold_0/test.txt'\nessay_list_test, resolved_scores_test, essay_id_test = data_utils.load_testing_data(testing_path, essay_set_id)\n\ntest_batch_size = 15\n\ntestE = []\ntest_scores = []\ntest_essay_id = []\nfor test_index in range(len(essay_id_test)):\n testE.append(E[test_index])\n test_scores.append(resolved_scores_test[test_index])\n test_essay_id.append(essay_id_test[test_index])\n\ntrainE = []\ntrain_scores = []\ntrain_essay_id = []\nfor train_index in range(len(essay_id)):\n trainE.append(E[train_index])\n train_scores.append(resolved_scores[train_index])\n train_essay_id.append(essay_id[train_index])\n\nn_train = len(trainE) \nn_test = len(testE)\n\nmemory = []\nfor i in score_range:\n for j in range(num_samples):\n if i in train_scores:\n score_idx = train_scores.index(i)\n# score = train_scores.pop(score_idx)\n essay = trainE.pop(score_idx)\n# sent_size = sent_size_list.pop(score_idx)\n memory.append(essay)\n\ndef test_step(e, m):\n feed_dict = {\n query: e,\n memory_key: m,\n keep_prob: 1\n #model.w_placeholder: word2vec\n }\n preds = sess.run(output, feed_dict)\n if is_regression:\n preds = np.clip(np.round(preds), min_score, max_score)\n return preds\n else:\n return preds\n \nsaver = tf.train.import_meta_graph(\"runs/essay_set_1_cv_1_Mar_25_2020_19:43:40/checkpoints-2820.meta\") \nwith tf.Session() as sess:\n saver.restore(sess,\"runs/essay_set_1_cv_1_Mar_25_2020_19:43:40/checkpoints-2820\")\n query = graph.get_tensor_by_name(\"input/question:0\")\n memory_key = graph.get_tensor_by_name(\"input/memory_key:0\")\n keep_prob = graph.get_tensor_by_name(\"input/keep_prob:0\")\n# for op in graph.get_operations():\n# print(op.name)\n output = graph.get_tensor_by_name(\"prediction/predict_op:0\")\n# output=tf.get_collection('predict_op:0')\n \n test_preds = []\n for start in range(0, n_test, test_batch_size):\n end = min(n_test, start+test_batch_size)\n print(\"Start: \", start, \"End: \", end)\n# print(\"Memory\", memory)\n batched_memory = [memory] * (end-start)\n# print(\"Batched_memory\", batched_memory)\n 
preds = sess.run(output, feed_dict={query: testE[start:end], memory_key:batched_memory, keep_prob:1})\n# preds = test_step(testE[start:end], batched_memory)\n# print(\"Preds\", preds)\n# preds = preds.tolist()\n predsF = preds.astype('float32') \n if type(predsF) is np.float32:\n test_preds = np.append(test_preds, predsF)\n else:\n preds = preds.astype('int32')\n preds2 = preds.tolist()\n# print(\"Preds2\",preds2) \n for ite in range(len(preds2)):\n# ite2 = ite.astype(numpy.int32)\n# print(\"Ite\", type(ite))\n# print(\"pred ite\", preds2[ite])\n test_preds = np.append(test_preds, preds2[ite])\n# np.concatenate(test_preds, preds2[ite])\n# test_preds.append(preds2[ite])\n if not is_regression:\n# test_preds = np.add(test_preds, min_score)\n #test_kappp_score = kappa(test_scores, test_preds, 'quadratic')\n# test_kappp_score = quadratic_weighted_kappa(test_scores, test_preds, min_score, max_score)\n# print(test_kappp_score)\n# stat_dict = {'pred_score': test_preds}\n stat_dict = {'essay_id': test_essay_id, 'org_score': test_scores, 'pred_score': test_preds}\n pred_dict = {'pred_score':test_preds}\n \n test_kappp_score = quadratic_weighted_kappa(test_scores, test_preds, min_score, max_score)\n print(test_kappp_score)\n stat_df = pd.DataFrame(stat_dict)\n pred_df = pd.DataFrame(pred_dict)\n print(stat_df)\n stat_df.to_csv('statistics/stat_pred_prompt1.csv')\n pred_df.to_csv('statistics/pred_prompt1.csv')",
"import tensorflow as tf\nimport data_utils\nfrom qwk import quadratic_weighted_kappa\nimport time\nimport os\nimport sys\nimport pandas as pd\nimport numpy as np\n\ngraph = tf.get_default_graph()\n\nessay_set_id = 5\nnum_tokens = 42\nembedding_size = 300\nnum_samples = 1\nis_regression = True\n\nearly_stop_count = 0\nmax_step_count = 10\nis_regression = False\ngated_addressing = False\n# essay_set_id = 1\nbatch_size = 15\nembedding_size = 300\nfeature_size = 100\nl2_lambda = 0.3\nhops = 3\nreader = 'bow' # gru may not work\nepochs = 200\nnum_samples = 1\nnum_tokens = 42\ntest_batch_size = batch_size\nrandom_state = 0\n\ntraining_path = 'training_set_rel3.tsv'\nessay_list, resolved_scores, essay_id = data_utils.load_training_data(training_path, essay_set_id)\nprint(len(resolved_scores))\n\nmax_score = max(resolved_scores)\nmin_score = min(resolved_scores)\nif essay_set_id == 7:\n min_score, max_score = 0, 30\nelif essay_set_id == 8:\n min_score, max_score = 0, 60\n\nprint(\"Max Score\", max_score)\nprint(\"Min Score\", min_score)\nscore_range = range(min_score, max_score+1)\n# load glove\nword_idx, word2vec = data_utils.load_glove(num_tokens, dim=embedding_size)\n\nvocab_size = len(word_idx) + 1\nprint(\"vocab size\", vocab_size)\n# stat info on data set\n\nsent_size_list = list(map(len, [essay for essay in essay_list]))\nmax_sent_size = max(sent_size_list)\nprint(\"train max sent size: \", max_sent_size)\nmean_sent_size = int(np.mean(sent_size_list))\nE = data_utils.vectorize_data(essay_list, word_idx, max_sent_size)\n\ntestcases_path = 'AES_FinalTestcases/prompt5/'\n# print(testcases_path)\ntestcases_files = os.listdir(testcases_path)\nprint(\"Number of files\", len(testcases_files))\ntestcases_files = sorted(testcases_files)\ntestcases_files = [\"babel_prompt5.csv\", \"entities_beg_unbound_5.csv\"]\n# te\n# testcases_files.remove(\"babel_prompt5.csv\")\n# testcases_files.remove(\"test4.csv\")\n# testcases_files.remove(\"svo_triplets_all_prompt5.csv\")\n# testcases_files.remove(\"svo_triplets_random_prompt5.csv\")\n# testcases_files.remove(\"entities_beg_unbound_1.csv\")\n# testcases_files.remove(\"entities_end_bound_1.csv\")\n# testcases_files.remove(\"entities_end_unbound_1.csv\")\n# testcases_files.remove(\"entities_mid_bound_1.csv\")\n# testcases_files.remove(\"entities_mid_unbound_1.csv\")\nprint(testcases_files)\n\n# testcases_files = ['disfluency_1.csv', 'incorrect_grammar_1.csv']\nfor testcase in testcases_files:\n print(\"Testcase: \", testcase)\n testing_path = testcases_path+testcase\n apple = True\n# if testcase == 'incorrect_1.csv':\n# df_essay = pd.read_csv(testing_path, engine='python')\n# # df_essay = df_essay[:0]\n# df_score = pd.read_csv('statistics/pred_prompt1.csv')\n# df_essay['rating'] = df_score['pred_score']\n# df_essay['text2'] = df_essay['text']\n# df_essay = df_essay[['text', 'rating']]\n# print(df_essay.head())\n# if (testcase =='test8.csv' or 'babel_prompt1.csv'):\n# continue\n# df_essay = pd.read_csv(testing_path, engine='python')\n# df_essay = df_essay.iloc[:,0]\n# print(df_essay.head())\n if apple==True:\n# print(\"Before: \", df_essay)\n df_essay = pd.read_csv(testing_path, engine='python')\n# print(\"Before: \", df_essay)\n# df_essay[:-1]\n# print(\"After: \", df_essay)\n print(len(df_essay))\n# df_score = pd.read_csv('predicted_scores/org_scores/prompt2_org.csv')\n# print(len(df_score))\n# df_essay['rating'] = df_score['score']\n# print(df_essay.head())\n \n# testing_path = 'aes_data/essay1/fold_0/test.txt'\n essay_list_test, essay_id_test = 
data_utils.load_testcase_data(df_essay, essay_set_id)\n \n# sent_size_list_test = list(map(len, [essay2 for essay2 in essay_list_test]))\n# max_sent_size_test = max(sent_size_list_test)\n# print(\"test max sent size: \", max_sent_size_test)\n# mean_sent_size_test = int(np.mean(sent_size_list_test))\n\n E_test = data_utils.vectorize_data(essay_list_test, word_idx, max_sent_size)\n# print(essay_list_test)\n test_batch_size = 15\n\n testE = []\n test_scores = []\n test_essay_id = []\n for test_index in range(len(essay_id_test)):\n testE.append(E_test[test_index])\n# print(testE)\n# test_scores.append(resolved_scores_test[test_index])\n test_essay_id.append(essay_id_test[test_index])\n\n trainE = []\n train_scores = []\n train_essay_id = []\n for train_index in range(len(essay_id)):\n trainE.append(E[train_index])\n train_scores.append(resolved_scores[train_index])\n train_essay_id.append(essay_id[train_index])\n\n n_train = len(trainE) \n n_test = len(testE)\n print(\"training_data: \", n_train)\n print(\"Testcase: \", testcase, n_test)\n\n memory = []\n for i in score_range:\n for j in range(num_samples):\n if i in train_scores:\n score_idx = train_scores.index(i)\n # score = train_scores.pop(score_idx)\n essay = trainE.pop(score_idx)\n# print(\"essay\", essay)\n # sent_size = sent_size_list.pop(score_idx)\n memory.append(essay)\n\n def test_step(e, m):\n feed_dict = {\n query: e,\n memory_key: m,\n keep_prob: 1\n #model.w_placeholder: word2vec\n }\n preds = sess.run(output, feed_dict)\n if is_regression:\n preds = np.clip(np.round(preds), min_score, max_score)\n return preds\n else:\n return preds\n\n saver = tf.train.import_meta_graph(\"runs/essay_set_5_cv_1_Jun_05_2020_00:26:49/checkpoints-17100.meta\") \n with tf.Session() as sess:\n saver.restore(sess,\"runs/essay_set_5_cv_1_Jun_05_2020_00:26:49/checkpoints-17100\")\n query = graph.get_tensor_by_name(\"input/question:0\")\n memory_key = graph.get_tensor_by_name(\"input/memory_key:0\")\n keep_prob = graph.get_tensor_by_name(\"input/keep_prob:0\")\n # for op in graph.get_operations():\n # print(op.name)\n output = graph.get_tensor_by_name(\"prediction/predict_op:0\")\n# y0 = sess.run([y_])\n # output=tf.get_collection('predict_op:0')\n\n test_preds = []\n for start in range(0, n_test, test_batch_size):\n# print(\"E1\")\n end = min(n_test, start+test_batch_size)\n print(\"Start: \", start, \"End: \", end)\n# print(\"Memory\", memory)\n batched_memory = [memory] * (end-start)\n# print(\"Batched_memory\", batched_memory)\n# print(\"Query: \", testE[start:end])\n# print(\"E2\")\n# print(testE[start:end])\n query_list = []\n for elem2 in testE[start:end]:\n# print(len(elem2))\n if (len(elem2) == 452):\n query_list.append(elem2)\n else:\n elem2 = elem2[:452]\n query_list.append(elem2)\n# elem2 = [int(x) for x in elem2]\n# print(len(elem2))\n# else:\n# print(len(elem2))\n# elem2 = [int(x) for x in elem2]\n for ab in query_list:\n print(len(ab))\n# if (start == 15 and end == 30):\n# print(\"############################\")\n# print(testE[15:30])\n# for elem in batched_memory:\n# print(len(elem))\n# print(batched_memory)\n preds = sess.run(output, feed_dict={query: query_list, memory_key:batched_memory, keep_prob:1})\n print(\"Preds\", preds)\n print(\"type: \", type(preds))\n \n if is_regression:\n preds = np.clip(np.round(preds), min_score, max_score)\n print(preds)\n# return preds\n# else:\n# continue\n # preds = test_step(testE[start:end], batched_memory)\n # print(\"Preds\", preds)\n # preds = preds.tolist()\n predsF = preds.astype('float32') 
\n if type(predsF) is np.float32:\n print(\"Here1\")\n test_preds = np.append(test_preds, predsF)\n else:\n preds = preds.astype('int32')\n print(\"Here2\")\n# preds2 = []\n# for i in range(0, len(preds)): \n# print(preds[i])\n# preds2.append(preds[i])\n try:\n preds2 = preds.tolist()\n except ValueError:\n print(testcase, e)\n pass\n# preds2 = []\n# for i in range(0, len(preds)): \n# print(preds[i])\n# pred2.append(preds[i])\n print(\"type here: \", type(preds))\n preds2 = preds.tolist()\n print(\"Preds2\",preds2)\n# print(\"pred_list\", pred_list)\n for ite in range(len(preds2)):\n# print(\"Done\")\n # ite2 = ite.astype(numpy.int32)\n # print(\"Ite\", type(ite))\n # print(\"pred ite\", preds2[ite])\n test_preds = np.append(test_preds, preds2[ite])\n # np.concatenate(test_preds, preds2[ite])\n # test_preds.append(preds2[ite])\n# if not is_regression:\n # test_preds = np.add(test_preds, min_score)\n #test_kappp_score = kappa(test_scores, test_preds, 'quadratic')\n # test_kappp_score = quadratic_weighted_kappa(test_scores, test_preds, min_score, max_score)\n # print(test_kappp_score)\n # stat_dict = {'pred_score': test_preds}\n# stat_dict = {'essay_id': test_essay_id, 'org_score': test_scores, 'pred_score': test_preds}\n pred_dict = {'pred_score':test_preds}\n\n# test_kappp_score = quadratic_weighted_kappa(test_scores, test_preds, min_score, max_score)\n# print(test_kappp_score)\n# stat_df = pd.DataFrame(stat_dict)\n pred_df = pd.DataFrame(pred_dict)\n# print(stat_df)\n# stat_df.to_csv('predicted_scores/prompt1/stats_'+testcase, index=None)\n pred_df.to_csv('babel/prompt5/pred_'+testcase, index=None)\n"
] |
[
[
"tensorflow.train.import_meta_graph",
"pandas.DataFrame",
"numpy.round",
"numpy.append",
"numpy.mean",
"tensorflow.Session",
"tensorflow.get_default_graph"
],
[
"pandas.read_csv",
"tensorflow.train.import_meta_graph",
"pandas.DataFrame",
"numpy.round",
"numpy.append",
"numpy.mean",
"tensorflow.Session",
"tensorflow.get_default_graph"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
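Both scripts in the record above evaluate predictions with `quadratic_weighted_kappa` imported from a local `qwk` module whose source is not part of the record. For reference, a generic numpy implementation of the metric (an independent sketch, not the record's own code) looks roughly like this:

```python
import numpy as np

def quadratic_weighted_kappa(rater_a, rater_b, min_rating, max_rating):
    """Quadratic weighted kappa between two integer rating vectors on
    [min_rating, max_rating]; 1.0 means perfect agreement."""
    rater_a = np.asarray(rater_a, dtype=int) - min_rating
    rater_b = np.asarray(rater_b, dtype=int) - min_rating
    n = max_rating - min_rating + 1

    # Observed agreement matrix (confusion-matrix style counts).
    observed = np.zeros((n, n), dtype=float)
    for a, b in zip(rater_a, rater_b):
        observed[a, b] += 1

    # Expected matrix from the two marginal histograms, scaled to the same total.
    hist_a = np.bincount(rater_a, minlength=n).astype(float)
    hist_b = np.bincount(rater_b, minlength=n).astype(float)
    expected = np.outer(hist_a, hist_b) / len(rater_a)

    # Quadratic disagreement weights.
    idx = np.arange(n)
    weights = (idx[:, None] - idx[None, :]) ** 2 / (n - 1) ** 2

    return 1.0 - (weights * observed).sum() / (weights * expected).sum()

# Identical ratings score a kappa of 1.0.
print(quadratic_weighted_kappa([2, 4, 4, 8], [2, 4, 4, 8], min_rating=2, max_rating=12))
```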
tallamjr/NeuralCompression
|
[
"21d05ec0d9f8c52d8742fde36f569b4dad2842a5"
] |
[
"neuralcompression/functional/_dense_image_warp.py"
] |
[
"# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom typing import Optional\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import Tensor\n\n\ndef _create_dense_warp_base_grid(\n dim1: int,\n dim2: int,\n dtype: Optional[torch.dtype] = None,\n device: Optional[torch.device] = None,\n) -> Tensor:\n \"\"\"Basic wrapper for meshgrid.\"\"\"\n if dtype is None:\n dtype = torch.get_default_dtype()\n if device is None:\n device = torch.device(\"cpu\")\n\n grid_y, grid_x = torch.meshgrid(\n torch.linspace(-1, 1, dim1, dtype=dtype, device=device),\n torch.linspace(-1, 1, dim2, dtype=dtype, device=device),\n )\n\n # for gridding we need to flip the order here\n base_grid = torch.stack((grid_x, grid_y), dim=-1)\n\n return base_grid.unsqueeze(0)\n\n\ndef dense_image_warp(\n image: Tensor,\n flow: Tensor,\n mode: str = \"bilinear\",\n padding_mode: str = \"border\",\n align_corners: bool = False,\n) -> Tensor:\n r\"\"\"\n Warp image based on flow grid.\n\n Designed to mimic behavior of ``tf.contrib.image.dense_image_warp``. This\n function uses ``torch.nn.functional.grid_sample`` as its interpolation\n backend.\n\n * This function essentially applies inverse optical flow, i.e., for an\n image function :math:`f(x)`, we compute :math:`f(x+\\delta)`, where\n :math:`\\delta` is the flow. The flow uses the normalized grid in\n ``[-1, 1]`` as detailed in ``torch.nn.functional.grid_sample``. See\n ``torch.nn.functional.grid_sample`` for details.\n\n Args:\n image: Input image to be warped.\n flow: Optical flow field for applying warp. Can be different size than\n ``image``.\n mode: Interpolation mode to calculate output values ``'bilinear'`` |\n ``'nearest'`` | ``'bicubic'``. Default: ``'bilinear'``.\n padding_mode: Padding mode for outside grid values ``'zeros'`` |\n ``'border'`` | ``'reflection'``.\n align_corners: Whether to align corners. See\n ``torch.nn.functional.grid_sample``.\n\n Returns:\n The warped image.\n \"\"\"\n if (not image.dtype == flow.dtype) or (not image.device == flow.device):\n raise ValueError(\"Either dtype or device not matched between inputs.\")\n\n if not flow.shape[-1] == 2:\n raise ValueError(\"dense_image_warp only implemented for 2D images.\")\n\n base_grid = _create_dense_warp_base_grid(\n flow.shape[1], flow.shape[2], dtype=image.dtype, device=image.device\n ).repeat(flow.shape[0], 1, 1, 1)\n\n return F.grid_sample(\n image,\n base_grid + flow,\n mode=mode,\n padding_mode=padding_mode,\n align_corners=align_corners,\n )\n"
] |
[
[
"torch.linspace",
"torch.nn.functional.grid_sample",
"torch.stack",
"torch.device",
"torch.get_default_dtype"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
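A quick sanity check for the `dense_image_warp` function in the record above is to warp with an all-zero flow, which should return the input unchanged when `align_corners=True` (the base grid then lands exactly on pixel centers). A short usage sketch, assuming the function is re-exported as `neuralcompression.functional.dense_image_warp` (inferred from the record's file path, not confirmed by it):

```python
import torch

# Assumed public import path, based on neuralcompression/functional/_dense_image_warp.py.
from neuralcompression.functional import dense_image_warp

image = torch.rand(2, 3, 32, 48)      # (N, C, H, W)
flow = torch.zeros(2, 32, 48, 2)      # (N, H, W, 2) offsets on the normalized [-1, 1] grid

warped = dense_image_warp(image, flow, align_corners=True)

# Zero flow with align_corners=True samples each pixel at its own center.
print(torch.allclose(warped, image, atol=1e-5))
```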
lizhaoliu-Lec/Conformer
|
[
"577cff26b78b338f035c075727c408fca3272208",
"577cff26b78b338f035c075727c408fca3272208",
"577cff26b78b338f035c075727c408fca3272208"
] |
[
"mmdetection/mmdet/models/necks/hrfpn.py",
"mmdetection/mmdet/apis/test.py",
"mmdetection/tests/test_models/test_backbones.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule, caffe2_xavier_init\nfrom torch.utils.checkpoint import checkpoint\n\nfrom ..builder import NECKS\n\n\[email protected]_module()\nclass HRFPN(nn.Module):\n \"\"\"HRFPN (High Resolution Feature Pyrmamids)\n\n paper: `High-Resolution Representations for Labeling Pixels and Regions\n <https://arxiv.org/abs/1904.04514>`_.\n\n Args:\n in_channels (list): number of channels for each branch.\n out_channels (int): output channels of feature pyramids.\n num_outs (int): number of output stages.\n pooling_type (str): pooling for generating feature pyramids\n from {MAX, AVG}.\n conv_cfg (dict): dictionary to construct and config conv layer.\n norm_cfg (dict): dictionary to construct and config norm layer.\n with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n memory while slowing down the training speed.\n stride (int): stride of 3x3 convolutional layers\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n num_outs=5,\n pooling_type='AVG',\n conv_cfg=None,\n norm_cfg=None,\n with_cp=False,\n stride=1):\n super(HRFPN, self).__init__()\n assert isinstance(in_channels, list)\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.num_ins = len(in_channels)\n self.num_outs = num_outs\n self.with_cp = with_cp\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n\n self.reduction_conv = ConvModule(\n sum(in_channels),\n out_channels,\n kernel_size=1,\n conv_cfg=self.conv_cfg,\n act_cfg=None)\n\n self.fpn_convs = nn.ModuleList()\n for i in range(self.num_outs):\n self.fpn_convs.append(\n ConvModule(\n out_channels,\n out_channels,\n kernel_size=3,\n padding=1,\n stride=stride,\n conv_cfg=self.conv_cfg,\n act_cfg=None))\n\n if pooling_type == 'MAX':\n self.pooling = F.max_pool2d\n else:\n self.pooling = F.avg_pool2d\n\n def init_weights(self):\n \"\"\"Initialize the weights of module.\"\"\"\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n caffe2_xavier_init(m)\n\n def forward(self, inputs):\n \"\"\"Forward function.\"\"\"\n assert len(inputs) == self.num_ins\n outs = [inputs[0]]\n for i in range(1, self.num_ins):\n outs.append(\n F.interpolate(inputs[i], scale_factor=2 ** i, mode='bilinear'))\n out = torch.cat(outs, dim=1)\n if out.requires_grad and self.with_cp:\n out = checkpoint(self.reduction_conv, out)\n else:\n out = self.reduction_conv(out)\n outs = [out]\n for i in range(1, self.num_outs):\n outs.append(self.pooling(out, kernel_size=2 ** i, stride=2 ** i))\n outputs = []\n\n for i in range(self.num_outs):\n if outs[i].requires_grad and self.with_cp:\n tmp_out = checkpoint(self.fpn_convs[i], outs[i])\n else:\n tmp_out = self.fpn_convs[i](outs[i])\n outputs.append(tmp_out)\n return tuple(outputs)\n",
"import os.path as osp\nimport pickle\nimport shutil\nimport tempfile\nimport time\n\nimport mmcv\nimport torch\nimport torch.distributed as dist\nfrom mmcv.image import tensor2imgs\nfrom mmcv.runner import get_dist_info\n\nfrom mmdet.core import encode_mask_results\n\n\ndef single_gpu_test(model,\n data_loader,\n show=False,\n out_dir=None,\n show_score_thr=0.3):\n model.eval()\n results = []\n dataset = data_loader.dataset\n prog_bar = mmcv.ProgressBar(len(dataset))\n for i, data in enumerate(data_loader):\n with torch.no_grad():\n result = model(return_loss=False, rescale=True, **data)\n\n batch_size = len(result)\n if show or out_dir:\n if batch_size == 1 and isinstance(data['img'][0], torch.Tensor):\n img_tensor = data['img'][0]\n else:\n img_tensor = data['img'][0].data[0]\n img_metas = data['img_metas'][0].data[0]\n imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])\n assert len(imgs) == len(img_metas)\n\n for i, (img, img_meta) in enumerate(zip(imgs, img_metas)):\n h, w, _ = img_meta['img_shape']\n img_show = img[:h, :w, :]\n\n ori_h, ori_w = img_meta['ori_shape'][:-1]\n img_show = mmcv.imresize(img_show, (ori_w, ori_h))\n\n if out_dir:\n out_file = osp.join(out_dir, img_meta['ori_filename'])\n else:\n out_file = None\n\n model.module.show_result(\n img_show,\n result[i],\n show=show,\n out_file=out_file,\n score_thr=show_score_thr)\n\n # encode mask results\n if isinstance(result[0], tuple):\n result = [(bbox_results, encode_mask_results(mask_results))\n for bbox_results, mask_results in result]\n results.extend(result)\n\n for _ in range(batch_size):\n prog_bar.update()\n return results\n\n\ndef multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):\n \"\"\"Test model with multiple gpus.\n\n This method tests model with multiple gpus and collects the results\n under two different modes: gpu and cpu modes. By setting 'gpu_collect=True'\n it encodes results to gpu tensors and use gpu communication for results\n collection. 
On cpu mode it saves the results on different gpus to 'tmpdir'\n and collects them by the rank 0 worker.\n\n Args:\n model (nn.Module): Model to be tested.\n data_loader (nn.Dataloader): Pytorch data loader.\n tmpdir (str): Path of directory to save the temporary results from\n different gpus under cpu mode.\n gpu_collect (bool): Option to use either gpu or cpu to collect results.\n\n Returns:\n list: The prediction results.\n \"\"\"\n model.eval()\n results = []\n dataset = data_loader.dataset\n rank, world_size = get_dist_info()\n if rank == 0:\n prog_bar = mmcv.ProgressBar(len(dataset))\n time.sleep(2) # This line can prevent deadlock problem in some cases.\n for i, data in enumerate(data_loader):\n with torch.no_grad():\n result = model(return_loss=False, rescale=True, **data)\n # encode mask results\n if isinstance(result[0], tuple):\n result = [(bbox_results, encode_mask_results(mask_results))\n for bbox_results, mask_results in result]\n results.extend(result)\n\n if rank == 0:\n batch_size = len(result)\n for _ in range(batch_size * world_size):\n prog_bar.update()\n\n # collect results from all ranks\n if gpu_collect:\n results = collect_results_gpu(results, len(dataset))\n else:\n results = collect_results_cpu(results, len(dataset), tmpdir)\n return results\n\n\ndef collect_results_cpu(result_part, size, tmpdir=None):\n rank, world_size = get_dist_info()\n # create a tmp dir if it is not specified\n if tmpdir is None:\n MAX_LEN = 512\n # 32 is whitespace\n dir_tensor = torch.full((MAX_LEN,),\n 32,\n dtype=torch.uint8,\n device='cuda')\n if rank == 0:\n mmcv.mkdir_or_exist('.dist_test')\n tmpdir = tempfile.mkdtemp(dir='.dist_test')\n tmpdir = torch.tensor(\n bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')\n dir_tensor[:len(tmpdir)] = tmpdir\n dist.broadcast(dir_tensor, 0)\n tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()\n else:\n mmcv.mkdir_or_exist(tmpdir)\n # dump the part result to the dir\n mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl'))\n dist.barrier()\n # collect all parts\n if rank != 0:\n return None\n else:\n # load results of all parts from tmp dir\n part_list = []\n for i in range(world_size):\n part_file = osp.join(tmpdir, f'part_{i}.pkl')\n part_list.append(mmcv.load(part_file))\n # sort the results\n ordered_results = []\n for res in zip(*part_list):\n ordered_results.extend(list(res))\n # the dataloader may pad some samples\n ordered_results = ordered_results[:size]\n # remove tmp dir\n shutil.rmtree(tmpdir)\n return ordered_results\n\n\ndef collect_results_gpu(result_part, size):\n rank, world_size = get_dist_info()\n # dump result part to tensor with pickle\n part_tensor = torch.tensor(\n bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')\n # gather all result part tensor shape\n shape_tensor = torch.tensor(part_tensor.shape, device='cuda')\n shape_list = [shape_tensor.clone() for _ in range(world_size)]\n dist.all_gather(shape_list, shape_tensor)\n # padding result part tensor to max length\n shape_max = torch.tensor(shape_list).max()\n part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')\n part_send[:shape_tensor[0]] = part_tensor\n part_recv_list = [\n part_tensor.new_zeros(shape_max) for _ in range(world_size)\n ]\n # gather all result part\n dist.all_gather(part_recv_list, part_send)\n\n if rank == 0:\n part_list = []\n for recv, shape in zip(part_recv_list, shape_list):\n part_list.append(\n pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()))\n # sort the results\n 
ordered_results = []\n for res in zip(*part_list):\n ordered_results.extend(list(res))\n # the dataloader may pad some samples\n ordered_results = ordered_results[:size]\n return ordered_results\n",
"import pytest\nimport torch\nfrom mmcv.ops import DeformConv2dPack\nfrom torch.nn.modules import AvgPool2d, GroupNorm\nfrom torch.nn.modules.batchnorm import _BatchNorm\n\nfrom mmdet.models.backbones import (RegNet, Res2Net, ResNeSt, ResNet,\n ResNetV1d, ResNeXt, TridentResNet)\nfrom mmdet.models.backbones.hourglass import HourglassNet\nfrom mmdet.models.backbones.res2net import Bottle2neck\nfrom mmdet.models.backbones.resnest import Bottleneck as BottleneckS\nfrom mmdet.models.backbones.resnet import BasicBlock, Bottleneck\nfrom mmdet.models.backbones.resnext import Bottleneck as BottleneckX\nfrom mmdet.models.backbones.trident_resnet import TridentBottleneck\nfrom mmdet.models.utils import ResLayer\n\n\ndef is_block(modules):\n \"\"\"Check if is ResNet building block.\"\"\"\n if isinstance(modules, (BasicBlock, Bottleneck, BottleneckX, Bottle2neck)):\n return True\n return False\n\n\ndef is_norm(modules):\n \"\"\"Check if is one of the norms.\"\"\"\n if isinstance(modules, (GroupNorm, _BatchNorm)):\n return True\n return False\n\n\ndef all_zeros(modules):\n \"\"\"Check if the weight(and bias) is all zero.\"\"\"\n weight_zero = torch.allclose(modules.weight.data,\n torch.zeros_like(modules.weight.data))\n if hasattr(modules, 'bias'):\n bias_zero = torch.allclose(modules.bias.data,\n torch.zeros_like(modules.bias.data))\n else:\n bias_zero = True\n\n return weight_zero and bias_zero\n\n\ndef check_norm_state(modules, train_state):\n \"\"\"Check if norm layer is in correct train state.\"\"\"\n for mod in modules:\n if isinstance(mod, _BatchNorm):\n if mod.training != train_state:\n return False\n return True\n\n\ndef test_resnet_basic_block():\n with pytest.raises(AssertionError):\n # Not implemented yet.\n dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)\n BasicBlock(64, 64, dcn=dcn)\n\n with pytest.raises(AssertionError):\n # Not implemented yet.\n plugins = [\n dict(\n cfg=dict(type='ContextBlock', ratio=1. / 16),\n position='after_conv3')\n ]\n BasicBlock(64, 64, plugins=plugins)\n\n with pytest.raises(AssertionError):\n # Not implemented yet\n plugins = [\n dict(\n cfg=dict(\n type='GeneralizedAttention',\n spatial_range=-1,\n num_heads=8,\n attention_type='0010',\n kv_stride=2),\n position='after_conv2')\n ]\n BasicBlock(64, 64, plugins=plugins)\n\n # test BasicBlock structure and forward\n block = BasicBlock(64, 64)\n assert block.conv1.in_channels == 64\n assert block.conv1.out_channels == 64\n assert block.conv1.kernel_size == (3, 3)\n assert block.conv2.in_channels == 64\n assert block.conv2.out_channels == 64\n assert block.conv2.kernel_size == (3, 3)\n x = torch.randn(1, 64, 56, 56)\n x_out = block(x)\n assert x_out.shape == torch.Size([1, 64, 56, 56])\n\n # Test BasicBlock with checkpoint forward\n block = BasicBlock(64, 64, with_cp=True)\n assert block.with_cp\n x = torch.randn(1, 64, 56, 56)\n x_out = block(x)\n assert x_out.shape == torch.Size([1, 64, 56, 56])\n\n\ndef test_resnet_bottleneck():\n with pytest.raises(AssertionError):\n # Style must be in ['pytorch', 'caffe']\n Bottleneck(64, 64, style='tensorflow')\n\n with pytest.raises(AssertionError):\n # Allowed positions are 'after_conv1', 'after_conv2', 'after_conv3'\n plugins = [\n dict(\n cfg=dict(type='ContextBlock', ratio=1. / 16),\n position='after_conv4')\n ]\n Bottleneck(64, 16, plugins=plugins)\n\n with pytest.raises(AssertionError):\n # Need to specify different postfix to avoid duplicate plugin name\n plugins = [\n dict(\n cfg=dict(type='ContextBlock', ratio=1. 
/ 16),\n position='after_conv3'),\n dict(\n cfg=dict(type='ContextBlock', ratio=1. / 16),\n position='after_conv3')\n ]\n Bottleneck(64, 16, plugins=plugins)\n\n with pytest.raises(KeyError):\n # Plugin type is not supported\n plugins = [dict(cfg=dict(type='WrongPlugin'), position='after_conv3')]\n Bottleneck(64, 16, plugins=plugins)\n\n # Test Bottleneck with checkpoint forward\n block = Bottleneck(64, 16, with_cp=True)\n assert block.with_cp\n x = torch.randn(1, 64, 56, 56)\n x_out = block(x)\n assert x_out.shape == torch.Size([1, 64, 56, 56])\n\n # Test Bottleneck style\n block = Bottleneck(64, 64, stride=2, style='pytorch')\n assert block.conv1.stride == (1, 1)\n assert block.conv2.stride == (2, 2)\n block = Bottleneck(64, 64, stride=2, style='caffe')\n assert block.conv1.stride == (2, 2)\n assert block.conv2.stride == (1, 1)\n\n # Test Bottleneck DCN\n dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)\n with pytest.raises(AssertionError):\n Bottleneck(64, 64, dcn=dcn, conv_cfg=dict(type='Conv'))\n block = Bottleneck(64, 64, dcn=dcn)\n assert isinstance(block.conv2, DeformConv2dPack)\n\n # Test Bottleneck forward\n block = Bottleneck(64, 16)\n x = torch.randn(1, 64, 56, 56)\n x_out = block(x)\n assert x_out.shape == torch.Size([1, 64, 56, 56])\n\n # Test Bottleneck with 1 ContextBlock after conv3\n plugins = [\n dict(\n cfg=dict(type='ContextBlock', ratio=1. / 16),\n position='after_conv3')\n ]\n block = Bottleneck(64, 16, plugins=plugins)\n assert block.context_block.in_channels == 64\n x = torch.randn(1, 64, 56, 56)\n x_out = block(x)\n assert x_out.shape == torch.Size([1, 64, 56, 56])\n\n # Test Bottleneck with 1 GeneralizedAttention after conv2\n plugins = [\n dict(\n cfg=dict(\n type='GeneralizedAttention',\n spatial_range=-1,\n num_heads=8,\n attention_type='0010',\n kv_stride=2),\n position='after_conv2')\n ]\n block = Bottleneck(64, 16, plugins=plugins)\n assert block.gen_attention_block.in_channels == 16\n x = torch.randn(1, 64, 56, 56)\n x_out = block(x)\n assert x_out.shape == torch.Size([1, 64, 56, 56])\n\n # Test Bottleneck with 1 GeneralizedAttention after conv2, 1 NonLocal2D\n # after conv2, 1 ContextBlock after conv3\n plugins = [\n dict(\n cfg=dict(\n type='GeneralizedAttention',\n spatial_range=-1,\n num_heads=8,\n attention_type='0010',\n kv_stride=2),\n position='after_conv2'),\n dict(cfg=dict(type='NonLocal2d'), position='after_conv2'),\n dict(\n cfg=dict(type='ContextBlock', ratio=1. / 16),\n position='after_conv3')\n ]\n block = Bottleneck(64, 16, plugins=plugins)\n assert block.gen_attention_block.in_channels == 16\n assert block.nonlocal_block.in_channels == 16\n assert block.context_block.in_channels == 64\n x = torch.randn(1, 64, 56, 56)\n x_out = block(x)\n assert x_out.shape == torch.Size([1, 64, 56, 56])\n\n # Test Bottleneck with 1 ContextBlock after conv2, 2 ContextBlock after\n # conv3\n plugins = [\n dict(\n cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=1),\n position='after_conv2'),\n dict(\n cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=2),\n position='after_conv3'),\n dict(\n cfg=dict(type='ContextBlock', ratio=1. 
/ 16, postfix=3),\n position='after_conv3')\n ]\n block = Bottleneck(64, 16, plugins=plugins)\n assert block.context_block1.in_channels == 16\n assert block.context_block2.in_channels == 64\n assert block.context_block3.in_channels == 64\n x = torch.randn(1, 64, 56, 56)\n x_out = block(x)\n assert x_out.shape == torch.Size([1, 64, 56, 56])\n\n\ndef test_trident_resnet_bottleneck():\n trident_dilations = (1, 2, 3)\n test_branch_idx = 1\n concat_output = True\n trident_build_config = (trident_dilations, test_branch_idx, concat_output)\n\n with pytest.raises(AssertionError):\n # Style must be in ['pytorch', 'caffe']\n TridentBottleneck(\n *trident_build_config, inplanes=64, planes=64, style='tensorflow')\n\n with pytest.raises(AssertionError):\n # Allowed positions are 'after_conv1', 'after_conv2', 'after_conv3'\n plugins = [\n dict(\n cfg=dict(type='ContextBlock', ratio=1. / 16),\n position='after_conv4')\n ]\n TridentBottleneck(\n *trident_build_config, inplanes=64, planes=16, plugins=plugins)\n\n with pytest.raises(AssertionError):\n # Need to specify different postfix to avoid duplicate plugin name\n plugins = [\n dict(\n cfg=dict(type='ContextBlock', ratio=1. / 16),\n position='after_conv3'),\n dict(\n cfg=dict(type='ContextBlock', ratio=1. / 16),\n position='after_conv3')\n ]\n TridentBottleneck(\n *trident_build_config, inplanes=64, planes=16, plugins=plugins)\n\n with pytest.raises(KeyError):\n # Plugin type is not supported\n plugins = [dict(cfg=dict(type='WrongPlugin'), position='after_conv3')]\n TridentBottleneck(\n *trident_build_config, inplanes=64, planes=16, plugins=plugins)\n\n # Test Bottleneck with checkpoint forward\n block = TridentBottleneck(\n *trident_build_config, inplanes=64, planes=16, with_cp=True)\n assert block.with_cp\n x = torch.randn(1, 64, 56, 56)\n x_out = block(x)\n assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])\n\n # Test Bottleneck style\n block = TridentBottleneck(\n *trident_build_config,\n inplanes=64,\n planes=64,\n stride=2,\n style='pytorch')\n assert block.conv1.stride == (1, 1)\n assert block.conv2.stride == (2, 2)\n block = TridentBottleneck(\n *trident_build_config, inplanes=64, planes=64, stride=2, style='caffe')\n assert block.conv1.stride == (2, 2)\n assert block.conv2.stride == (1, 1)\n\n # Test Bottleneck forward\n block = TridentBottleneck(*trident_build_config, inplanes=64, planes=16)\n x = torch.randn(1, 64, 56, 56)\n x_out = block(x)\n assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])\n\n # Test Bottleneck with 1 ContextBlock after conv3\n plugins = [\n dict(\n cfg=dict(type='ContextBlock', ratio=1. 
/ 16),\n position='after_conv3')\n ]\n block = TridentBottleneck(\n *trident_build_config, inplanes=64, planes=16, plugins=plugins)\n assert block.context_block.in_channels == 64\n x = torch.randn(1, 64, 56, 56)\n x_out = block(x)\n assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])\n\n # Test Bottleneck with 1 GeneralizedAttention after conv2\n plugins = [\n dict(\n cfg=dict(\n type='GeneralizedAttention',\n spatial_range=-1,\n num_heads=8,\n attention_type='0010',\n kv_stride=2),\n position='after_conv2')\n ]\n block = TridentBottleneck(\n *trident_build_config, inplanes=64, planes=16, plugins=plugins)\n assert block.gen_attention_block.in_channels == 16\n x = torch.randn(1, 64, 56, 56)\n x_out = block(x)\n assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])\n\n # Test Bottleneck with 1 GeneralizedAttention after conv2, 1 NonLocal2D\n # after conv2, 1 ContextBlock after conv3\n plugins = [\n dict(\n cfg=dict(\n type='GeneralizedAttention',\n spatial_range=-1,\n num_heads=8,\n attention_type='0010',\n kv_stride=2),\n position='after_conv2'),\n dict(cfg=dict(type='NonLocal2d'), position='after_conv2'),\n dict(\n cfg=dict(type='ContextBlock', ratio=1. / 16),\n position='after_conv3')\n ]\n block = TridentBottleneck(\n *trident_build_config, inplanes=64, planes=16, plugins=plugins)\n assert block.gen_attention_block.in_channels == 16\n assert block.nonlocal_block.in_channels == 16\n assert block.context_block.in_channels == 64\n x = torch.randn(1, 64, 56, 56)\n x_out = block(x)\n assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])\n\n # Test Bottleneck with 1 ContextBlock after conv2, 2 ContextBlock after\n # conv3\n plugins = [\n dict(\n cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=1),\n position='after_conv2'),\n dict(\n cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=2),\n position='after_conv3'),\n dict(\n cfg=dict(type='ContextBlock', ratio=1. 
/ 16, postfix=3),\n position='after_conv3')\n ]\n block = TridentBottleneck(\n *trident_build_config, inplanes=64, planes=16, plugins=plugins)\n assert block.context_block1.in_channels == 16\n assert block.context_block2.in_channels == 64\n assert block.context_block3.in_channels == 64\n x = torch.randn(1, 64, 56, 56)\n x_out = block(x)\n assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])\n\n\ndef test_trident_resnet_backbone():\n tridentresnet_config = dict(\n num_branch=3,\n test_branch_idx=1,\n strides=(1, 2, 2),\n dilations=(1, 1, 1),\n trident_dilations=(1, 2, 3),\n out_indices=(2,),\n )\n \"\"\"Test tridentresnet backbone.\"\"\"\n with pytest.raises(AssertionError):\n # TridentResNet depth should be in [50, 101, 152]\n TridentResNet(18, **tridentresnet_config)\n\n with pytest.raises(AssertionError):\n # In TridentResNet: num_stages == 3\n TridentResNet(50, num_stages=4, **tridentresnet_config)\n\n model = TridentResNet(50, num_stages=3, **tridentresnet_config)\n model.init_weights()\n model.train()\n\n imgs = torch.randn(1, 3, 224, 224)\n feat = model(imgs)\n assert len(feat) == 1\n assert feat[0].shape == torch.Size([3, 1024, 14, 14])\n\n\ndef test_resnet_res_layer():\n # Test ResLayer of 3 Bottleneck w\\o downsample\n layer = ResLayer(Bottleneck, 64, 16, 3)\n assert len(layer) == 3\n assert layer[0].conv1.in_channels == 64\n assert layer[0].conv1.out_channels == 16\n for i in range(1, len(layer)):\n assert layer[i].conv1.in_channels == 64\n assert layer[i].conv1.out_channels == 16\n for i in range(len(layer)):\n assert layer[i].downsample is None\n x = torch.randn(1, 64, 56, 56)\n x_out = layer(x)\n assert x_out.shape == torch.Size([1, 64, 56, 56])\n\n # Test ResLayer of 3 Bottleneck with downsample\n layer = ResLayer(Bottleneck, 64, 64, 3)\n assert layer[0].downsample[0].out_channels == 256\n for i in range(1, len(layer)):\n assert layer[i].downsample is None\n x = torch.randn(1, 64, 56, 56)\n x_out = layer(x)\n assert x_out.shape == torch.Size([1, 256, 56, 56])\n\n # Test ResLayer of 3 Bottleneck with stride=2\n layer = ResLayer(Bottleneck, 64, 64, 3, stride=2)\n assert layer[0].downsample[0].out_channels == 256\n assert layer[0].downsample[0].stride == (2, 2)\n for i in range(1, len(layer)):\n assert layer[i].downsample is None\n x = torch.randn(1, 64, 56, 56)\n x_out = layer(x)\n assert x_out.shape == torch.Size([1, 256, 28, 28])\n\n # Test ResLayer of 3 Bottleneck with stride=2 and average downsample\n layer = ResLayer(Bottleneck, 64, 64, 3, stride=2, avg_down=True)\n assert isinstance(layer[0].downsample[0], AvgPool2d)\n assert layer[0].downsample[1].out_channels == 256\n assert layer[0].downsample[1].stride == (1, 1)\n for i in range(1, len(layer)):\n assert layer[i].downsample is None\n x = torch.randn(1, 64, 56, 56)\n x_out = layer(x)\n assert x_out.shape == torch.Size([1, 256, 28, 28])\n\n # Test ResLayer of 3 BasicBlock with stride=2 and downsample_first=False\n layer = ResLayer(BasicBlock, 64, 64, 3, stride=2, downsample_first=False)\n assert layer[2].downsample[0].out_channels == 64\n assert layer[2].downsample[0].stride == (2, 2)\n for i in range(len(layer) - 1):\n assert layer[i].downsample is None\n x = torch.randn(1, 64, 56, 56)\n x_out = layer(x)\n assert x_out.shape == torch.Size([1, 64, 28, 28])\n\n\ndef test_resnest_stem():\n # Test default stem_channels\n model = ResNet(50)\n assert model.stem_channels == 64\n assert model.conv1.out_channels == 64\n assert model.norm1.num_features == 64\n\n # Test default stem_channels, with base_channels=32\n 
model = ResNet(50, base_channels=32)\n assert model.stem_channels == 32\n assert model.conv1.out_channels == 32\n assert model.norm1.num_features == 32\n assert model.layer1[0].conv1.in_channels == 32\n\n # Test stem_channels=64\n model = ResNet(50, stem_channels=64)\n assert model.stem_channels == 64\n assert model.conv1.out_channels == 64\n assert model.norm1.num_features == 64\n assert model.layer1[0].conv1.in_channels == 64\n\n # Test stem_channels=64, with base_channels=32\n model = ResNet(50, stem_channels=64, base_channels=32)\n assert model.stem_channels == 64\n assert model.conv1.out_channels == 64\n assert model.norm1.num_features == 64\n assert model.layer1[0].conv1.in_channels == 64\n\n # Test stem_channels=128\n model = ResNet(depth=50, stem_channels=128)\n model.init_weights()\n model.train()\n assert model.conv1.out_channels == 128\n assert model.layer1[0].conv1.in_channels == 128\n\n # Test V1d stem_channels\n model = ResNetV1d(depth=50, stem_channels=128)\n model.init_weights()\n model.train()\n assert model.stem[0].out_channels == 64\n assert model.stem[1].num_features == 64\n assert model.stem[3].out_channels == 64\n assert model.stem[4].num_features == 64\n assert model.stem[6].out_channels == 128\n assert model.stem[7].num_features == 128\n assert model.layer1[0].conv1.in_channels == 128\n\n\ndef test_resnet_backbone():\n \"\"\"Test resnet backbone.\"\"\"\n with pytest.raises(KeyError):\n # ResNet depth should be in [18, 34, 50, 101, 152]\n ResNet(20)\n\n with pytest.raises(AssertionError):\n # In ResNet: 1 <= num_stages <= 4\n ResNet(50, num_stages=0)\n\n with pytest.raises(AssertionError):\n # len(stage_with_dcn) == num_stages\n dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)\n ResNet(50, dcn=dcn, stage_with_dcn=(True,))\n\n with pytest.raises(AssertionError):\n # len(stage_with_plugin) == num_stages\n plugins = [\n dict(\n cfg=dict(type='ContextBlock', ratio=1. 
/ 16),\n stages=(False, True, True),\n position='after_conv3')\n ]\n ResNet(50, plugins=plugins)\n\n with pytest.raises(AssertionError):\n # In ResNet: 1 <= num_stages <= 4\n ResNet(50, num_stages=5)\n\n with pytest.raises(AssertionError):\n # len(strides) == len(dilations) == num_stages\n ResNet(50, strides=(1,), dilations=(1, 1), num_stages=3)\n\n with pytest.raises(TypeError):\n # pretrained must be a string path\n model = ResNet(50)\n model.init_weights(pretrained=0)\n\n with pytest.raises(AssertionError):\n # Style must be in ['pytorch', 'caffe']\n ResNet(50, style='tensorflow')\n\n # Test ResNet50 norm_eval=True\n model = ResNet(50, norm_eval=True)\n model.init_weights()\n model.train()\n assert check_norm_state(model.modules(), False)\n\n # Test ResNet50 with torchvision pretrained weight\n model = ResNet(depth=50, norm_eval=True)\n model.init_weights('torchvision://resnet50')\n model.train()\n assert check_norm_state(model.modules(), False)\n\n # Test ResNet50 with first stage frozen\n frozen_stages = 1\n model = ResNet(50, frozen_stages=frozen_stages)\n model.init_weights()\n model.train()\n assert model.norm1.training is False\n for layer in [model.conv1, model.norm1]:\n for param in layer.parameters():\n assert param.requires_grad is False\n for i in range(1, frozen_stages + 1):\n layer = getattr(model, f'layer{i}')\n for mod in layer.modules():\n if isinstance(mod, _BatchNorm):\n assert mod.training is False\n for param in layer.parameters():\n assert param.requires_grad is False\n\n # Test ResNet50V1d with first stage frozen\n model = ResNetV1d(depth=50, frozen_stages=frozen_stages)\n assert len(model.stem) == 9\n model.init_weights()\n model.train()\n check_norm_state(model.stem, False)\n for param in model.stem.parameters():\n assert param.requires_grad is False\n for i in range(1, frozen_stages + 1):\n layer = getattr(model, f'layer{i}')\n for mod in layer.modules():\n if isinstance(mod, _BatchNorm):\n assert mod.training is False\n for param in layer.parameters():\n assert param.requires_grad is False\n\n # Test ResNet18 forward\n model = ResNet(18)\n model.init_weights()\n model.train()\n\n imgs = torch.randn(1, 3, 224, 224)\n feat = model(imgs)\n assert len(feat) == 4\n assert feat[0].shape == torch.Size([1, 64, 56, 56])\n assert feat[1].shape == torch.Size([1, 128, 28, 28])\n assert feat[2].shape == torch.Size([1, 256, 14, 14])\n assert feat[3].shape == torch.Size([1, 512, 7, 7])\n\n # Test ResNet18 with checkpoint forward\n model = ResNet(18, with_cp=True)\n for m in model.modules():\n if is_block(m):\n assert m.with_cp\n\n # Test ResNet50 with BatchNorm forward\n model = ResNet(50)\n for m in model.modules():\n if is_norm(m):\n assert isinstance(m, _BatchNorm)\n model.init_weights()\n model.train()\n\n imgs = torch.randn(1, 3, 224, 224)\n feat = model(imgs)\n assert len(feat) == 4\n assert feat[0].shape == torch.Size([1, 256, 56, 56])\n assert feat[1].shape == torch.Size([1, 512, 28, 28])\n assert feat[2].shape == torch.Size([1, 1024, 14, 14])\n assert feat[3].shape == torch.Size([1, 2048, 7, 7])\n\n # Test ResNet50 with layers 1, 2, 3 out forward\n model = ResNet(50, out_indices=(0, 1, 2))\n model.init_weights()\n model.train()\n\n imgs = torch.randn(1, 3, 224, 224)\n feat = model(imgs)\n assert len(feat) == 3\n assert feat[0].shape == torch.Size([1, 256, 56, 56])\n assert feat[1].shape == torch.Size([1, 512, 28, 28])\n assert feat[2].shape == torch.Size([1, 1024, 14, 14])\n\n # Test ResNet50 with checkpoint forward\n model = ResNet(50, with_cp=True)\n for m in 
model.modules():\n if is_block(m):\n assert m.with_cp\n model.init_weights()\n model.train()\n\n imgs = torch.randn(1, 3, 224, 224)\n feat = model(imgs)\n assert len(feat) == 4\n assert feat[0].shape == torch.Size([1, 256, 56, 56])\n assert feat[1].shape == torch.Size([1, 512, 28, 28])\n assert feat[2].shape == torch.Size([1, 1024, 14, 14])\n assert feat[3].shape == torch.Size([1, 2048, 7, 7])\n\n # Test ResNet50 with GroupNorm forward\n model = ResNet(\n 50, norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))\n for m in model.modules():\n if is_norm(m):\n assert isinstance(m, GroupNorm)\n model.init_weights()\n model.train()\n\n imgs = torch.randn(1, 3, 224, 224)\n feat = model(imgs)\n assert len(feat) == 4\n assert feat[0].shape == torch.Size([1, 256, 56, 56])\n assert feat[1].shape == torch.Size([1, 512, 28, 28])\n assert feat[2].shape == torch.Size([1, 1024, 14, 14])\n assert feat[3].shape == torch.Size([1, 2048, 7, 7])\n\n # Test ResNet50 with 1 GeneralizedAttention after conv2, 1 NonLocal2D\n # after conv2, 1 ContextBlock after conv3 in layers 2, 3, 4\n plugins = [\n dict(\n cfg=dict(\n type='GeneralizedAttention',\n spatial_range=-1,\n num_heads=8,\n attention_type='0010',\n kv_stride=2),\n stages=(False, True, True, True),\n position='after_conv2'),\n dict(cfg=dict(type='NonLocal2d'), position='after_conv2'),\n dict(\n cfg=dict(type='ContextBlock', ratio=1. / 16),\n stages=(False, True, True, False),\n position='after_conv3')\n ]\n model = ResNet(50, plugins=plugins)\n for m in model.layer1.modules():\n if is_block(m):\n assert not hasattr(m, 'context_block')\n assert not hasattr(m, 'gen_attention_block')\n assert m.nonlocal_block.in_channels == 64\n for m in model.layer2.modules():\n if is_block(m):\n assert m.nonlocal_block.in_channels == 128\n assert m.gen_attention_block.in_channels == 128\n assert m.context_block.in_channels == 512\n\n for m in model.layer3.modules():\n if is_block(m):\n assert m.nonlocal_block.in_channels == 256\n assert m.gen_attention_block.in_channels == 256\n assert m.context_block.in_channels == 1024\n\n for m in model.layer4.modules():\n if is_block(m):\n assert m.nonlocal_block.in_channels == 512\n assert m.gen_attention_block.in_channels == 512\n assert not hasattr(m, 'context_block')\n model.init_weights()\n model.train()\n\n imgs = torch.randn(1, 3, 224, 224)\n feat = model(imgs)\n assert len(feat) == 4\n assert feat[0].shape == torch.Size([1, 256, 56, 56])\n assert feat[1].shape == torch.Size([1, 512, 28, 28])\n assert feat[2].shape == torch.Size([1, 1024, 14, 14])\n assert feat[3].shape == torch.Size([1, 2048, 7, 7])\n\n # Test ResNet50 with 1 ContextBlock after conv2, 1 ContextBlock after\n # conv3 in layers 2, 3, 4\n plugins = [\n dict(\n cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=1),\n stages=(False, True, True, False),\n position='after_conv3'),\n dict(\n cfg=dict(type='ContextBlock', ratio=1. 
/ 16, postfix=2),\n stages=(False, True, True, False),\n position='after_conv3')\n ]\n\n model = ResNet(50, plugins=plugins)\n for m in model.layer1.modules():\n if is_block(m):\n assert not hasattr(m, 'context_block')\n assert not hasattr(m, 'context_block1')\n assert not hasattr(m, 'context_block2')\n for m in model.layer2.modules():\n if is_block(m):\n assert not hasattr(m, 'context_block')\n assert m.context_block1.in_channels == 512\n assert m.context_block2.in_channels == 512\n\n for m in model.layer3.modules():\n if is_block(m):\n assert not hasattr(m, 'context_block')\n assert m.context_block1.in_channels == 1024\n assert m.context_block2.in_channels == 1024\n\n for m in model.layer4.modules():\n if is_block(m):\n assert not hasattr(m, 'context_block')\n assert not hasattr(m, 'context_block1')\n assert not hasattr(m, 'context_block2')\n model.init_weights()\n model.train()\n\n imgs = torch.randn(1, 3, 224, 224)\n feat = model(imgs)\n assert len(feat) == 4\n assert feat[0].shape == torch.Size([1, 256, 56, 56])\n assert feat[1].shape == torch.Size([1, 512, 28, 28])\n assert feat[2].shape == torch.Size([1, 1024, 14, 14])\n assert feat[3].shape == torch.Size([1, 2048, 7, 7])\n\n # Test ResNet50 zero initialization of residual\n model = ResNet(50, zero_init_residual=True)\n model.init_weights()\n for m in model.modules():\n if isinstance(m, Bottleneck):\n assert all_zeros(m.norm3)\n elif isinstance(m, BasicBlock):\n assert all_zeros(m.norm2)\n model.train()\n\n imgs = torch.randn(1, 3, 224, 224)\n feat = model(imgs)\n assert len(feat) == 4\n assert feat[0].shape == torch.Size([1, 256, 56, 56])\n assert feat[1].shape == torch.Size([1, 512, 28, 28])\n assert feat[2].shape == torch.Size([1, 1024, 14, 14])\n assert feat[3].shape == torch.Size([1, 2048, 7, 7])\n\n # Test ResNetV1d forward\n model = ResNetV1d(depth=50)\n model.init_weights()\n model.train()\n\n imgs = torch.randn(1, 3, 224, 224)\n feat = model(imgs)\n assert len(feat) == 4\n assert feat[0].shape == torch.Size([1, 256, 56, 56])\n assert feat[1].shape == torch.Size([1, 512, 28, 28])\n assert feat[2].shape == torch.Size([1, 1024, 14, 14])\n assert feat[3].shape == torch.Size([1, 2048, 7, 7])\n\n imgs = torch.randn(1, 3, 224, 224)\n feat = model(imgs)\n assert len(feat) == 4\n assert feat[0].shape == torch.Size([1, 256, 56, 56])\n assert feat[1].shape == torch.Size([1, 512, 28, 28])\n assert feat[2].shape == torch.Size([1, 1024, 14, 14])\n assert feat[3].shape == torch.Size([1, 2048, 7, 7])\n\n imgs = torch.randn(1, 3, 224, 224)\n feat = model(imgs)\n assert len(feat) == 4\n assert feat[0].shape == torch.Size([1, 256, 56, 56])\n assert feat[1].shape == torch.Size([1, 512, 28, 28])\n assert feat[2].shape == torch.Size([1, 1024, 14, 14])\n assert feat[3].shape == torch.Size([1, 2048, 7, 7])\n\n\ndef test_renext_bottleneck():\n with pytest.raises(AssertionError):\n # Style must be in ['pytorch', 'caffe']\n BottleneckX(64, 64, groups=32, base_width=4, style='tensorflow')\n\n # Test ResNeXt Bottleneck structure\n block = BottleneckX(\n 64, 64, groups=32, base_width=4, stride=2, style='pytorch')\n assert block.conv2.stride == (2, 2)\n assert block.conv2.groups == 32\n assert block.conv2.out_channels == 128\n\n # Test ResNeXt Bottleneck with DCN\n dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)\n with pytest.raises(AssertionError):\n # conv_cfg must be None if dcn is not None\n BottleneckX(\n 64,\n 64,\n groups=32,\n base_width=4,\n dcn=dcn,\n conv_cfg=dict(type='Conv'))\n BottleneckX(64, 64, dcn=dcn)\n\n # Test 
ResNeXt Bottleneck forward\n block = BottleneckX(64, 16, groups=32, base_width=4)\n x = torch.randn(1, 64, 56, 56)\n x_out = block(x)\n assert x_out.shape == torch.Size([1, 64, 56, 56])\n\n # Test ResNeXt Bottleneck forward with plugins\n plugins = [\n dict(\n cfg=dict(\n type='GeneralizedAttention',\n spatial_range=-1,\n num_heads=8,\n attention_type='0010',\n kv_stride=2),\n stages=(False, False, True, True),\n position='after_conv2')\n ]\n block = BottleneckX(64, 16, groups=32, base_width=4, plugins=plugins)\n x = torch.randn(1, 64, 56, 56)\n x_out = block(x)\n assert x_out.shape == torch.Size([1, 64, 56, 56])\n\n\ndef test_resnext_backbone():\n with pytest.raises(KeyError):\n # ResNeXt depth should be in [50, 101, 152]\n ResNeXt(depth=18)\n\n # Test ResNeXt with group 32, base_width 4\n model = ResNeXt(depth=50, groups=32, base_width=4)\n for m in model.modules():\n if is_block(m):\n assert m.conv2.groups == 32\n model.init_weights()\n model.train()\n\n imgs = torch.randn(1, 3, 224, 224)\n feat = model(imgs)\n assert len(feat) == 4\n assert feat[0].shape == torch.Size([1, 256, 56, 56])\n assert feat[1].shape == torch.Size([1, 512, 28, 28])\n assert feat[2].shape == torch.Size([1, 1024, 14, 14])\n assert feat[3].shape == torch.Size([1, 2048, 7, 7])\n\n\nregnet_test_data = [\n ('regnetx_400mf',\n dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22,\n bot_mul=1.0), [32, 64, 160, 384]),\n ('regnetx_800mf',\n dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16,\n bot_mul=1.0), [64, 128, 288, 672]),\n ('regnetx_1.6gf',\n dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18,\n bot_mul=1.0), [72, 168, 408, 912]),\n ('regnetx_3.2gf',\n dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25,\n bot_mul=1.0), [96, 192, 432, 1008]),\n ('regnetx_4.0gf',\n dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23,\n bot_mul=1.0), [80, 240, 560, 1360]),\n ('regnetx_6.4gf',\n dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17,\n bot_mul=1.0), [168, 392, 784, 1624]),\n ('regnetx_8.0gf',\n dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23,\n bot_mul=1.0), [80, 240, 720, 1920]),\n ('regnetx_12gf',\n dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19,\n bot_mul=1.0), [224, 448, 896, 2240]),\n]\n\n\[email protected]('arch_name,arch,out_channels', regnet_test_data)\ndef test_regnet_backbone(arch_name, arch, out_channels):\n with pytest.raises(AssertionError):\n # ResNeXt depth should be in [50, 101, 152]\n RegNet(arch_name + '233')\n\n # Test RegNet with arch_name\n model = RegNet(arch_name)\n model.init_weights()\n model.train()\n\n imgs = torch.randn(1, 3, 224, 224)\n feat = model(imgs)\n assert len(feat) == 4\n assert feat[0].shape == torch.Size([1, out_channels[0], 56, 56])\n assert feat[1].shape == torch.Size([1, out_channels[1], 28, 28])\n assert feat[2].shape == torch.Size([1, out_channels[2], 14, 14])\n assert feat[3].shape == torch.Size([1, out_channels[3], 7, 7])\n\n # Test RegNet with arch\n model = RegNet(arch)\n assert feat[0].shape == torch.Size([1, out_channels[0], 56, 56])\n assert feat[1].shape == torch.Size([1, out_channels[1], 28, 28])\n assert feat[2].shape == torch.Size([1, out_channels[2], 14, 14])\n assert feat[3].shape == torch.Size([1, out_channels[3], 7, 7])\n\n\ndef test_res2net_bottle2neck():\n with pytest.raises(AssertionError):\n # Style must be in ['pytorch', 'caffe']\n Bottle2neck(64, 64, base_width=26, scales=4, style='tensorflow')\n\n with pytest.raises(AssertionError):\n # Scale must be larger than 1\n Bottle2neck(64, 64, base_width=26, scales=1, style='pytorch')\n\n # Test 
Res2Net Bottle2neck structure\n block = Bottle2neck(\n 64, 64, base_width=26, stride=2, scales=4, style='pytorch')\n assert block.scales == 4\n\n # Test Res2Net Bottle2neck with DCN\n dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)\n with pytest.raises(AssertionError):\n # conv_cfg must be None if dcn is not None\n Bottle2neck(\n 64,\n 64,\n base_width=26,\n scales=4,\n dcn=dcn,\n conv_cfg=dict(type='Conv'))\n Bottle2neck(64, 64, dcn=dcn)\n\n # Test Res2Net Bottle2neck forward\n block = Bottle2neck(64, 16, base_width=26, scales=4)\n x = torch.randn(1, 64, 56, 56)\n x_out = block(x)\n assert x_out.shape == torch.Size([1, 64, 56, 56])\n\n\ndef test_res2net_backbone():\n with pytest.raises(KeyError):\n # Res2Net depth should be in [50, 101, 152]\n Res2Net(depth=18)\n\n # Test Res2Net with scales 4, base_width 26\n model = Res2Net(depth=50, scales=4, base_width=26)\n for m in model.modules():\n if is_block(m):\n assert m.scales == 4\n model.init_weights()\n model.train()\n\n imgs = torch.randn(1, 3, 224, 224)\n feat = model(imgs)\n assert len(feat) == 4\n assert feat[0].shape == torch.Size([1, 256, 56, 56])\n assert feat[1].shape == torch.Size([1, 512, 28, 28])\n assert feat[2].shape == torch.Size([1, 1024, 14, 14])\n assert feat[3].shape == torch.Size([1, 2048, 7, 7])\n\n\ndef test_hourglass_backbone():\n with pytest.raises(AssertionError):\n # HourglassNet's num_stacks should larger than 0\n HourglassNet(num_stacks=0)\n\n with pytest.raises(AssertionError):\n # len(stage_channels) should equal len(stage_blocks)\n HourglassNet(\n stage_channels=[256, 256, 384, 384, 384],\n stage_blocks=[2, 2, 2, 2, 2, 4])\n\n with pytest.raises(AssertionError):\n # len(stage_channels) should lagrer than downsample_times\n HourglassNet(\n downsample_times=5,\n stage_channels=[256, 256, 384, 384, 384],\n stage_blocks=[2, 2, 2, 2, 2])\n\n # Test HourglassNet-52\n model = HourglassNet(num_stacks=1)\n model.init_weights()\n model.train()\n\n imgs = torch.randn(1, 3, 511, 511)\n feat = model(imgs)\n assert len(feat) == 1\n assert feat[0].shape == torch.Size([1, 256, 128, 128])\n\n # Test HourglassNet-104\n model = HourglassNet(num_stacks=2)\n model.init_weights()\n model.train()\n\n imgs = torch.randn(1, 3, 511, 511)\n feat = model(imgs)\n assert len(feat) == 2\n assert feat[0].shape == torch.Size([1, 256, 128, 128])\n assert feat[1].shape == torch.Size([1, 256, 128, 128])\n\n\ndef test_resnest_bottleneck():\n with pytest.raises(AssertionError):\n # Style must be in ['pytorch', 'caffe']\n BottleneckS(64, 64, radix=2, reduction_factor=4, style='tensorflow')\n\n # Test ResNeSt Bottleneck structure\n block = BottleneckS(\n 64, 256, radix=2, reduction_factor=4, stride=2, style='pytorch')\n assert block.avd_layer.stride == 2\n assert block.conv2.channels == 256\n\n # Test ResNeSt Bottleneck forward\n block = BottleneckS(64, 16, radix=2, reduction_factor=4)\n x = torch.randn(2, 64, 56, 56)\n x_out = block(x)\n assert x_out.shape == torch.Size([2, 64, 56, 56])\n\n\ndef test_resnest_backbone():\n with pytest.raises(KeyError):\n # ResNeSt depth should be in [50, 101, 152, 200]\n ResNeSt(depth=18)\n\n # Test ResNeSt with radix 2, reduction_factor 4\n model = ResNeSt(\n depth=50, radix=2, reduction_factor=4, out_indices=(0, 1, 2, 3))\n model.init_weights()\n model.train()\n\n imgs = torch.randn(2, 3, 224, 224)\n feat = model(imgs)\n assert len(feat) == 4\n assert feat[0].shape == torch.Size([2, 256, 56, 56])\n assert feat[1].shape == torch.Size([2, 512, 28, 28])\n assert feat[2].shape == torch.Size([2, 1024, 
14, 14])\n assert feat[3].shape == torch.Size([2, 2048, 7, 7])\n"
] |
[
[
"torch.nn.ModuleList",
"torch.utils.checkpoint.checkpoint",
"torch.nn.functional.interpolate",
"torch.cat"
],
[
"torch.distributed.broadcast",
"torch.full",
"torch.zeros",
"torch.distributed.all_gather",
"torch.distributed.barrier",
"torch.tensor",
"torch.no_grad"
],
[
"torch.randn",
"torch.Size",
"torch.zeros_like"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tkloong/nn-sg
|
[
"6bf0609d10db0c6378e2dff386dcdbc294b390e8"
] |
[
"src/dnn_data_engine.py"
] |
[
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Modifications copyright (C) 2017 Kent Loong Tan\n# ==============================================================================\n\nimport time\nimport numpy\nfrom tensorflow.python.framework import random_seed\nfrom tensorflow.contrib.learn.python.learn.datasets import base\n\ntry:\n import cPickle as pickle\nexcept:\n import pickle as pickle\n\nclass DataSet(object):\n\n def __init__(self,\n features,\n labels,\n fake_data=False,\n one_hot=False,\n scaling=False,\n seed=None):\n \"\"\"Construct a DataSet.\n one_hot arg is used only if fake_data is true. When `scaling` is true,\n it scales the input from `[0, 255]` into `[0, 1]`.\n \"\"\"\n seed1, seed2 = random_seed.get_seed(seed)\n # If op level seed is not set, use whatever graph level seed is returned\n numpy.random.seed(seed1 if seed is None else seed2)\n if fake_data:\n self._num_examples = 10000\n self.one_hot = one_hot\n else:\n assert features.shape[0] == labels.shape[0], (\n 'features.shape: %s labels.shape: %s' % (features.shape, labels.shape))\n self._num_examples = features.shape[0]\n\n # Convert shape from [num examples, rows, columns, depth]\n # to [num examples, rows*columns*depth] (assuming depth == 1)\n if scaling:\n # Convert from [0, 255] -> [0.0, 1.0].\n features = features.astype(numpy.float32)\n features = numpy.multiply(features, 1.0 / 255.0)\n self._features = features\n self._labels = labels\n self._epochs_completed = 0\n self._index_in_epoch = 0\n\n @property\n def features(self):\n return self._features\n\n @property\n def labels(self):\n return self._labels\n\n @property\n def num_examples(self):\n return self._num_examples\n\n @property\n def epochs_completed(self):\n return self._epochs_completed\n\n def next_batch(self, batch_size, withoutMixWithNextEpoch=False, shuffle=True):\n \"\"\"Return the next `batch_size` examples from this data set.\"\"\"\n start = self._index_in_epoch\n # Shuffle for the first epoch\n if self._epochs_completed == 0 and start == 0 and shuffle:\n perm0 = numpy.arange(self._num_examples)\n numpy.random.shuffle(perm0)\n self._features = self.features[perm0]\n self._labels = self.labels[perm0]\n # Go to the next epoch\n if start + batch_size > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n # Get the rest examples in this epoch\n rest_num_examples = self._num_examples - start\n features_rest_part = self._features[start:self._num_examples]\n labels_rest_part = self._labels[start:self._num_examples]\n # Shuffle the data\n if shuffle:\n perm = numpy.arange(self._num_examples)\n numpy.random.shuffle(perm)\n self._features = self.features[perm]\n self._labels = self.labels[perm]\n if withoutMixWithNextEpoch:\n self._index_in_epoch = 0\n return features_rest_part, labels_rest_part\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size - rest_num_examples\n end = self._index_in_epoch\n features_new_part = self._features[start:end]\n 
labels_new_part = self._labels[start:end]\n return numpy.concatenate((features_rest_part, features_new_part), axis=0) , numpy.concatenate((labels_rest_part, labels_new_part), axis=0)\n else:\n self._index_in_epoch += batch_size\n end = self._index_in_epoch\n return self._features[start:end], self._labels[start:end]\n\ndef svm_read_problem(data_file_name, num_features, return_scipy=False):\n \"\"\"\n svm_read_problem(data_file_name, return_scipy=False) -> [y, x], y: list, x: list of dictionary\n svm_read_problem(data_file_name, return_scipy=True) -> [y, x], y: ndarray, x: csr_matrix\n\n Read LIBSVM-format data from data_file_name and return labels y\n and data instances x.\n \"\"\"\n prob_y = [] \n prob_x = []\n row_ptr = [0]\n col_idx = []\n i = 0\n with open(data_file_name) as fp:\n i = 0\n lines = fp.readlines()\n for line in lines:\n line = line.split(None, 1)\n # In case an instance with all zero features\n if len(line) == 1: line += ['']\n label, features = line\n\n idx = 1\n xi = [0.0] * num_features\n for e in features.split():\n ind, val = e.split(\":\")\n if int(ind) == idx:\n xi[idx-1] = float(val)\n else:\n while (idx < int(ind)):\n idx += 1\n xi[idx-1] = float(val)\n idx += 1\n prob_x += [xi]\n prob_y += [float(label)]\n i += 1\n return (prob_y, prob_x)\n\ndef read_dataset(dataset_name,\n train_path,\n test_path,\n num_classes,\n num_features,\n one_hot=False,\n y_label_offset=0,\n scaling=False,\n validation_size=0,\n seed=None):\n # Read LIBSVM data\n try:\n print('Read data from `../data/' + dataset_name + '/train_data.pkl`...')\n with open('../data/' + dataset_name + '/train_data.pkl', 'rb') as filehandler:\n (y_train, x_train) = pickle.load(filehandler)\n except:\n print('(No such file or directory: `../data/' + dataset_name + '/train_data.pkl`)')\n print('Read data from ' + train_path + '...')\n y_train, x_train = svm_read_problem(train_path, num_features)\n x_train = numpy.array(x_train); y_train = numpy.array(y_train)\n\n try:\n print('Read data from `../data/' + dataset_name + '/test_data.pkl`...')\n with open('../data/' + dataset_name + '/test_data.pkl', 'rb') as filehandler:\n (y_test, x_test) = pickle.load(filehandler)\n except:\n print('(No such file or directory: `../data/' + dataset_name + '/test_data.pkl`)')\n print('Read data from ' + test_path + '...')\n y_test, x_test = svm_read_problem(test_path, num_features)\n x_test = numpy.array(x_test); y_test = numpy.array(y_test)\n\n if y_label_offset != 0:\n y_test -= y_label_offset\n\n if one_hot:\n y_train = dense_to_one_hot(y_train, num_classes)\n y_test = dense_to_one_hot(y_test, num_classes)\n\n if not 0 <= validation_size <= len(x_train):\n raise ValueError(\n 'Validation size should be between 0 and {}. 
Received: {}.'\n .format(len(x_train), validation_size))\n\n validation_features = x_train[:validation_size]\n validation_labels = y_train[:validation_size]\n x_train = x_train[validation_size:]\n y_train = y_train[validation_size:]\n\n options = dict(scaling=scaling, seed=seed)\n\n train = DataSet(x_train, y_train, **options)\n validation = DataSet(validation_features, validation_labels, **options)\n test = DataSet(x_test, y_test, **options)\n \n return base.Datasets(train=train, validation=validation, test=test)\n\ndef dense_to_one_hot(labels_dense, num_classes):\n \"\"\"Convert class labels from scalars to one-hot vectors.\"\"\"\n num_labels = labels_dense.shape[0]\n index_offset = numpy.arange(num_labels) * num_classes\n labels_one_hot = numpy.zeros((num_labels, num_classes))\n labels_one_hot.flat[list(index_offset + labels_dense.ravel())] = 1\n return labels_one_hot\n\nif __name__=='__main__':\n num_inst = 10000\n CHANNEL = 1\n HEIGHT = 28\n WIDTH = 28\n NUM_CLASSES = 10\n TEST_SIZE = 10000.0\n train_path = '/home/loong/data/mnist.scale'\n test_path = '/home/loong/data/mnist.scale.t'\n dataset = read_dataset(train_path, test_path, NUM_CLASSES, CHANNEL, HEIGHT, WIDTH, one_hot=True)\n"
] |
[
[
"numpy.random.seed",
"numpy.multiply",
"numpy.arange",
"tensorflow.contrib.learn.python.learn.datasets.base.Datasets",
"numpy.random.shuffle",
"numpy.concatenate",
"numpy.array",
"numpy.zeros",
"tensorflow.python.framework.random_seed.get_seed"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
}
] |
prazek/jax
|
[
"a41d3e289299ce948f4fa1331ec9ae3ba9ab832d"
] |
[
"tests/pjit_test.py"
] |
[
"# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport re\nfrom functools import partial\nimport logging\nimport threading\nimport unittest\nfrom collections import OrderedDict, namedtuple\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport numpy as np\n\nimport jax\nimport jax.numpy as jnp\nfrom jax._src import test_util as jtu\nfrom jax.errors import JAXTypeError\nfrom jax import lax\n# TODO(skye): do we still wanna call this PartitionSpec?\nfrom jax.experimental import PartitionSpec as P\nfrom jax.experimental.maps import xmap, mesh, Mesh\nfrom jax.experimental import global_device_array\nimport jax.experimental.pjit as pjit_lib\nfrom jax.experimental.pjit import (pjit, pjit_p, with_sharding_constraint,\n SpecSync, FROM_GDA)\nfrom jax.interpreters import pxla\nfrom jax.interpreters import xla\nfrom jax._src.lib import xla_client\nfrom jax._src.util import prod, curry, unzip2, safe_zip\n\nfrom jax.config import config\nconfig.parse_flags_with_absl()\n\n\ndef setUpModule():\n if jax.default_backend() not in {'gpu', 'tpu'}:\n raise unittest.SkipTest(\"pjit only supports GPU and TPU backends\")\n jtu.set_spmd_lowering_flag(True)\n\ndef tearDownModule():\n jtu.restore_spmd_lowering_flag()\n\n\ndef create_gda(global_shape, global_mesh, mesh_axes):\n global_data = np.arange(\n prod(global_shape), dtype=np.float32).reshape(global_shape)\n\n return global_device_array.GlobalDeviceArray.from_callback(\n global_shape, global_mesh, mesh_axes, lambda idx: global_data[idx])\n\n\n@curry\ndef check_1d_2d_mesh(f, set_mesh):\n return parameterized.named_parameters(\n {\"testcase_name\": \"_\" + name, \"mesh\": mesh, \"resources\": resources}\n for name, mesh, resources in (\n (\"2\", ((\"x\", 2),), \"x\"),\n (\"2x1\", ((\"x\", 2), (\"y\", 1)), (\"x\", \"y\")),\n (\"2x2\", ((\"x\", 2), (\"y\", 2)), (\"x\", \"y\")),\n ))(jtu.with_mesh_from_kwargs(f) if set_mesh else f)\n\n\ndef create_global_mesh(mesh_shape, axis_names):\n size = prod(mesh_shape)\n if len(jax.devices()) < size:\n raise unittest.SkipTest(f\"Test requires {size} local devices\")\n mesh_devices = np.array(jax.devices()[:size]).reshape(mesh_shape)\n global_mesh = Mesh(mesh_devices, axis_names)\n return global_mesh\n\n\n# TODO(skye): make the buffer donation utils part of JaxTestCase\nclass PJitTest(jtu.BufferDonationTestCase):\n\n @jtu.with_mesh([('x', 1)])\n def testDeviceBufferAval(self):\n\n @partial(pjit, in_axis_resources=None, out_axis_resources=P('x'))\n def f(x):\n return x\n\n shape = (2, 2)\n x = np.arange(prod(shape), dtype=np.float32).reshape(shape)\n actual = f(x)\n expected = x\n self.assertAllClose(actual, expected, check_dtypes=False)\n self.assertIsInstance(actual, pxla.ShardedDeviceArray)\n self.assertLen(actual.device_buffers, 1)\n self.assertAllClose(\n actual.device_buffers[0].to_py(), expected, check_dtypes=False)\n # Repro for a bug on device_buffer aval\n _ = repr(actual.device_buffers)\n\n @jtu.with_mesh([('x', 2)])\n def testBasic1D(self):\n 
@partial(pjit,\n in_axis_resources=(P('x'), P('x')),\n out_axis_resources=None)\n def f(x, y):\n return x + y\n\n shape = (8, 8)\n x = np.arange(prod(shape), dtype=np.float32).reshape(shape)\n actual = f(x, x + 1)\n expected = x + (x + 1)\n self.assertAllClose(actual, expected, check_dtypes=False)\n self.assertIsInstance(actual, pxla.ShardedDeviceArray)\n self.assertLen(actual.device_buffers, 2)\n self.assertAllClose(actual.device_buffers[0].to_py(), expected,\n check_dtypes=False)\n\n @jtu.with_mesh([('x', 2), ('y', 2)])\n def testBasic2D(self):\n @partial(pjit,\n in_axis_resources=(P(None, 'x', 'y'), P('y')),\n out_axis_resources=P('x'))\n def f(x, y):\n return x @ y\n\n x_shape = (8, 6, 4)\n y_shape = (4, 2)\n x = jnp.arange(np.prod(x_shape)).reshape(x_shape)\n y = jnp.arange(np.prod(y_shape)).reshape(y_shape)\n actual = f(x, y)\n expected = x @ y\n self.assertAllClose(actual, expected, check_dtypes=False)\n self.assertIsInstance(actual, pxla.ShardedDeviceArray)\n self.assertLen(actual.device_buffers, 4)\n\n split0, split1 = np.split(expected, 2)\n self.assertAllClose(actual.device_buffers[0].to_py(), split0,\n check_dtypes=False)\n self.assertAllClose(actual.device_buffers[1].to_py(), split0,\n check_dtypes=False)\n self.assertAllClose(actual.device_buffers[2].to_py(), split1,\n check_dtypes=False)\n self.assertAllClose(actual.device_buffers[3].to_py(), split1,\n check_dtypes=False)\n\n @jtu.with_mesh([('x', 2), ('y', 2)])\n def testTwoMeshAxisSharding(self):\n @partial(pjit,\n in_axis_resources=P(('x', 'y'),),\n out_axis_resources=P(('x', 'y'),))\n def f(x, y):\n return x @ y\n\n shape = (8, 8)\n x = jnp.arange(np.prod(shape)).reshape(shape)\n actual = f(x, x + 1)\n expected = x @ (x + 1)\n self.assertAllClose(actual, expected, check_dtypes=False)\n self.assertIsInstance(actual, pxla.ShardedDeviceArray)\n self.assertLen(actual.device_buffers, 4)\n\n splits = np.split(expected, 4)\n self.assertAllClose(actual.device_buffers[0].to_py(), splits[0],\n check_dtypes=False)\n self.assertAllClose(actual.device_buffers[1].to_py(), splits[1],\n check_dtypes=False)\n self.assertAllClose(actual.device_buffers[2].to_py(), splits[2],\n check_dtypes=False)\n self.assertAllClose(actual.device_buffers[3].to_py(), splits[3],\n check_dtypes=False)\n\n @jtu.with_mesh([('x', 2)])\n def testBufferDonation(self):\n @partial(pjit,\n in_axis_resources=P('x'),\n out_axis_resources=P('x'),\n donate_argnums=0)\n def f(x, y):\n return x + y\n\n shard = pjit(lambda x: x, in_axis_resources=P('x'),\n out_axis_resources=P('x'))\n x = shard(jnp.ones((2, 5)) * 4)\n y = shard(jnp.ones((2, 5)) * 2)\n expected = x + y\n self.assertAllClose(f(x, y), expected)\n self.assertNotDeleted(y)\n self.assertDeleted(x)\n\n @jtu.with_mesh([('x', 2), ('y', 1)])\n def testShardingConstraint(self):\n @partial(pjit, in_axis_resources=None, out_axis_resources=None)\n def f(x):\n y = x + 1\n y = with_sharding_constraint(y, P('x', 'y'))\n return y * 2\n\n shape = (8, 8)\n x = np.arange(prod(shape)).reshape(shape)\n expected = (x + 1) * 2\n actual = f(x)\n self.assertAllClose(actual, expected, check_dtypes=False)\n self.assertIsInstance(actual, pxla.ShardedDeviceArray)\n self.assertLen(actual.device_buffers, 2)\n self.assertAllClose(actual.device_buffers[0].to_py(), expected,\n check_dtypes=False)\n\n hlo = jax.xla_computation(f)(np.ones(shape))\n # Annotation from with_sharding_constraint\n self.assertIn(\"sharding={devices=[2,1]0,1}\", hlo.as_hlo_text())\n # Annotation from pjit\n self.assertIn(\"sharding={replicated}\", 
hlo.as_hlo_text())\n\n @jtu.with_mesh([('x', 2), ('y', 1)])\n def testShardingConstraintPyTree(self):\n @partial(pjit, in_axis_resources=None, out_axis_resources=None)\n def f(x):\n x = with_sharding_constraint(x, [P('x', 'y'), P('y', 'x')])\n x = x.copy()\n x[0][\"a\"] *= 2\n return x\n\n shape = (8, 8)\n v = np.arange(prod(shape)).reshape(shape)\n x = [{\"a\": v, \"b\": v * 2}, v * 3]\n actual = f(x)\n\n expected = x.copy()\n expected[0][\"a\"] *= 2\n self.assertAllClose(actual, expected, check_dtypes=False)\n self.assertLen(actual[0][\"a\"].device_buffers, 2)\n\n hlo = jax.xla_computation(f)(x)\n # Annotations from with_sharding_constraint\n self.assertIn(\"sharding={devices=[2,1]0,1}\", hlo.as_hlo_text())\n self.assertIn(\"sharding={devices=[1,2]0,1}\", hlo.as_hlo_text())\n # Annotation from pjit\n self.assertIn(\"sharding={replicated}\", hlo.as_hlo_text())\n\n def testCaching(self):\n def f(x):\n assert should_be_tracing\n return jnp.sin(x) * 2\n\n x = np.arange(16).reshape(4, 4)\n devices = np.array(list(jax.local_devices())[:4])\n if devices.size < 4:\n raise unittest.SkipTest(\"Test requires 4 devices\")\n devices = devices.reshape((2, 2))\n with mesh(devices, ('x', 'y')):\n should_be_tracing = True\n pjit(f, in_axis_resources=P(('x', 'y')), out_axis_resources=None)(x)\n should_be_tracing = False\n pjit(f, in_axis_resources=P(('x', 'y')), out_axis_resources=None)(x)\n # Re-create the mesh to make sure that has no influence on caching\n with mesh(devices, ('x', 'y')):\n should_be_tracing = False\n pjit(f, in_axis_resources=P(('x', 'y')), out_axis_resources=None)(x)\n\n @jtu.with_mesh([('x', 2), ('y', 1)])\n def testNested(self):\n # Add a constant captured by the nested pjit to make things more complicated\n h = jnp.arange(4)\n f = pjit(lambda x: x.sum() + h.sum(), in_axis_resources=P('x', 'y'), out_axis_resources=None)\n g = pjit(lambda x: f(jnp.sin(x)), in_axis_resources=P('x', None), out_axis_resources=None)\n x = jnp.arange(16).reshape((4, 4))\n y = g(x)\n self.assertAllClose(y, jnp.sin(x).sum() + h.sum())\n self.assertTrue(hasattr(y, \"sharding_spec\"))\n\n @check_1d_2d_mesh(set_mesh=True)\n def testAutodiff(self, mesh, resources):\n if len(mesh) != 2: return\n assert resources == ('x', 'y')\n # Add a constant captured by the nested pjit to make things more complicated\n h = jnp.arange(4)\n f = pjit(lambda x: x.sum(1) * h.sum(),\n in_axis_resources=P('x', 'y'), out_axis_resources=P(('x', 'y')))\n g = pjit(lambda x: f(jnp.sin(x * 4 + 2)),\n in_axis_resources=P('x', None), out_axis_resources=P(('x', 'y')))\n jtu.check_grads(g, (jnp.arange(16, dtype=jnp.float32).reshape((4, 4)) / 100,),\n order=2)\n\n @jtu.with_mesh([('x', 2), ('y', 1)])\n def testEvalJaxpr(self):\n x, y = jnp.arange(4), jnp.arange(5)\n f = pjit(lambda x, y: x.sum() + jnp.sin(y),\n in_axis_resources=(P('x'), P('y')),\n out_axis_resources=P('y'))\n f_jaxpr = jax.make_jaxpr(f)(x, y)\n f_eval = jax.core.jaxpr_as_fun(f_jaxpr)\n r, = f_eval(x, y)\n self.assertAllClose(r, x.sum() + jnp.sin(y))\n\n @jtu.with_mesh([('x', 2)])\n def testNonArrayArg(self):\n self.assertEqual(pjit(lambda x: x + 2,\n in_axis_resources=None,\n out_axis_resources=None)(1), 3)\n\n @jtu.with_mesh([('x', 2)])\n def testNonHashableAxisResources(self):\n x = jnp.arange(4)\n y = pjit(lambda x: {'b': x['a'] + 2},\n in_axis_resources=({'a': P('x')},),\n out_axis_resources={'b': P('x')})({'a': x})\n self.assertAllClose(y, {'b': x + 2})\n\n @jtu.with_mesh([('x', 2)])\n def testGradOfConstraint(self):\n # Make sure that we can compute grads through 
sharding constraints\n h = lambda x: jnp.sin(with_sharding_constraint(x, P('x'))).sum()\n f = pjit(lambda x: jax.grad(h)(x),\n in_axis_resources=None, out_axis_resources=None)\n x = jnp.arange(8, dtype=jnp.float32)\n self.assertAllClose(f(x), jnp.cos(x))\n\n @jtu.with_mesh([('x', 2)])\n def testNoopPartitionSpecs(self):\n noops = [P(), P(None), P(()), P((), None), P(None, None, ())]\n x = jnp.arange(8).reshape((2, 2, 2))\n for spec in noops:\n y = pjit(lambda x: x * 2, in_axis_resources=spec, out_axis_resources=spec)(x)\n self.assertAllClose(y, x * 2)\n\n @jtu.with_mesh([('x', 2)])\n def testVmapModifiesAxisResources(self):\n h = pjit(lambda x, y: (x + y, x, y), in_axis_resources=P('x'), out_axis_resources=None)\n x = jnp.arange(4)\n y = jnp.arange(5*4).reshape((5, 4))\n jaxpr = jax.make_jaxpr(jax.vmap(h, in_axes=(None, 0)))(x, y).jaxpr\n eqn = jaxpr.eqns[0]\n self.assertIs(eqn.primitive, pjit_p)\n x_sync, y_sync = (spec.sync for spec in eqn.params['in_axis_resources'])\n self.assertEqual(x_sync, SpecSync.IN_SYNC)\n self.assertEqual(y_sync, SpecSync.DIM_PERMUTE)\n x_sync, y_sync, z_sync = (spec.sync for spec in eqn.params['out_axis_resources'])\n self.assertEqual(x_sync, SpecSync.DIM_PERMUTE)\n self.assertEqual(y_sync, SpecSync.IN_SYNC)\n self.assertEqual(z_sync, SpecSync.DIM_PERMUTE)\n\n @jtu.with_mesh([('x', 2)])\n def testVMap(self):\n f = pjit(lambda x, y: (x + y, x), in_axis_resources=P('x'), out_axis_resources=P('x'))\n x = jnp.arange(4)\n y = jnp.arange(5*4).reshape((5, 4))\n z, w = jax.vmap(f, in_axes=(None, 0), out_axes=(0, None))(x, y)\n self.assertAllClose(z, x + y)\n self.assertAllClose(w, x)\n self.assertEqual(z.sharding_spec.sharding, (pxla.NoSharding(), pxla.Chunked([2])))\n self.assertEqual(w.sharding_spec.sharding, (pxla.Chunked([2]),))\n\n @jtu.with_mesh([('x', 2)])\n def testVMapShardingConstraint(self):\n f = pjit(lambda x: with_sharding_constraint(x, P('x')),\n in_axis_resources=P(), out_axis_resources=P('x'))\n x = jnp.arange(5*4).reshape((5, 4))\n jaxpr = jax.make_jaxpr(jax.vmap(f))(x)\n pjit_eqn, = jaxpr.eqns\n constraint_eqn, = pjit_eqn.params['jaxpr'].eqns\n self.assertEqual(constraint_eqn.params['axis_resources'].partitions, ((), ('x',)))\n self.assertEqual(constraint_eqn.params['axis_resources'].sync, SpecSync.DIM_PERMUTE)\n\n @jtu.with_mesh([('x', 2), ('y', 1)])\n def testShardingInXMap(self):\n h = pjit(lambda x: x, in_axis_resources=P('x'), out_axis_resources=None)\n f = xmap(lambda x: h(x * 2), in_axes=['i', ...], out_axes=['i', ...],\n axis_resources={'i': 'y'})\n x = jnp.arange(16).reshape((4, 4))\n rule = xla._translations[pjit_p]\n test_rule_called = False\n def _test_rule(*args, **kwargs):\n nonlocal test_rule_called\n test_rule_called = True\n in_axis_resources = kwargs['in_axis_resources']\n self.assertEqual(len(in_axis_resources), 1)\n self.assertIn(('y',), in_axis_resources[0].partitions)\n return rule(*args, **kwargs)\n try:\n xla._translations[pjit_p] = _test_rule\n f(x)\n self.assertTrue(test_rule_called)\n finally:\n xla._translations[pjit_p] = rule\n\n @jtu.with_mesh([('x', 2)])\n def testLowerWithDuckTyping(self):\n x = jax.ShapeDtypeStruct((2, 2), jnp.float32)\n # Make sure this doesn't crash\n pjit(lambda x: x + 4,\n in_axis_resources=P('x'), out_axis_resources=P('x')).lower(x)\n\n @jtu.with_mesh([('x', 2)])\n def testLowerDonateArgnumsAvailable(self):\n x = jax.ShapeDtypeStruct((2, 2), jnp.float32)\n def f(*args):\n x, *_ = args\n return x\n f_low = pjit(f, donate_argnums=(0,),\n in_axis_resources=P('x'), 
out_axis_resources=P('x')).lower(x)\n f_com = f_low.compile()\n f_low.donate_argnums == f_com.donate_argnums == (0,)\n\n def testInfeed(self):\n devices = np.array(jax.local_devices())\n nr_devices = len(devices)\n shape = (nr_devices * 3, nr_devices * 5)\n\n def f_for_jit(x):\n token = lax.create_token(x)\n (y,), token = lax.infeed(\n token, shape=(jax.ShapedArray(x.shape, np.float32),))\n (z,), token = lax.infeed(\n token, shape=(jax.ShapedArray(x.shape, np.float32),))\n (w,), token = lax.infeed(\n token, shape=(jax.ShapedArray(x.shape, np.float32),))\n\n return x + y + z + w\n\n x = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)\n y = x * 2.\n z = x * 3.\n w = x * 4.\n\n # Transfer data to infeed before executing the function. For GPUs, the\n # execution of the compiled function is blocking, so transferring data\n # to infeed before executing ensures that the execution does not deadlock\n # waiting for the infeed data.\n logging.info('Transfering to infeed for the jit call')\n d = devices[0]\n d.transfer_to_infeed((y,))\n d.transfer_to_infeed((z,))\n d.transfer_to_infeed((w,))\n\n # JIT\n logging.info('Making jit call')\n res0 = jax.jit(f_for_jit)(x)\n self.assertAllClose(res0, x + y + z + w, check_dtypes=True)\n\n # PJIT\n def f_for_pjit(x):\n token = lax.create_token(x)\n # A replicated infeed\n (y,), token = lax.infeed(\n token,\n shape=(jax.ShapedArray(x.shape, np.float32),),\n partitions=(None,))\n # An infeed sharded on first axis\n (z,), token = lax.infeed(\n token,\n shape=(jax.ShapedArray(x.shape, np.float32),),\n partitions=(P(nr_devices, 1),))\n # An infeed sharded on second axis\n (w,), token = lax.infeed(\n token,\n shape=(jax.ShapedArray(x.shape, np.float32),),\n partitions=(P(1, nr_devices),))\n return x + y + z + w\n\n logging.info('Transfering to infeed for the pjit call')\n for didx, d in enumerate(devices):\n # Transfer the whole array to all devices for replicated.\n d.transfer_to_infeed((y,))\n # For sharded infeed, transfer only the needed slices to each device.\n d.transfer_to_infeed((z[3 * didx:3 * didx + 3, :]))\n d.transfer_to_infeed((w[:, 5 * didx:5 * didx + 5],))\n\n with mesh(devices, ['d']):\n logging.info('Making pjit call')\n res = pjit(\n f_for_pjit, in_axis_resources=(P('d'),), out_axis_resources=P('d'))(\n x)\n\n self.assertAllClose(res0, res, check_dtypes=True)\n\n def testOutfeed(self):\n devices = np.array(jax.local_devices())\n nr_devices = len(devices)\n shape = (nr_devices * 3, nr_devices * 5)\n\n def f(x):\n token = lax.create_token(x)\n token = lax.outfeed(token, x, partitions=(None,))\n token = lax.outfeed(token, x, partitions=(P(nr_devices, 1),))\n token = lax.outfeed(token, x, partitions=(P(1, nr_devices),))\n return x\n\n x = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)\n\n def dispatch():\n with mesh(devices, ['d']):\n logging.info('Making pjit call')\n pjit(f, in_axis_resources=(P('d'),), out_axis_resources=P('d'))(x)\n execution = threading.Thread(target=dispatch)\n execution.start()\n\n def check_outfeed(d, x):\n y, = d.transfer_from_outfeed(\n xla_client.shape_from_pyval((x,)).with_major_to_minor_layout_if_absent())\n self.assertAllClose(x, y, check_dtypes=True)\n\n logging.info('Transfering from outfeed for the pjit call')\n for didx, d in enumerate(devices):\n # Transfer the whole array from all devices for replicated.\n check_outfeed(d, x)\n # For sharded outfeed, the results are sliced.\n check_outfeed(d, x[3 * didx:3 * didx + 3, :])\n check_outfeed(d, x[:, 5 * didx:5 * didx + 5])\n\n execution.join()\n\n 
@jtu.with_mesh([('x', 2)])\n def testWithCustomPRNGKey(self):\n if not config.jax_enable_custom_prng:\n raise unittest.SkipTest(\"test requires jax_enable_custom_prng\")\n key = jax.prng.seed_with_impl(jax.prng.rbg_prng_impl, 87)\n # Make sure this doesn't crash\n pjit(lambda x: x, in_axis_resources=(None), out_axis_resources=(None))(key)\n\n @jtu.with_mesh([('x', 2), ('y', 2)])\n def testLowerCompile(self):\n @partial(pjit,\n in_axis_resources=P(('x', 'y'),),\n out_axis_resources=P(('x', 'y'),))\n def f(x, y):\n return x @ y\n\n shape = (8, 8)\n x = jnp.arange(np.prod(shape)).reshape(shape)\n expected = x @ (x + 1)\n\n exe = f.lower(x, x + 1).compile()\n actual = exe(x, x + 1)\n\n splits = np.split(expected, 4)\n self.assertAllClose(actual.device_buffers[0].to_py(), splits[0],\n check_dtypes=False)\n self.assertAllClose(actual.device_buffers[1].to_py(), splits[1],\n check_dtypes=False)\n self.assertAllClose(actual.device_buffers[2].to_py(), splits[2],\n check_dtypes=False)\n self.assertAllClose(actual.device_buffers[3].to_py(), splits[3],\n check_dtypes=False)\n\n @jtu.with_mesh([('x', 2), ('y', 2)])\n def testLowerCompileWithKwargs(self):\n @partial(pjit,\n in_axis_resources=P(('x', 'y'),),\n out_axis_resources=P(('x', 'y'),))\n def f(x, y, **kwargs):\n return x @ y\n\n shape = (8, 8)\n x = jnp.arange(np.prod(shape)).reshape(shape)\n exe = f.lower(x, x + 1).compile()\n\n self.assertRaisesRegex(\n NotImplementedError,\n \"function was compiled by a transformation that does not support \"\n \"keyword arguments, but called with keyword arguments: a, b\",\n lambda: exe(x, x + 1, a=1, b=2))\n\n @jtu.with_mesh([('x', 2), ('y', 2)])\n def testLowerCompileInTreeMismatch(self):\n @partial(pjit,\n in_axis_resources=P(('x', 'y'),),\n out_axis_resources=P(('x', 'y'),))\n def f(x, y):\n return x @ y\n\n shape = (8, 8)\n x = jnp.arange(np.prod(shape)).reshape(shape)\n exe = f.lower(x, x + 1).compile()\n\n self.assertRaisesRegex(\n TypeError, \"function compiled for .*, called with .*\",\n lambda: exe([x], [x + 1]))\n\n @jtu.with_mesh([('x', 2), ('y', 2)])\n def testLowerCompileArgTypeMismatch(self):\n @partial(pjit,\n in_axis_resources=P(('x', 'y'),),\n out_axis_resources=P(('x', 'y'),))\n def f(x, y):\n return x @ y\n\n shape = (8, 8)\n x = jnp.arange(np.prod(shape)).reshape(shape)\n x_f32 = x.astype(jnp.float32)\n x_i32 = x.astype(jnp.int32)\n exe = f.lower(x_f32, x_f32).compile()\n self.assertRaisesRegex(\n TypeError,\n \"Computation compiled for input types:\\n.*float32.*\\n\"\n \"called with:\\n.*int32.*\",\n lambda: exe(x_i32, x_i32))\n\n\nclass GDAPjitTest(jtu.JaxTestCase):\n\n @jtu.with_mesh([('x', 4), ('y', 2)])\n def test_pjit_gda_single_output(self):\n global_mesh = create_global_mesh((4, 2), ('x', 'y'))\n global_input_shape = (8, 2)\n mesh_axes = P('x', 'y')\n input_data = np.arange(\n prod(global_input_shape)).reshape(global_input_shape)\n def cb(index):\n return input_data[index]\n\n gda_obj = global_device_array.GlobalDeviceArray.from_callback(\n global_input_shape, global_mesh, mesh_axes, cb)\n\n with jax._src.config.parallel_functions_output_gda(True):\n @partial(pjit, in_axis_resources=FROM_GDA, out_axis_resources=P('x', 'y'))\n def f(x):\n return x @ x.T\n expected_matrix_mul = input_data @ input_data.T\n\n out = f(gda_obj)\n self.assertIsInstance(out, global_device_array.GlobalDeviceArray)\n self.assertEqual(out.shape, (8, 8))\n self.assertEqual(out.local_shards[0].data.shape, (2, 4))\n self.assertDictEqual(out._global_mesh.shape, {'x': 4, 'y': 2})\n for s in out.local_shards:\n 
self.assertArraysEqual(s.data, expected_matrix_mul[s.index])\n\n out2 = f(out)\n self.assertIsInstance(out2, global_device_array.GlobalDeviceArray)\n\n with self.assertRaisesRegex(\n ValueError, ('For a non-GDA input, the corresponding resource in '\n 'in_axis_resources cannot be `pjit.FROM_GDA`.')):\n f(input_data)\n\n @jtu.with_mesh([('x', 4), ('y', 2)])\n def test_pjit_gda_multi_input_multi_output(self):\n global_mesh = create_global_mesh((4, 2), ('x', 'y'))\n global_input_shape = (8, 2)\n input_data = np.arange(\n prod(global_input_shape)).reshape(global_input_shape)\n def cb(index):\n return input_data[index]\n\n mesh_axes1 = P('x', 'y')\n gda1 = global_device_array.GlobalDeviceArray.from_callback(\n global_input_shape, global_mesh, mesh_axes1, cb)\n mesh_axes2 = P('x')\n gda2 = global_device_array.GlobalDeviceArray.from_callback(\n global_input_shape, global_mesh, mesh_axes2, cb)\n mesh_axes3 = P(('x', 'y'))\n gda3 = global_device_array.GlobalDeviceArray.from_callback(\n global_input_shape, global_mesh, mesh_axes3, cb)\n mesh_axes4 = P(None)\n gda4 = global_device_array.GlobalDeviceArray.from_callback(\n global_input_shape, global_mesh, mesh_axes4, cb)\n\n with jax._src.config.parallel_functions_output_gda(True):\n @partial(\n pjit,\n # `FROM_GDA` will be replicated for all the inputs.\n in_axis_resources=FROM_GDA,\n out_axis_resources=(mesh_axes1, mesh_axes4, mesh_axes2, mesh_axes3))\n def f(x, y, z, a):\n return x @ x.T, y, z, a\n out1, out2, out3, out4 = f(gda1, gda2, gda3, gda4)\n\n self.assertIsInstance(out1, global_device_array.GlobalDeviceArray)\n self.assertEqual(out1.shape, (8, 8))\n self.assertEqual(out1.local_shards[0].data.shape, (2, 4))\n self.assertEqual(out1.local_shards[0].index, (slice(0, 2), slice(0, 4)))\n self.assertEqual(out1.local_shards[1].index, (slice(0, 2), slice(4, 8)))\n self.assertListEqual([s.replica_id for s in out1.local_shards],\n [0, 0, 0, 0, 0, 0, 0, 0])\n expected_matrix_mul = input_data @ input_data.T\n for s in out1.local_shards:\n self.assertArraysEqual(s.data, expected_matrix_mul[s.index])\n\n self.assertIsInstance(out2, global_device_array.GlobalDeviceArray)\n self.assertEqual(out2.shape, (8, 2))\n self.assertEqual(out2.local_shards[0].data.shape, (8, 2))\n self.assertEqual(out2.local_shards[0].index, (slice(None), slice(None)))\n self.assertEqual(out2.local_shards[1].index, (slice(None), slice(None)))\n self.assertListEqual([s.replica_id for s in out2.local_shards],\n [0, 1, 2, 3, 4, 5, 6, 7])\n for s in out2.local_shards:\n self.assertArraysEqual(s.data, input_data)\n\n self.assertIsInstance(out3, global_device_array.GlobalDeviceArray)\n self.assertEqual(out3.shape, (8, 2))\n self.assertEqual(out3.local_shards[0].data.shape, (2, 2))\n self.assertEqual(out3.local_shards[0].index, (slice(0, 2), slice(None)))\n self.assertEqual(out3.local_shards[1].index, (slice(0, 2), slice(None)))\n self.assertListEqual([s.replica_id for s in out3.local_shards],\n [0, 1, 0, 1, 0, 1, 0, 1])\n for s in out3.local_shards:\n self.assertArraysEqual(s.data, input_data[s.index])\n\n self.assertIsInstance(out4, global_device_array.GlobalDeviceArray)\n self.assertEqual(out4.shape, (8, 2))\n self.assertEqual(out4.local_shards[0].data.shape, (1, 2))\n self.assertEqual(out4.local_shards[0].index, (slice(0, 1), slice(None)))\n self.assertEqual(out4.local_shards[1].index, (slice(1, 2), slice(None)))\n self.assertListEqual([s.replica_id for s in out4.local_shards],\n [0, 0, 0, 0, 0, 0, 0, 0])\n for s in out4.local_shards:\n self.assertArraysEqual(s.data, 
input_data[s.index])\n\n @jtu.with_mesh([('x', 4), ('y', 2)])\n def test_pjit_gda_mixed_inputs(self):\n global_mesh = create_global_mesh((4, 2), ('x', 'y'))\n global_input_shape = (8, 2)\n mesh_axes = P('x', 'y')\n input_data = np.arange(\n prod(global_input_shape)).reshape(global_input_shape)\n def cb(index):\n return input_data[index]\n\n gda_obj = global_device_array.GlobalDeviceArray.from_callback(\n global_input_shape, global_mesh, mesh_axes, cb)\n\n with jax._src.config.parallel_functions_output_gda(True):\n @partial(pjit,\n in_axis_resources=(FROM_GDA, P('x', 'y')),\n out_axis_resources=(P('x', 'y'), P(('x', 'y'))))\n def f(x, y):\n return x @ x.T, y @ y.T\n expected_matrix_mul = input_data @ input_data.T\n\n out1, out2 = f(gda_obj, input_data)\n self.assertIsInstance(out1, global_device_array.GlobalDeviceArray)\n self.assertEqual(out1.shape, (8, 8))\n self.assertEqual(out1.local_shards[0].data.shape, (2, 4))\n self.assertDictEqual(out1._global_mesh.shape, {'x': 4, 'y': 2})\n for s in out1.local_shards:\n self.assertArraysEqual(s.data, expected_matrix_mul[s.index])\n\n self.assertIsInstance(out2, global_device_array.GlobalDeviceArray)\n self.assertEqual(out2.shape, (8, 8))\n self.assertEqual(out2.local_shards[0].data.shape, (1, 8))\n self.assertDictEqual(out2._global_mesh.shape, {'x': 4, 'y': 2})\n for s in out2.local_shards:\n self.assertArraysEqual(s.data, expected_matrix_mul[s.index])\n\n @jtu.with_mesh([('x', 4), ('y', 2)])\n def test_pjit_gda_non_gda_inputs(self):\n input_shape = (8, 2)\n input_data = np.arange(prod(input_shape)).reshape(input_shape)\n\n with jax._src.config.parallel_functions_output_gda(True):\n @partial(pjit,\n in_axis_resources=(None, P('x', 'y')),\n out_axis_resources=(P('x', 'y'), P(('x', 'y'))))\n def f(x, y):\n return x @ x.T, y @ y.T\n\n expected_matrix_mul = input_data @ input_data.T\n out1, out2 = f(input_data, input_data)\n\n self.assertIsInstance(out1, global_device_array.GlobalDeviceArray)\n self.assertEqual(out1.shape, (8, 8))\n self.assertEqual(out1.local_shards[0].data.shape, (2, 4))\n self.assertDictEqual(out1._global_mesh.shape, {'x': 4, 'y': 2})\n for s in out1.local_shards:\n self.assertArraysEqual(s.data, expected_matrix_mul[s.index])\n\n self.assertIsInstance(out2, global_device_array.GlobalDeviceArray)\n self.assertEqual(out2.shape, (8, 8))\n self.assertEqual(out2.local_shards[0].data.shape, (1, 8))\n self.assertDictEqual(out2._global_mesh.shape, {'x': 4, 'y': 2})\n for s in out2.local_shards:\n self.assertArraysEqual(s.data, expected_matrix_mul[s.index])\n\n @jtu.with_mesh([('x', 2), ('y', 2)])\n def test_pjit_gda_mesh_mismatch(self):\n global_mesh = create_global_mesh((4, 2), ('x', 'y'))\n global_input_shape = (8, 2)\n mesh_axes = ['x', 'y']\n global_input_data = np.arange(\n prod(global_input_shape), dtype=np.float32).reshape(global_input_shape)\n def cb(index):\n return global_input_data[index]\n\n gda_obj = global_device_array.GlobalDeviceArray.from_callback(\n global_input_shape, global_mesh, mesh_axes, cb)\n\n with self.assertRaisesRegex(ValueError,\n \"Pjit's mesh and GDA's mesh should be equal.\"):\n @partial(pjit, in_axis_resources=FROM_GDA, out_axis_resources=P('x', 'y'))\n def f(x):\n return x\n\n f(gda_obj)\n\n @jtu.with_mesh([('x', 4), ('y', 2)])\n def test_pjit_gda_wrong_resource_for_gda_input(self):\n global_mesh = create_global_mesh((4, 2), ('x', 'y'))\n global_input_shape = (8, 2)\n mesh_axes = ['x']\n global_input_data = np.arange(\n prod(global_input_shape), dtype=np.float32).reshape(global_input_shape)\n def 
cb(index):\n return global_input_data[index]\n\n gda_obj = global_device_array.GlobalDeviceArray.from_callback(\n global_input_shape, global_mesh, mesh_axes, cb)\n with self.assertRaisesWithLiteralMatch(\n ValueError,\n \"Got an input GDA to pjit with different partitioning than specified \"\n 'in the in_axis_resources argument to pjit. The partitioning must '\n 'match, or use `jax.experimental.pjit.FROM_GDA` in `in_axis_resources`. '\n \"Got GDA spec: PartitionSpec('x',) and \"\n \"pjit spec: PartitionSpec('x', 'y') \"\n 'for GDA: GlobalDeviceArray(shape=(8, 2), dtype=float32)'):\n @partial(pjit, in_axis_resources=P('x', 'y'), out_axis_resources=P('x', 'y'))\n def f(x):\n return x\n\n f(gda_obj)\n\n @jtu.with_mesh([('x', 4), ('y', 2)])\n def test_pjit_gda_caching(self):\n global_mesh = create_global_mesh((4, 2), ('x', 'y'))\n input_shape = (8, 2)\n mesh_axes = P('x', 'y')\n input_data = np.arange(\n prod(input_shape), dtype=np.float32).reshape(input_shape)\n def cb(index):\n return input_data[index]\n\n gda_obj = global_device_array.GlobalDeviceArray.from_callback(\n input_shape, global_mesh, mesh_axes, cb)\n\n trace_counter = [0]\n @partial(pjit, in_axis_resources=mesh_axes, out_axis_resources=P('x', 'y'))\n def f(x, y):\n trace_counter[0] += 1\n return x @ y.T\n\n f(gda_obj, gda_obj)\n self.assertListEqual(trace_counter, [1])\n f(gda_obj, gda_obj)\n self.assertListEqual(trace_counter, [1])\n f(input_data, input_data)\n self.assertListEqual(trace_counter, [2])\n f(gda_obj, input_data)\n self.assertListEqual(trace_counter, [3])\n\n @jtu.with_mesh([('x', 4), ('y', 2)])\n def test_partition_spec_mismatch_semantically_equivalent(self):\n global_mesh = create_global_mesh((4, 2), ('x', 'y'))\n global_input_shape = (8, 2)\n mesh_axes = [None]\n global_input_data = np.arange(\n prod(global_input_shape), dtype=np.float32).reshape(global_input_shape)\n\n def cb(index):\n return global_input_data[index]\n\n with jax._src.config.parallel_functions_output_gda(True):\n gda_obj = global_device_array.GlobalDeviceArray.from_callback(\n global_input_shape, global_mesh, mesh_axes, cb)\n\n @partial(pjit, in_axis_resources=P(None), out_axis_resources=P(None))\n def f(x):\n return x\n\n output_gda = f(gda_obj)\n # Ensure output_gda._mesh_axes = P() is matched with P(None).\n self.assertEqual(output_gda._mesh_axes, ())\n # P(None) is in_axis_resources.\n f(output_gda)\n\n def test_from_gda_duplicates(self):\n global_mesh = create_global_mesh((1, 2), ('x', 'y'))\n global_input_shape = (8, 2)\n mesh_axes = ['x', 'y']\n input_gda = create_gda(global_input_shape, global_mesh, mesh_axes)\n\n # It's occasionally possible to end up with two FROM_GDA singletons (e.g. if\n # pickling in_axis_resources and sending to other processes). 
Make sure this\n # this doesn't cause an error to avoid user confusion.\n from_gda_dup = pjit_lib._FromGdaSingleton()\n with mesh(global_mesh.devices, global_mesh.axis_names):\n pjit(lambda x: x, in_axis_resources=from_gda_dup, out_axis_resources=None)(\n input_gda)\n\n def test_no_recompilation_due_to_in_axis_resources(self):\n global_mesh = create_global_mesh((1, 2), ('x', 'y'))\n global_input_shape = (8, 2)\n mesh_axes = P(None,)\n input_gda = create_gda(global_input_shape, global_mesh, mesh_axes)\n\n with jax._src.config.parallel_functions_output_gda(True):\n @partial(pjit, in_axis_resources=mesh_axes, out_axis_resources=mesh_axes)\n def f(x):\n return x\n\n with mesh(global_mesh.devices, global_mesh.axis_names):\n out_gda = f(input_gda)\n self.assertEqual(out_gda._mesh_axes, ())\n\n before_cache = pjit_lib._pjit_lower.cache_info()\n f(out_gda)\n after_cache = pjit_lib._pjit_lower.cache_info()\n\n self.assertNotEqual(id(before_cache), id(after_cache))\n self.assertEqual(before_cache.hits + 1, after_cache.hits)\n self.assertEqual(before_cache.misses, after_cache.misses)\n\n\ndef spec_regex(s):\n return str(s).replace(r\"(\", r\"\\(\").replace(r\")\", r\"\\)\")\n\nclass PJitErrorTest(jtu.JaxTestCase):\n @check_1d_2d_mesh(set_mesh=True)\n def testNonDivisibleArgs(self, mesh, resources):\n x = jnp.ones((3, 2))\n spec = P(resources, None)\n mesh_size = str(np.prod([dim[1] for dim in mesh], dtype=np.int64))\n with self.assertRaisesRegex(ValueError,\n r\"One of pjit arguments.*\" + spec_regex(spec) + r\".*\"\n r\"implies that the size of its dimension 0 should be \"\n r\"divisible by \" + mesh_size + r\", but it is equal to 3\"):\n pjit(lambda x: x, in_axis_resources=spec, out_axis_resources=None)(x)\n\n @check_1d_2d_mesh(set_mesh=True)\n def testNonDivisibleOuts(self, mesh, resources):\n x = jnp.ones((3, 2))\n spec = P(resources, None)\n mesh_size = str(np.prod([dim[1] for dim in mesh], dtype=np.int64))\n with self.assertRaisesRegex(ValueError,\n r\"One of pjit outputs.*\" + spec_regex(spec) + r\".*\"\n r\"implies that the size of its dimension 0 should be \"\n r\"divisible by \" + mesh_size + r\", but it is equal to 3\"):\n pjit(lambda x: x, in_axis_resources=None, out_axis_resources=P(resources, None))(x)\n\n @check_1d_2d_mesh(set_mesh=True)\n def testNonDivisibleConstraint(self, mesh, resources):\n x = jnp.ones((3, 2))\n spec = P(resources,)\n mesh_size = str(np.prod([dim[1] for dim in mesh], dtype=np.int64))\n with self.assertRaisesRegex(ValueError,\n r\"One of with_sharding_constraint arguments\"\n r\".*\" + spec_regex(spec) + r\".*implies that the size of \"\n r\"its dimension 0 should be divisible by \" + mesh_size +\n r\", but it is equal to 3\"):\n pjit(lambda x: with_sharding_constraint(x, spec),\n in_axis_resources=None, out_axis_resources=None)(x)\n\n @check_1d_2d_mesh(set_mesh=False)\n @jtu.with_mesh([('z', 1)])\n def testUndefinedResourcesArgs(self, mesh, resources):\n x = jnp.ones((2, 2))\n spec = P(resources,)\n with self.assertRaisesRegex(ValueError,\n r\"One of pjit arguments.*\" + spec_regex(spec) + r\", \"\n r\"but resource axis x is undefined.\"):\n pjit(lambda x: x, in_axis_resources=spec, out_axis_resources=None)(x)\n\n @check_1d_2d_mesh(set_mesh=False)\n @jtu.with_mesh([('z', 1)])\n def testUndefinedResourcesOuts(self, mesh, resources):\n x = jnp.ones((2, 2))\n spec = P(resources,)\n with self.assertRaisesRegex(ValueError,\n r\"One of pjit outputs.*\" + spec_regex(spec) + r\", \"\n r\"but resource axis x is undefined.\"):\n pjit(lambda x: x, in_axis_resources=None, 
out_axis_resources=spec)(x)\n\n @check_1d_2d_mesh(set_mesh=False)\n @jtu.with_mesh([('z', 1)])\n def testUndefinedResourcesConstraint(self, mesh, resources):\n x = jnp.ones((2, 2))\n spec = P(resources,)\n with self.assertRaisesRegex(ValueError,\n r\"One of with_sharding_constraint arguments\"\n r\".*\" + spec_regex(spec) + r\", but resource axis \"\n r\"x is undefined.\"):\n pjit(lambda x: with_sharding_constraint(x, spec),\n in_axis_resources=None, out_axis_resources=None)(x)\n\n @jtu.with_mesh([('x', 2), ('y', 1)])\n def testRankTooLowArgs(self):\n x = jnp.arange(2)\n spec = P('x', 'y')\n error = (r\"One of pjit arguments.*\" + spec_regex(spec) + r\", which implies \"\n r\"that it has a rank of at least 2, but it is 1\")\n with self.assertRaisesRegex(ValueError, error):\n pjit(lambda x: x.sum(), in_axis_resources=spec, out_axis_resources=None)(x)\n\n @jtu.with_mesh([('x', 2), ('y', 1)])\n def testRankTooLowOuts(self):\n x = jnp.arange(2)\n spec = P('x', 'y')\n error = (r\"One of pjit outputs.*\" + spec_regex(spec) + r\", which implies \"\n r\"that it has a rank of at least 2, but it is 0\")\n with self.assertRaisesRegex(ValueError, error):\n pjit(lambda x: x.sum(), in_axis_resources=None, out_axis_resources=spec)(x)\n\n @jtu.with_mesh([('x', 2), ('y', 1)])\n def testRankTooLowConstraint(self):\n x = jnp.arange(2)\n spec = P('x', 'y')\n error = (r\"One of with_sharding_constraint arguments \" +\n r\"was given.*\" + spec_regex(spec) + r\", which implies \"\n r\"that it has a rank of at least 2, but it is 1\")\n with self.assertRaisesRegex(ValueError, error):\n pjit(lambda x: with_sharding_constraint(x, spec),\n in_axis_resources=None, out_axis_resources=None)(x)\n\n @jtu.with_mesh([('x', 2), ('y', 1)])\n def testRepeatedInResources(self):\n x = jnp.arange(2)\n for spec in [P('x', 'x'), P('x', ('y', 'x'))]:\n error = (r\"A single in_axis_resources specification can map every mesh \"\n r\"axis to at most one positional dimension, but \" +\n spec_regex(spec) + \" has duplicate entries for `x`\")\n with self.assertRaisesRegex(ValueError, error):\n pjit(lambda x: x, in_axis_resources=spec, out_axis_resources=None)(x)\n\n @jtu.with_mesh([('x', 2), ('y', 1)])\n def testRepeatedOutResources(self):\n x = jnp.arange(2)\n for spec in [P('x', 'x'), P('x', ('y', 'x'))]:\n error = (r\"A single out_axis_resources specification can map every mesh \"\n r\"axis to at most one positional dimension, but \" +\n spec_regex(spec) + \" has duplicate entries for `x`\")\n with self.assertRaisesRegex(ValueError, error):\n pjit(lambda x: x, in_axis_resources=None, out_axis_resources=spec)(x)\n\n @jtu.with_mesh([('x', 2)])\n def testInputShardsXMapAxis(self):\n spec = P('x')\n f = xmap(pjit(lambda x: x + 2, in_axis_resources=spec, out_axis_resources=None),\n in_axes=['i', ...], out_axes=['i', ...], axis_resources={'i': 'x'})\n x = jnp.arange(4).reshape((2, 2))\n error = (r\"pjit input has an axis resources specification of \" +\n spec_regex(spec) + r\" that uses one or more mesh axes already used by \"\n r\"xmap to partition a named axis appearing in its named_shape \\(both \"\n r\"use mesh axes `x`\\)\")\n with self.assertRaisesRegex(JAXTypeError, error):\n f(x)\n\n @jtu.with_mesh([('x', 2)])\n def testOutputShardsXMapAxis(self):\n spec = P('x')\n f = xmap(pjit(lambda x: x + 2, in_axis_resources=None, out_axis_resources=spec),\n in_axes=['i', ...], out_axes=['i', ...], axis_resources={'i': 'x'})\n x = jnp.arange(4).reshape((2, 2))\n error = (r\"pjit output has an axis resources specification of \" +\n 
spec_regex(spec) + r\" that uses one or more mesh axes already used by \"\n r\"xmap to partition a named axis appearing in its named_shape \\(both \"\n r\"use mesh axes `x`\\)\")\n with self.assertRaisesRegex(JAXTypeError, error):\n f(x)\n\n @jtu.with_mesh([('x', 2)])\n def testConstraintShardsXMapAxis(self):\n spec = P('x')\n f = xmap(lambda x: with_sharding_constraint(x, axis_resources=spec),\n in_axes=['i', ...], out_axes=['i', ...], axis_resources={'i': 'x'})\n x = jnp.arange(4).reshape((2, 2))\n error = (r\"with_sharding_constraint input has an axis resources specification of \" +\n spec_regex(spec) + r\" that uses one or more mesh axes already used by \"\n r\"xmap to partition a named axis appearing in its named_shape \\(both \"\n r\"use mesh axes `x`\\)\")\n with self.assertRaisesRegex(JAXTypeError, error):\n f(x)\n\n @jtu.with_mesh([('x', 2)])\n def testCatchesInnerXMapErrors(self):\n f = pjit(xmap(lambda x, y: x, in_axes=(['i'], ['j']), out_axes=['i', 'j'],\n axis_resources={'i': 'x', 'j': 'x'}),\n in_axis_resources=None, out_axis_resources=None)\n x = jnp.arange(4)\n with self.assertRaises(JAXTypeError):\n f(x, x)\n\n def testEmptyMesh(self):\n error = (r\"pjit requires a non-empty mesh! Are you sure that it's defined \"\n r\"at the call site?\")\n with self.assertRaisesRegex(RuntimeError, error):\n pjit(lambda x: x, in_axis_resources=None, out_axis_resources=None)(jnp.arange(4))\n\n @jtu.with_mesh([('x', 2)])\n def testAxisResourcesMismatch(self):\n x = jnp.ones([])\n p = [None, None, None]\n pjit(lambda x: x, (p,), p)([x, x, x]) # OK\n error = re.escape(\n r\"pjit in_axis_resources specification must be a tree prefix of the \"\n r\"corresponding value, got specification (None, None, None) for value \"\n r\"tree PyTreeDef((*, *)). Note that pjit in_axis_resources that are \"\n r\"non-trivial pytrees should always be wrapped in a tuple representing \"\n r\"the argument list.\")\n with self.assertRaisesRegex(ValueError, error):\n pjit(lambda x, y: x, p, p)(x, x) # Error, but make sure we hint at tupling\n # TODO(apaszke): Disable implicit list casts and enable this\n # error = re.escape(\n # r\"pjit in_axis_resources specification must be a tree prefix of the \"\n # r\"corresponding value, got specification (None, None, None) for value \"\n # r\"tree PyTreeDef(([*, *, *],)). Note that pjit in_axis_resources that \"\n # r\"are non-trivial pytrees should always be wrapped in a tuple representing \"\n # r\"the argument list. 
In particular, you're passing in a single argument \"\n # r\"which means that pjit in_axis_resources might need to be wrapped in a \"\n # r\"singleton tuple.\")\n # with self.assertRaisesRegex(ValueError, error):\n # pjit(lambda x: x, p, p)([x, x, x]) # Error, but make sure we hint at singleton tuple\n error = re.escape(\n r\"pjit out_axis_resources specification must be a tree prefix of the \"\n r\"corresponding value, got specification [[None, None, None], None] for \"\n r\"value tree PyTreeDef([*, *, *]).\")\n with self.assertRaisesRegex(ValueError, error):\n pjit(lambda x: x, (p,), [p, None])([x, x, x]) # Error, we raise a generic tree mismatch message\n\n @jtu.with_mesh([('x', 2)])\n def testNestedDifferentResources(self):\n @partial(pjit, in_axis_resources=P('x'), out_axis_resources=None)\n def f(x):\n with mesh(np.array([jax.local_devices()[0]]), ('x')):\n @partial(pjit, in_axis_resources=P('x'), out_axis_resources=None)\n def h(x):\n return x\n return h(x)\n xshape = (2, 5, 6)\n x = jnp.arange(np.prod(xshape)).reshape(xshape)\n with self.assertRaisesRegex(RuntimeError,\n \"Changing the physical mesh is not allowed.*\"):\n f(x)\n\n\nclass UtilTest(jtu.JaxTestCase):\n\n def testOpShardingRoundTrip(self):\n FakeDevice = namedtuple('FakeDevice', ['id'])\n mesh_named_shape = OrderedDict([('a', 2), ('b', 3), ('c', 4), ('d', 7), ('e', 4)])\n mesh_axes, mesh_shape = unzip2(mesh_named_shape.items())\n devices = [FakeDevice(i) for i in range(np.prod(list(mesh_shape)))]\n mesh = pxla.Mesh(np.array(devices).reshape(*mesh_shape), tuple(mesh_axes))\n\n dims = 5\n aval = jax.core.ShapedArray((len(devices),) * dims, jnp.float32)\n def roundtrip(spec):\n op_sharding = pjit_lib.get_aval_sharding_proto(aval, spec, mesh)\n parsed_spec = pjit_lib.parse_op_sharding(op_sharding, mesh).partitions\n self.assertEqual(parsed_spec[:len(spec)], spec)\n self.assertEqual(parsed_spec[len(spec):], ((),) * (len(parsed_spec) - len(spec)))\n\n special_specs = [P()]\n for spec in special_specs:\n roundtrip(spec)\n\n rng = self.rng()\n for i in range(100):\n spec = [()] * dims\n for axis in rng.permutation(mesh_axes)[:rng.randint(low=1, high=len(mesh_axes) + 1)]:\n spec[rng.choice(dims)] += (axis,)\n roundtrip(P(*spec))\n\n @parameterized.named_parameters(\n (\"linear\", {'x': 0, 'y': 1, 'z': 2}, (('x',), ('y',), ('z',))),\n (\"combine\", {'x': 0, 'y': 0, 'z': 1}, (('x', 'y'), ('z',))),\n (\"skip\", {'x': 0, 'y': 0, 'z': 2}, (('x', 'y'), None, ('z',))),\n (\"multi_skip\", {'x': 0, 'y': 1, 'z': 3}, (('x',), ('y',), None, ('z',))),\n )\n def test_array_mapping_to_axis_resources(self, inp, expected_out):\n self.assertEqual(pxla.array_mapping_to_axis_resources(inp), expected_out)\n\n def test_get_input_metadata_fully_replicated(self):\n global_mesh = create_global_mesh((2, 2), ('x', 'y'))\n global_in_aval1 = jax.core.ShapedArray((4, 4), jnp.int32)\n global_in_aval2 = jax.core.ShapedArray((4, 4, 4), jnp.int32)\n in_avals = [global_in_aval1, global_in_aval2]\n\n _, out_indices, _ = pxla._get_input_metadata(\n in_avals, global_mesh, [{}, {}], [False, False])\n\n self.assertLen(out_indices, len(in_avals))\n self.assertLen(out_indices[0], len(global_mesh.local_devices))\n self.assertLen(out_indices[1], len(global_mesh.local_devices))\n self.assertTrue(all(len(i) == aval.ndim\n for out, aval in safe_zip(out_indices, in_avals) for i in out))\n self.assertTrue(all(i == (slice(None),) * aval.ndim\n for out, aval in safe_zip(out_indices, in_avals) for i in out))\n\n\nif __name__ == '__main__':\n 
absltest.main(testLoader=jtu.JaxTestLoader())\n"
] |
[
[
"numpy.split",
"numpy.arange",
"numpy.ones",
"numpy.prod",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |