repo_name (string, 6–130 chars) | hexsha (list) | file_path (list) | code (list) | apis (list)
---|---|---|---|---|
karhunenloeve/AjinReeses | [
"8f09f65b9f74442137212f98839948a842a115c1"
]
| [
"persistenceStatistics.py"
]
| [
"#!/usr/bin/env python\n\nimport numpy as np\n\nfrom multiprocessing import Pool\nfrom sklearn.neighbors import KDTree\nfrom typing import *\n\ndef hausd_interval(\n data: np.ndarray,\n confidenceLevel: float = 0.95,\n subsampleSize: int = -1,\n subsampleNumber: int = 1000,\n pairwiseDist: bool = False,\n leafSize: int = 2,\n ncores: int = 2,\n) -> float:\n \"\"\"\n **Computation of Hausdorff distance based confidence values.**\n\n Measures the confidence between two persistent features, wether they are drawn from\n a distribution fitting the underlying manifold of the data. This function is based on\n the Hausdorff distance between the points.\n\n + param **data**: a data set, type `np.ndarray`.\n + param **confidenceLevel**: confidence level, default `0.95`, type `float`.\n + param **subsampleSize**: size of each subsample, type `int`.\n + param **subsampleNumber**: number of subsamples, type `int`.\n + param **pairwiseDist**: if `true`, a symmetric `nxn`-matrix is generated out of the data, type `bool`.\n + param **leafSize**: leaf size for KDTree, type `int`.\n + param **ncores**: number of cores for parallel computing, type `int`.\n + return **confidence**: the confidence to be a persistent homology class, type `float`.\n \"\"\"\n dataSize = np.size(data, 0)\n\n if subsampleSize == -1:\n subsampleSize = int(dataSize / np.log(dataSize))\n global hausdorff_distance\n\n if pairwiseDist == False:\n\n def hausdorff_distance(subsampleSize: list) -> float:\n \"\"\"\n **Distances between the points of data and a random subsample of data of size `m`.**\n\n + param **subsampleSize**: the size of the data, type `int`.\n + return **hausdorffDistance**: Hausdorff distance, type `float`.\n \"\"\"\n I = np.random.choice(dataSize, subsampleSize)\n Icomp = [item for item in np.arange(dataSize) if item not in I]\n tree = KDTree(data[I,], leaf_size=leafSize)\n distance, ind = tree.query(data[Icomp,], k=1)\n hausdorffDistance = max(distance)\n return hausdorffDistance\n\n with Pool(ncores) as cores:\n distanceVector = cores.map(\n hausdorff_distance, [subsampleSize] * subsampleNumber\n )\n cores.close()\n\n else:\n\n def hausdorff_distance(subsampleSize: list) -> float:\n \"\"\"\n **Distances between the points of data and a random subsample of data of size `m`.**\n\n + param **subsampleSize**: the size of the data, type `int`.\n + return **hausdorffDistance**: Hausdorff distance, type `float`.\n \"\"\"\n I = np.random.choice(dataSize, subsampleSize)\n hausdorffDistance = np.max(\n [np.min(data[I, j]) for j in np.arange(dataSize) if j not in I]\n )\n return hausdorffDistance\n\n with Pool(ncores) as cores:\n distanceVector = cores.map(\n hausdorff_distance, [subsampleSize] * subsampleNumber\n )\n cores.close()\n distanceVector = [i[0] for i in distanceVector]\n\n # Quantile and confidence band.\n myquantile = np.quantile(distanceVector, confidenceLevel)\n confidence = 2 * myquantile\n\n return confidence\n\n\ndef truncated_simplex_tree(simplexTree: np.ndarray, int_trunc: int = 100) -> tuple:\n \"\"\"\n **This function return a truncated simplex tree.**\n\n A sparse representation of the persistence diagram in the form of a truncated\n persistence tree. 
Speeds up computation on large scale data sets.\n\n + param **simplexTree**: simplex tree, type `np.ndarray`.\n + param **int_trunc**: number of persistence intervals kept per dimension, default is `100`, type `int`.\n + return **simplexTreeTruncatedPersistence**: truncated simplex tree, type `np.ndarray`.\n \"\"\"\n simplexTree.persistence()\n dimension = simplexTree.dimension()\n simplexTreeTruncatedPersistence = []\n\n for i in range(dimension):\n dPersistence = simplexTree.persistence_intervals_in_dimension(i)\n j = len(dPersistence)\n\n if j > int_trunc:\n dPersistenceTruncated = [dPersistence[k] for k in range(j - int_trunc, j)]\n else:\n dPersistenceTruncated = dPersistence\n simplexTreeTruncatedPersistence = simplexTreeTruncatedPersistence + [\n (i, (l[0], l[1])) for l in dPersistenceTruncated\n ]\n\n return simplexTreeTruncatedPersistence\n"
]
| [
[
"numpy.quantile",
"numpy.random.choice",
"numpy.log",
"numpy.min",
"numpy.arange",
"numpy.size",
"sklearn.neighbors.KDTree"
]
]
|
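The row above implements a parallel Hausdorff-distance bootstrap. A minimal usage sketch, assuming `persistenceStatistics.py` is importable from the working directory (the point cloud and parameter values are illustrative, not from the repo):

```python
# Hypothetical usage sketch for hausd_interval; module path and data are assumptions.
import numpy as np
from persistenceStatistics import hausd_interval

if __name__ == "__main__":  # multiprocessing.Pool needs the __main__ guard on some platforms
    data = np.random.rand(500, 2)  # 500 random points in the unit square
    confidence = hausd_interval(data, confidenceLevel=0.95, subsampleNumber=100, ncores=2)
    print(f"Hausdorff-based confidence band: {confidence}")
```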
JacobARose/project-fossils | [
"a0fe21a29d21bb7a1fdb969d819ce4d4243ae8cd"
]
| [
"leavesdb/models/inception_v3.py"
]
| [
"\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras.applications.inception_v3 import InceptionV3, preprocess_input\nfrom tensorflow.keras.layers import Dense, Activation\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.optimizers import SGD\n\n\n\nimg_shape = (299,299)\npreprocess = preprocess_input\n\n\n\ndef build_model(num_classes, learning_rate):\n\n\tbase_model = InceptionV3(weights='imagenet', include_top=True)\n\t\n\tlogits = Dense(num_classes, name='logits')(base_model.layers[-2].output)\n\tpredictions = Activation('softmax',name='predictions')(logits)\n\tmodel = Model(inputs=base_model.input, outputs=predictions)\n\t\n\topt = SGD(lr=learning_rate)#,momentum=SGDmomentum)\n\tmodel.compile(optimizer=opt,\n\t\t\t\t\tloss='categorical_crossentropy',\n\t\t\t\t\tmetrics=['accuracy'])\n\t\t\t\t\t\n\treturn model\n\ndef train_model(model,\n\t\t\t\ttrain_data,\n\t\t\t\tvalidation_data=None, \n\t\t\t\tsteps_per_epoch=None, \n\t\t\t\tvalidation_steps=None, \n\t\t\t\tmax_epochs=None, \n\t\t\t\tcallbacks=None,\n\t\t\t\tworkers=-1,\n\t\t\t\tinitial_epoch=0,\n\t\t\t\tverbose=True):\n\t\n\thistory = model.fit(\n\t\t\t\t\t\ttrain_data,\n\t\t\t\t\t\tsteps_per_epoch=steps_per_epoch,\n\t\t\t\t\t\tepochs=max_epochs,\n\t\t\t\t\t\tvalidation_data=validation_data,\n\t\t\t\t\t\tvalidation_steps=validation_steps,\n\t\t\t\t\t\tcallbacks=callbacks,\n\t\t\t\t\t\tworkers=-1,\n\t\t\t\t\t\tinitial_epoch=initial_epoch,\n\t\t\t\t\t\tverbose=verbose)\n\treturn history"
]
| [
[
"tensorflow.keras.optimizers.SGD",
"tensorflow.keras.applications.inception_v3.InceptionV3",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Dense"
]
]
|
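A minimal usage sketch for `build_model`/`train_model` above, assuming the module import path from the file_path column and a tiny synthetic dataset (note that `build_model` downloads ImageNet weights on first use):

```python
# Hypothetical usage sketch; import path and synthetic data are assumptions.
import numpy as np
import tensorflow as tf
from leavesdb.models.inception_v3 import build_model, train_model, img_shape, preprocess

model = build_model(num_classes=10, learning_rate=0.01)

# Eight fake RGB images shaped like preprocessed InceptionV3 input.
x = preprocess(255.0 * np.random.rand(8, *img_shape, 3).astype("float32"))
y = tf.keras.utils.to_categorical(np.random.randint(0, 10, size=8), num_classes=10)
ds = tf.data.Dataset.from_tensor_slices((x, y)).batch(4)

history = train_model(model, ds, steps_per_epoch=2, max_epochs=1)
```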
c200chromebook/numba | [
"4d43e13fdefe59e1dcc27d7bddbd32a6b9c98af7"
]
| [
"numba/cuda/api.py"
]
| [
"\"\"\"\nAPI that are reported to numba.cuda\n\"\"\"\n\n\nimport contextlib\n\nimport numpy as np\n\nfrom .cudadrv import devicearray, devices, driver\n\n\n# NDarray device helper\n\nrequire_context = devices.require_context\ncurrent_context = devices.get_context\ngpus = devices.gpus\n\n\n@require_context\ndef from_cuda_array_interface(desc, owner=None):\n \"\"\"Create a DeviceNDArray from a cuda-array-interface description.\n The *owner* is the owner of the underlying memory.\n The resulting DeviceNDArray will acquire a reference from it.\n \"\"\"\n version = desc.get('version')\n # Mask introduced in version 1\n if 1 <= version:\n mask = desc.get('mask')\n # Would ideally be better to detect if the mask is all valid\n if mask is not None:\n raise NotImplementedError('Masked arrays are not supported')\n\n shape = desc['shape']\n strides = desc.get('strides')\n dtype = np.dtype(desc['typestr'])\n\n shape, strides, dtype = _prepare_shape_strides_dtype(\n shape, strides, dtype, order='C')\n size = driver.memory_size_from_info(shape, strides, dtype.itemsize)\n\n devptr = driver.get_devptr_for_active_ctx(desc['data'][0])\n data = driver.MemoryPointer(\n current_context(), devptr, size=size, owner=owner)\n da = devicearray.DeviceNDArray(shape=shape, strides=strides,\n dtype=dtype, gpu_data=data)\n return da\n\n\ndef as_cuda_array(obj):\n \"\"\"Create a DeviceNDArray from any object that implements\n the :ref:`cuda array interface <cuda-array-interface>`.\n\n A view of the underlying GPU buffer is created. No copying of the data\n is done. The resulting DeviceNDArray will acquire a reference from `obj`.\n \"\"\"\n if not is_cuda_array(obj):\n raise TypeError(\"*obj* doesn't implement the cuda array interface.\")\n else:\n return from_cuda_array_interface(obj.__cuda_array_interface__,\n owner=obj)\n\n\ndef is_cuda_array(obj):\n \"\"\"Test if the object has defined the `__cuda_array_interface__` attribute.\n\n Does not verify the validity of the interface.\n \"\"\"\n return hasattr(obj, '__cuda_array_interface__')\n\n\n@require_context\ndef to_device(obj, stream=0, copy=True, to=None):\n \"\"\"to_device(obj, stream=0, copy=True, to=None)\n\n Allocate and transfer a numpy ndarray or structured scalar to the device.\n\n To copy host->device a numpy array::\n\n ary = np.arange(10)\n d_ary = cuda.to_device(ary)\n\n To enqueue the transfer to a stream::\n\n stream = cuda.stream()\n d_ary = cuda.to_device(ary, stream=stream)\n\n The resulting ``d_ary`` is a ``DeviceNDArray``.\n\n To copy device->host::\n\n hary = d_ary.copy_to_host()\n\n To copy device->host to an existing array::\n\n ary = np.empty(shape=d_ary.shape, dtype=d_ary.dtype)\n d_ary.copy_to_host(ary)\n\n To enqueue the transfer to a stream::\n\n hary = d_ary.copy_to_host(stream=stream)\n \"\"\"\n if to is None:\n to, new = devicearray.auto_device(obj, stream=stream, copy=copy)\n return to\n if copy:\n to.copy_to_device(obj, stream=stream)\n return to\n\n\n@require_context\ndef device_array(shape, dtype=np.float, strides=None, order='C', stream=0):\n \"\"\"device_array(shape, dtype=np.float, strides=None, order='C', stream=0)\n\n Allocate an empty device ndarray. 
Similar to :meth:`numpy.empty`.\n \"\"\"\n shape, strides, dtype = _prepare_shape_strides_dtype(shape, strides, dtype,\n order)\n return devicearray.DeviceNDArray(shape=shape, strides=strides, dtype=dtype,\n stream=stream)\n\n\n@require_context\ndef managed_array(shape, dtype=np.float, strides=None, order='C', stream=0,\n attach_global=True):\n \"\"\"managed_array(shape, dtype=np.float, strides=None, order='C', stream=0,\n attach_global=True)\n\n Allocate a np.ndarray with a buffer that is managed.\n Similar to np.empty().\n\n :param attach_global: A flag indicating whether to attach globally. Global\n attachment implies that the memory is accessible from\n any stream on any device. If ``False``, attachment is\n *host*, and memory is only accessible by devices\n with Compute Capability 6.0 and later.\n \"\"\"\n shape, strides, dtype = _prepare_shape_strides_dtype(shape, strides, dtype,\n order)\n bytesize = driver.memory_size_from_info(shape, strides, dtype.itemsize)\n buffer = current_context().memallocmanaged(bytesize,\n attach_global=attach_global)\n npary = np.ndarray(shape=shape, strides=strides, dtype=dtype, order=order,\n buffer=buffer)\n managedview = np.ndarray.view(npary, type=devicearray.ManagedNDArray)\n managedview.device_setup(buffer, strides, stream=stream)\n return managedview\n\n\n@require_context\ndef pinned_array(shape, dtype=np.float, strides=None, order='C'):\n \"\"\"pinned_array(shape, dtype=np.float, strides=None, order='C')\n\n Allocate an :class:`ndarray <numpy.ndarray>` with a buffer that is pinned\n (pagelocked). Similar to :func:`np.empty() <numpy.empty>`.\n \"\"\"\n shape, strides, dtype = _prepare_shape_strides_dtype(shape, strides, dtype,\n order)\n bytesize = driver.memory_size_from_info(shape, strides,\n dtype.itemsize)\n buffer = current_context().memhostalloc(bytesize)\n return np.ndarray(shape=shape, strides=strides, dtype=dtype, order=order,\n buffer=buffer)\n\n\n@require_context\ndef mapped_array(shape, dtype=np.float, strides=None, order='C', stream=0,\n portable=False, wc=False):\n \"\"\"mapped_array(shape, dtype=np.float, strides=None, order='C', stream=0,\n portable=False, wc=False)\n\n Allocate a mapped ndarray with a buffer that is pinned and mapped on\n to the device. Similar to np.empty()\n\n :param portable: a boolean flag to allow the allocated device memory to be\n usable in multiple devices.\n :param wc: a boolean flag to enable writecombined allocation which is faster\n to write by the host and to read by the device, but slower to\n write by the host and slower to write by the device.\n \"\"\"\n shape, strides, dtype = _prepare_shape_strides_dtype(shape, strides, dtype,\n order)\n bytesize = driver.memory_size_from_info(shape, strides, dtype.itemsize)\n buffer = current_context().memhostalloc(bytesize, mapped=True)\n npary = np.ndarray(shape=shape, strides=strides, dtype=dtype, order=order,\n buffer=buffer)\n mappedview = np.ndarray.view(npary, type=devicearray.MappedNDArray)\n mappedview.device_setup(buffer, strides, stream=stream)\n return mappedview\n\n\[email protected]\n@require_context\ndef open_ipc_array(handle, shape, dtype, strides=None, offset=0):\n \"\"\"\n A context manager that opens a IPC *handle* (*CUipcMemHandle*) that is\n represented as a sequence of bytes (e.g. *bytes*, tuple of int)\n and represent it as an array of the given *shape*, *strides* and *dtype*.\n The *strides* can be omitted. 
In that case, it is assumed to be a 1D\n C contiguous array.\n\n Yields a device array.\n\n The IPC handle is closed automatically when context manager exits.\n \"\"\"\n dtype = np.dtype(dtype)\n # compute size\n size = np.prod(shape) * dtype.itemsize\n # manually recreate the IPC mem handle\n handle = driver.drvapi.cu_ipc_mem_handle(*handle)\n # use *IpcHandle* to open the IPC memory\n ipchandle = driver.IpcHandle(None, handle, size, offset=offset)\n yield ipchandle.open_array(current_context(), shape=shape,\n strides=strides, dtype=dtype)\n ipchandle.close()\n\n\ndef synchronize():\n \"Synchronize the current context.\"\n return current_context().synchronize()\n\n\ndef _prepare_shape_strides_dtype(shape, strides, dtype, order):\n dtype = np.dtype(dtype)\n if isinstance(shape, int):\n shape = (shape,)\n if isinstance(strides, int):\n strides = (strides,)\n else:\n if shape == ():\n shape = (1,)\n strides = strides or _fill_stride_by_order(shape, dtype, order)\n return shape, strides, dtype\n\n\ndef _fill_stride_by_order(shape, dtype, order):\n nd = len(shape)\n strides = [0] * nd\n if order == 'C':\n strides[-1] = dtype.itemsize\n for d in reversed(range(nd - 1)):\n strides[d] = strides[d + 1] * shape[d + 1]\n elif order == 'F':\n strides[0] = dtype.itemsize\n for d in range(1, nd):\n strides[d] = strides[d - 1] * shape[d - 1]\n else:\n raise ValueError('must be either C/F order')\n return tuple(strides)\n\n\ndef _contiguous_strides_like_array(ary):\n \"\"\"\n Given an array, compute strides for a new contiguous array of the same\n shape.\n \"\"\"\n # Don't recompute strides if the default strides will be sufficient to\n # create a contiguous array.\n if ary.flags['C_CONTIGUOUS'] or ary.flags['F_CONTIGUOUS'] or ary.ndim <= 1:\n return None\n\n # Otherwise, we need to compute new strides using an algorithm adapted from\n # NumPy v1.17.4's PyArray_NewLikeArrayWithShape in\n # core/src/multiarray/ctors.c. We permute the strides in ascending order\n # then compute the stride for the dimensions with the same permutation.\n\n # Stride permutation. E.g. 
a stride array (4, -2, 12) becomes\n # [(1, -2), (0, 4), (2, 12)]\n strideperm = [ x for x in enumerate(ary.strides) ]\n strideperm.sort(key=lambda x: x[1])\n\n # Compute new strides using permutation\n strides = [0] * len(ary.strides)\n stride = ary.dtype.itemsize\n for i_perm, _ in strideperm:\n strides[i_perm] = stride\n stride *= ary.shape[i_perm]\n return tuple(strides)\n\n\ndef _order_like_array(ary):\n if ary.flags['F_CONTIGUOUS'] and not ary.flags['C_CONTIGUOUS']:\n return 'F'\n else:\n return 'C'\n\n\ndef device_array_like(ary, stream=0):\n \"\"\"\n Call :func:`device_array() <numba.cuda.device_array>` with information from\n the array.\n \"\"\"\n strides = _contiguous_strides_like_array(ary)\n order = _order_like_array(ary)\n return device_array(shape=ary.shape, dtype=ary.dtype, strides=strides,\n order=order, stream=stream)\n\n\ndef mapped_array_like(ary, stream=0, portable=False, wc=False):\n \"\"\"\n Call :func:`mapped_array() <numba.cuda.mapped_array>` with the information\n from the array.\n \"\"\"\n strides = _contiguous_strides_like_array(ary)\n order = _order_like_array(ary)\n return mapped_array(shape=ary.shape, dtype=ary.dtype, strides=strides,\n order=order, stream=stream, portable=portable, wc=wc)\n\n\ndef pinned_array_like(ary):\n \"\"\"\n Call :func:`pinned_array() <numba.cuda.pinned_array>` with the information\n from the array.\n \"\"\"\n strides = _contiguous_strides_like_array(ary)\n order = _order_like_array(ary)\n return pinned_array(shape=ary.shape, dtype=ary.dtype, strides=strides,\n order=order)\n\n\n# Stream helper\n@require_context\ndef stream():\n \"\"\"\n Create a CUDA stream that represents a command queue for the device.\n \"\"\"\n return current_context().create_stream()\n\n\n@require_context\ndef default_stream():\n \"\"\"\n Get the default CUDA stream. CUDA semantics in general are that the default\n stream is either the legacy default stream or the per-thread default stream\n depending on which CUDA APIs are in use. 
In Numba, the APIs for the legacy\n default stream are always the ones in use, but an option to use APIs for\n the per-thread default stream may be provided in future.\n \"\"\"\n return current_context().get_default_stream()\n\n\n@require_context\ndef legacy_default_stream():\n \"\"\"\n Get the legacy default CUDA stream.\n \"\"\"\n return current_context().get_legacy_default_stream()\n\n\n@require_context\ndef per_thread_default_stream():\n \"\"\"\n Get the per-thread default CUDA stream.\n \"\"\"\n return current_context().get_per_thread_default_stream()\n\n\n@require_context\ndef external_stream(ptr):\n \"\"\"Create a Numba stream object for a stream allocated outside Numba.\n\n :param ptr: Pointer to the external stream to wrap in a Numba Stream\n :type ptr: int\n \"\"\"\n return current_context().create_external_stream(ptr)\n\n\n# Page lock\n@require_context\[email protected]\ndef pinned(*arylist):\n \"\"\"A context manager for temporary pinning a sequence of host ndarrays.\n \"\"\"\n pmlist = []\n for ary in arylist:\n pm = current_context().mempin(ary, driver.host_pointer(ary),\n driver.host_memory_size(ary),\n mapped=False)\n pmlist.append(pm)\n yield\n\n\n@require_context\[email protected]\ndef mapped(*arylist, **kws):\n \"\"\"A context manager for temporarily mapping a sequence of host ndarrays.\n \"\"\"\n assert not kws or 'stream' in kws, \"Only accept 'stream' as keyword.\"\n stream = kws.get('stream', 0)\n pmlist = []\n devarylist = []\n for ary in arylist:\n pm = current_context().mempin(ary, driver.host_pointer(ary),\n driver.host_memory_size(ary),\n mapped=True)\n pmlist.append(pm)\n devary = devicearray.from_array_like(ary, gpu_data=pm, stream=stream)\n devarylist.append(devary)\n try:\n if len(devarylist) == 1:\n yield devarylist[0]\n else:\n yield devarylist\n finally:\n # When exiting from `with cuda.mapped(*arrs) as mapped_arrs:`, the name\n # `mapped_arrs` stays in scope, blocking automatic unmapping based on\n # reference count. We therefore invoke the finalizer manually.\n for pm in pmlist:\n pm.free()\n\n\ndef event(timing=True):\n \"\"\"\n Create a CUDA event. 
Timing data is only recorded by the event if it is\n created with ``timing=True``.\n \"\"\"\n evt = current_context().create_event(timing=timing)\n return evt\n\n\nevent_elapsed_time = driver.event_elapsed_time\n\n\n# Device selection\n\ndef select_device(device_id):\n \"\"\"\n Make the context associated with device *device_id* the current context.\n\n Returns a Device instance.\n\n Raises exception on error.\n \"\"\"\n context = devices.get_context(device_id)\n return context.device\n\n\ndef get_current_device():\n \"Get current device associated with the current thread\"\n return current_context().device\n\n\ndef list_devices():\n \"Return a list of all detected devices\"\n return devices.gpus\n\n\ndef close():\n \"\"\"\n Explicitly clears all contexts in the current thread, and destroys all\n contexts if the current thread is the main thread.\n \"\"\"\n devices.reset()\n\n\ndef _auto_device(ary, stream=0, copy=True):\n return devicearray.auto_device(ary, stream=stream, copy=copy)\n\n\ndef detect():\n \"\"\"\n Detect supported CUDA hardware and print a summary of the detected hardware.\n\n Returns a boolean indicating whether any supported devices were detected.\n \"\"\"\n devlist = list_devices()\n print('Found %d CUDA devices' % len(devlist))\n supported_count = 0\n for dev in devlist:\n attrs = []\n cc = dev.compute_capability\n attrs += [('compute capability', '%d.%d' % cc)]\n attrs += [('pci device id', dev.PCI_DEVICE_ID)]\n attrs += [('pci bus id', dev.PCI_BUS_ID)]\n if cc < (2, 0):\n support = '[NOT SUPPORTED: CC < 2.0]'\n else:\n support = '[SUPPORTED]'\n supported_count += 1\n\n print('id %d %20s %40s' % (dev.id, dev.name, support))\n for key, val in attrs:\n print('%40s: %s' % (key, val))\n\n print('Summary:')\n print('\\t%d/%d devices are supported' % (supported_count, len(devlist)))\n return supported_count > 0\n\n\[email protected]\ndef defer_cleanup():\n \"\"\"\n Temporarily disable memory deallocation.\n Use this to prevent resource deallocation breaking asynchronous execution.\n\n For example::\n\n with defer_cleanup():\n # all cleanup is deferred in here\n do_speed_critical_code()\n # cleanup can occur here\n\n Note: this context manager can be nested.\n \"\"\"\n with current_context().defer_cleanup():\n yield\n\n\nprofiling = require_context(driver.profiling)\nprofile_start = require_context(driver.profile_start)\nprofile_stop = require_context(driver.profile_stop)\n"
]
| [
[
"numpy.ndarray.view",
"numpy.ndarray",
"numpy.prod",
"numpy.dtype"
]
]
|
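A sketch of the API surface defined in the file above, called via `numba.cuda` exactly as the docstrings describe (requires a CUDA-capable GPU; every call below appears in the file):

```python
# Usage sketch for the numba.cuda device-array helpers above.
import numpy as np
from numba import cuda

ary = np.arange(10, dtype=np.float32)
d_ary = cuda.to_device(ary)            # host -> device transfer
hary = d_ary.copy_to_host()            # device -> host transfer
assert (hary == ary).all()

d_like = cuda.device_array_like(ary)   # uninitialized device buffer, same layout

stream = cuda.stream()                 # command queue for async transfers
with cuda.pinned(ary):                 # temporarily page-lock the host array
    d2 = cuda.to_device(ary, stream=stream)
stream.synchronize()
```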
zhaoliuUT/tuning | [
"1f1a9b1d66b994196de076f08f74a48c60dd7141"
]
| [
"src/tuning/functions_for_analysis.py"
]
| [
"import numpy as np\nimport matplotlib.pyplot as plt\n\n#========= Generate hierachical binary tuning curves =========\ndef gen_binary_hierachical_curves(numNeuro, fp = 1, fm = 0):\n x_ = np.array([0, 1]).reshape((1, 2))\n curr_dim = 1\n\n while curr_dim < numNeuro:\n if len(x_.shape) ==1:\n old_dim = 1\n old_len = len(x_)\n else:\n old_dim, old_len = x_.shape\n\n curr_dim = old_dim+1\n curr_len = 2*old_len#x_.shape[1]\n y = np.zeros((curr_dim, curr_len)).astype(int)\n y[0, :old_len] = 0\n y[0, old_len:] = 1\n y[1:, :old_len]= x_.copy()\n y[1:, old_len:]= np.flip(x_, axis = 1)\n x_ = y.copy()\n if fp != 1 or fm != 0:\n xnew = np.zeros_like(x_).astype(float)\n xnew[x_ == 0] = fm\n xnew[x_ == 1] = fp\n return xnew\n else:\n return x_\n\n## example:\n# tc = gen_binary_hierachical_curves(5, fp = 1, fm = 0.01)\n# from tuning.anim_3dcube import plot_funcs_in_figure\n# fig = plt.figure()\n# _ = plot_funcs_in_figure(fig, tc, np.ones(tc.shape[1]), nrow=5, ncol=1)\n\ndef compute_period(yy, noise_tol = 0.15, period_tol = 0.6):\n yy_diff = np.diff(np.concatenate((yy, [yy[0]])))\n # find the positions where yy_diff changes sign\n sign_diff = 1.0*(yy_diff > 0) - 1.0*(yy_diff < 0)\n sign_change = 1 + np.where(np.diff(np.concatenate((sign_diff, [sign_diff[0]])))!=0)[0]\n sign_change = list(sign_change)\n if len(yy) in sign_change:\n sign_change.remove(len(yy))\n sign_change.append(0)\n \n# plt.figure()\n# plt.plot(sign_diff)\n# plt.plot(yy)\n# print(sign_change)\n\n func_variations = []\n func_variations_sign = []\n func_variations_index = []\n for i, idx in enumerate(sign_change):\n if i != len(sign_change) - 1:\n next_idx = sign_change[i+1]\n else:\n next_idx = sign_change[0]\n# print(next_idx, np.arange(idx, next_idx+1), yy[idx:next_idx+1], yy[next_idx] - yy[idx])\n# print(next_idx, yy[next_idx] - yy[idx])\n curr_variation = yy[next_idx] - yy[idx]\n if i ==0 or (np.fabs(curr_variation) > noise_tol and curr_variation*func_variations_sign[-1] <= 0):\n # includes the case when sign=0 (can happen at i=0)\n func_variations.append(curr_variation)\n func_variations_sign.append(np.sign(curr_variation))\n func_variations_index.append(next_idx)\n else:\n func_variations[-1] += curr_variation\n func_variations_index[-1] = next_idx\n# print(func_variations)\n# print(func_variations_sign)\n func_variations = np.array(func_variations)\n func_variations_index = [func_variations_index[-1]]+func_variations_index[0:-1]\n #print(func_variations)\n #print(func_variations_sign)\n #print(func_variations_index)\n \n# for k in range(len(func_variations_index)):\n# print(all_index[k], all_index[(k-1)%len(all_index)])\n# curr_index = func_variations_index[k]\n# prev_index = func_variations_index[(k-1)%len(func_variations_index)]\n# print(yy[curr_index] - yy[prev_index])\n # should be the same as func_variations\n increase_num = np.sum(func_variations > period_tol) # 0.6\n decrease_num = np.sum(func_variations < -period_tol)\n if increase_num == decrease_num:\n return increase_num\n else:\n print('different number of increasing intervals and decreasing intervals: %d and %d!'%(increase_num, decrease_num))\n return max(increase_num, decrease_num)\n\n\n\n\ndef find_unique_points(points_data, tol = 1e-3, return_index = False):\n # points_data: (numDimension, numPoints)\n # each column is a point\n # return value has dimension (numDimension, smaller number of points)\n # for the return index: points_data[:, returnedindex] == result.\n point_dim, point_num = points_data.shape\n\n if point_dim == 1:\n points_data = 
points_data.reshape(-1)\n ind = np.argsort(points_data)\n xx = points_data[ind]\n xxdiff = np.append(1, np.diff(xx))\n result = xx[xxdiff > tol]\n result = result.reshape(1, len(result))\n if return_index:\n return result, ind[xxdiff>tol]\n else:\n return result\n\n xx = points_data.T\n sort_keys = (xx[:,0], xx[:,1])\n for k in range(2, point_dim):\n sort_keys = (*sort_keys, xx[:,k])\n ind = np.lexsort(sort_keys) # sort using multiple keys\n xx = xx[ind, :]\n xxdiff = np.diff(xx, axis = 0)\n errors = np.append(1, np.sum(xxdiff**2, axis = 1))\n result = xx[errors>tol, :].T\n if return_index:\n return result, ind[errors > tol]\n else:\n return result\n\ndef find_unique_points_weights(points_data, points_weights=None, tol = 1e-3, return_index = False):\n # points_data: (numDimension, numPoints)\n # each column is a point\n # return value has dimension (numDimension, smaller number of points)\n # for the return index: points_data[:, returnedindex] == result.\n # also sum up the weights according to the unique indices\n point_dim, point_num = points_data.shape\n\n if point_dim == 1:\n points_data = points_data.reshape(-1)\n ind = np.argsort(points_data)\n xx = points_data[ind]\n errors = np.append(1, np.diff(xx))\n result = xx[errors > tol]\n result = result.reshape(1, len(result))\n else:\n xx = points_data.T\n sort_keys = (xx[:,0], xx[:,1])\n for k in range(2, point_dim):\n sort_keys = (*sort_keys, xx[:,k])\n ind = np.lexsort(sort_keys) # sort using multiple keys\n xx = xx[ind, :]\n xxdiff = np.diff(xx, axis = 0)\n errors = np.append(1, np.sum(xxdiff**2, axis = 1))\n result = xx[errors>tol, :].T\n\n if points_weights is not None:\n # sum up the weights according to the unique indices\n newweights = np.zeros(result.shape[1])\n\n errors_ind = np.where(errors > tol)[0]\n for j, start_idx in enumerate(errors_ind[:-1]):\n #start_idx = errors_ind[j]\n end_idx = errors_ind[j+1]\n newweights[j] = np.sum(points_weights[ind[start_idx:end_idx]])\n newweights[-1] = np.sum(points_weights[ind[errors_ind[-1]:]])\n # return results\n if points_weights is None:\n if return_index:\n return result, ind[errors > tol]\n else:\n return result\n else:\n if return_index:\n return result, newweights, ind[errors > tol]\n else:\n return result, newweights\n \ndef compute_bump_widths(one_dim_tc, weights, fm = None):\n # compute widths of bumps (which are continuous parts != fm)\n # one_dim_tc: (numBin, ) numpy array, same shape as weights\n # treated as circular (wraps around).\n if fm is None:\n fm = np.min(one_dim_tc)\n nBin = len(one_dim_tc)\n fmindices = np.where(one_dim_tc==fm)[0]\n diff_fmindices = np.diff(list(fmindices)+[fmindices[0]+nBin]) # diff_fmindices[i]=fmindices[i+1]-fmindices[i]\n diff_fmindices2 = np.roll(diff_fmindices, 1) #diff_fmindices2[i] = fmindices[i] - fmindices[i-1]\n \n bump_start_indices = fmindices[diff_fmindices>1] \n bump_end_indices = fmindices[diff_fmindices2>1]\n if diff_fmindices[-1]!=1: #one_dim_tc[-1]!=fm or one_dim_tc[0]!=fm\n bump_start_indices = np.roll(bump_start_indices, 1)\n \n bump_widths_with_weights = np.zeros(len(bump_start_indices))\n for k in range(len(bump_start_indices)):\n j1 = bump_start_indices[k]\n j2 = bump_end_indices[k]\n if(j1 > j2):\n bump_widths_with_weights[k] = np.sum(weights[j1+1:]) + np.sum(weights[0:j2])\n else:\n bump_widths_with_weights[k] = np.sum(weights[j1+1:j2]) # sum from weights[j1+1] to weights[j2-1]\n return bump_widths_with_weights\n "
]
| [
[
"numpy.concatenate",
"numpy.array",
"numpy.zeros_like",
"numpy.lexsort",
"numpy.zeros",
"numpy.sum",
"numpy.roll",
"numpy.min",
"numpy.diff",
"numpy.where",
"numpy.sign",
"numpy.fabs",
"numpy.argsort",
"numpy.flip"
]
]
|
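A usage sketch for the analysis helpers in the row above, assuming the module path from the file_path column (the function name keeps the repo's own spelling):

```python
# Hypothetical usage sketch; import path is an assumption.
import numpy as np
from tuning.functions_for_analysis import (
    gen_binary_hierachical_curves, compute_period, find_unique_points)

tc = gen_binary_hierachical_curves(3, fp=1.0, fm=0.01)
print(tc.shape)  # (3, 8): 3 neurons, 8 hierarchical binary bins

yy = np.sin(np.linspace(0, 4 * np.pi, 200, endpoint=False))  # two full periods
print(compute_period(yy))  # expected: 2

pts = np.array([[0.0, 0.0, 1.0], [0.0, 0.0, 2.0]])  # columns are points; first two coincide
print(find_unique_points(pts).shape)  # (2, 2)
```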
georgi/MusicTransformer-pytorch | [
"fe1af43fa5f38703c41b2887673eadbfb2db761a"
]
| [
"data.py"
]
| [
"import random\nimport torch\nimport os\nfrom random import randrange, uniform\nfrom torch.utils.data import DataLoader\nfrom note_seq.sequences_lib import (\n stretch_note_sequence,\n transpose_note_sequence,\n NegativeTimeError\n)\nfrom note_seq.protobuf import music_pb2\nfrom utils import find_files_by_extensions\nfrom concurrent.futures import ProcessPoolExecutor\nfrom tqdm import tqdm\n\n\ndef process_midi(seq, max_seq, token_pad):\n if len(seq) <= max_seq:\n x = torch.full((max_seq, ), token_pad, dtype=torch.long)\n tgt = torch.full((max_seq, ), token_pad, dtype=torch.long)\n x[:len(seq)] = seq\n tgt[:len(seq) - 1] = seq[1:]\n else:\n try:\n start = random.randint(0, len(seq) - max_seq - 1)\n except ValueError:\n start = 0\n end = start + max_seq + 1\n data = seq[start:end]\n x = data[:max_seq]\n tgt = data[1:max_seq + 1]\n return x, tgt\n\n\ndef train_test_split(dataset, split=0.90):\n train = list()\n train_size = split * len(dataset)\n dataset_copy = list(dataset)\n while len(train) < train_size:\n index = randrange(len(dataset_copy))\n train.append(dataset_copy.pop(index))\n return train, dataset_copy\n\n\ndef save_sequence(ns, path):\n with open(path, 'wb') as f:\n f.write(ns.SerializeToString())\n\n\ndef load_sequence(fname):\n with open(fname, 'rb') as f:\n ns = music_pb2.NoteSequence()\n ns.ParseFromString(f.read())\n return ns\n\n\ndef convert_midi_to_proto(midi_encoder, src, dest_dir):\n res = []\n for i, ns in enumerate(midi_encoder.load_midi(src)):\n fname = os.path.join(dest_dir, os.path.basename(src) + f'-{i}.pb')\n save_sequence(ns, fname)\n res.append(fname)\n return res\n\n\ndef convert_midi_to_proto_folder(midi_encoder, src_dir, dest_dir, max_workers=10):\n files = list(find_files_by_extensions(src_dir, ['.mid', '.midi']))\n with ProcessPoolExecutor(max_workers=max_workers) as executor:\n res = []\n futures = [\n executor.submit(\n convert_midi_to_proto, midi_encoder, f, dest_dir)\n for f in files\n ]\n for future in tqdm(futures):\n res.extend(future.result())\n\n\ndef data_loaders(\n midi_encoder,\n data_dir,\n batch_size,\n max_seq,\n time_augment,\n transpose_augment,\n num_workers=8\n):\n data_files = list(find_files_by_extensions(data_dir, ['.pb']))\n train_files, valid_files = train_test_split(data_files)\n\n train_data = SequenceDataset(\n sequences=train_files,\n seq_length=max_seq,\n midi_encoder=midi_encoder,\n time_augment=time_augment,\n transpose_augment=transpose_augment\n )\n valid_data = SequenceDataset(\n sequences=valid_files,\n seq_length=max_seq,\n midi_encoder=midi_encoder,\n time_augment=0,\n transpose_augment=0\n )\n\n train_loader = DataLoader(train_data, batch_size, num_workers=num_workers)\n valid_loader = DataLoader(valid_data, batch_size, num_workers=num_workers)\n\n return train_loader, valid_loader\n\n\nclass SequenceDataset(torch.utils.data.Dataset):\n def __init__(\n self,\n sequences,\n seq_length,\n midi_encoder,\n time_augment,\n transpose_augment\n ):\n self.sequences = sequences\n self.seq_length = seq_length\n self.midi_encoder = midi_encoder\n self.time_augment = time_augment\n self.transpose_augment = transpose_augment\n\n def __len__(self):\n return len(self.sequences)\n\n def augment(self, ns):\n if self.transpose_augment > 0:\n transpose = randrange(-self.transpose_augment,\n self.transpose_augment)\n ns = transpose_note_sequence(ns, transpose)[0]\n if self.time_augment > 0:\n try:\n stretch_factor = uniform(\n 1.0 - self.time_augment,\n 1.0 + self.time_augment\n )\n ns = stretch_note_sequence(ns, stretch_factor)\n except 
NegativeTimeError:\n pass\n return ns\n\n def encode(self, ns):\n return self.midi_encoder.encode_note_sequence(ns)\n\n def __getitem__(self, idx):\n return self._get_seq(self.sequences[idx])\n\n def _get_seq(self, file):\n ns = load_sequence(file)\n data = torch.tensor(self.encode(self.augment(ns)))\n data = process_midi(data, self.seq_length, 0)\n return data\n"
]
| [
[
"torch.full",
"torch.utils.data.DataLoader"
]
]
|
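A sketch of `process_midi`, the pad/crop step that `SequenceDataset._get_seq` above relies on (assumes `data.py` from this row is importable):

```python
# Hypothetical usage sketch for process_midi; import path is an assumption.
import torch
from data import process_midi

seq = torch.arange(1, 6)                    # a short token sequence: [1, 2, 3, 4, 5]
x, tgt = process_midi(seq, max_seq=8, token_pad=0)
print(x.tolist())    # [1, 2, 3, 4, 5, 0, 0, 0]  -> padded inputs
print(tgt.tolist())  # [2, 3, 4, 5, 0, 0, 0, 0]  -> next-token targets
```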
mediacatch/semantic-segmentation | [
"a4131f3b56bfbb0b02f50bcbd73aebb6f6b372e9"
]
| [
"semseg/datasets/stadiums.py"
]
| [
"import torch \nfrom torch import Tensor\nfrom torch.utils.data import Dataset\nfrom torchvision import io\nfrom pathlib import Path\nfrom typing import Tuple\n\n\nclass Stadiums(Dataset):\n CLASSES = ['background', 'tribune', 'grass', 'floor']\n\n PALETTE = torch.tensor([[0, 0, 0], [250, 50, 83], [102, 255, 102], [250, 250, 55]])\n\n def __init__(self, root: str, split: str = 'train', transform = None) -> None:\n super().__init__()\n assert split in ['train', 'val']\n self.transform = transform\n self.n_classes = len(self.CLASSES)\n self.ignore_label = -1\n\n img_path = Path(root) / 'images' / split \n self.files = list(img_path.glob('*.jpg'))\n \n if not self.files:\n raise Exception(f\"No images found in {img_path}\")\n print(f\"Found {len(self.files)} {split} images.\")\n\n def __len__(self) -> int:\n return len(self.files)\n\n def __getitem__(self, index: int) -> Tuple[Tensor, Tensor]:\n img_path = str(self.files[index])\n lbl_path = str(self.files[index]).replace('images', 'labels').replace('.jpg', '.png')\n\n image = io.read_image(img_path)\n label = io.read_image(lbl_path)\n \n if self.transform:\n image, label = self.transform(image, label)\n return image, self.encode(label).long()\n\n\n def encode(self, label: Tensor) -> Tensor:\n label = label.permute(1, 2, 0)\n mask = torch.zeros(label.shape[:-1])\n\n for index, color in enumerate(self.PALETTE):\n bool_mask = torch.eq(label, color)\n class_map = torch.all(bool_mask, dim=-1)\n mask[class_map] = index\n return mask\n\n# if __name__ == '__main__':\n# from semseg.utils.visualize import visualize_dataset_sample\n# visualize_dataset_sample(Stadiums, '/home/sithu/datasets/ADEChallenge/ADEChallengeData2016')"
]
| [
[
"torch.zeros",
"torch.all",
"torch.tensor",
"torch.eq"
]
]
|
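A standalone sketch of the palette-to-index encoding performed by `Stadiums.encode` above, on a tiny hand-built label image (no dataset on disk required):

```python
# Self-contained sketch of the encode step; the toy label image is an assumption.
import torch

PALETTE = torch.tensor([[0, 0, 0], [250, 50, 83], [102, 255, 102], [250, 250, 55]])

label = torch.zeros(3, 2, 2, dtype=torch.long)  # (C, H, W) colour label image
label[:, 0, 0] = torch.tensor([102, 255, 102])  # paint one 'grass' pixel

label = label.permute(1, 2, 0)                  # -> (H, W, C)
mask = torch.zeros(label.shape[:-1])
for index, color in enumerate(PALETTE):
    mask[torch.all(torch.eq(label, color), dim=-1)] = index
print(mask)  # tensor([[2., 0.], [0., 0.]]): grass -> class 2, rest -> background
```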
ismarou/vtkplotter-examples | [
"1eefcc026be169ab7a77a5bce6dec8044c33b554"
]
| [
"vtkplotter_examples/other/dolfin/stokes.py"
]
| [
"\"\"\"\nThis demo solves the Stokes equations, using quadratic elements for\nthe velocity and first degree elements for the pressure (Taylor-Hood elements).\n\"\"\"\n# Credits:\n# https://github.com/pf4d/fenics_scripts/blob/master/cbc_block/stokes.py\nfrom dolfin import *\nimport numpy as np\nfrom vtkplotter.dolfin import plot, datadir, Latex\n\n# Load mesh and subdomains\nmesh = Mesh(datadir+\"dolfin_fine.xml\")\nsub_domains = MeshFunction(\"size_t\", mesh,\n datadir+\"dolfin_fine_subdomains.xml.gz\")\n\n# Define function spaces\nP2 = VectorElement(\"Lagrange\", mesh.ufl_cell(), 2)\nP1 = FiniteElement(\"Lagrange\", mesh.ufl_cell(), 1)\nTH = P2 * P1\nW = FunctionSpace(mesh, TH)\n\n# No-slip boundary condition for velocity\nnoslip = Constant((0, 0))\nbc0 = DirichletBC(W.sub(0), noslip, sub_domains, 0)\n\n# Inflow boundary condition for velocity\ninflow = Expression((\"-sin(x[1]*pi)\", \"0.0\"), degree=2)\nbc1 = DirichletBC(W.sub(0), inflow, sub_domains, 1)\nbcs = [bc0, bc1]\n\n# Define variational problem\n(u, p) = TrialFunctions(W)\n(v, q) = TestFunctions(W)\nf = Constant((0, 0))\na = (inner(grad(u), grad(v)) - div(v)*p + q*div(u))*dx\nL = inner(f, v)*dx\nw = Function(W)\n\nsolve(a == L, w, bcs)\n\n# Split the mixed solution using a shallow copy\n(u, p) = w.split()\n\n##################################################################### vtkplotter\nf = r'-\\nabla \\cdot(\\nabla u+p I)=f ~\\mathrm{in}~\\Omega'\nformula = Latex(f, pos=(0.55,0.45,-.05), s=0.1)\n\nplot(u, formula, at=0, N=2,\n mode='mesh and arrows', scale=.03,\n wireframe=True, scalarbar=False, style=1)\nplot(p, at=1, text=\"pressure\", cmap='rainbow', interactive=False)\n\n\n##################################################################### streamlines\n# A list of seed points (can be automatic: just comment out 'probes')\nally = np.linspace(0,1, num=100)\nprobes = np.c_[np.ones_like(ally), ally, np.zeros_like(ally)]\n\nplot(u, \n mode='mesh with streamlines',\n streamlines={'tol':0.02, # control density of streams\n 'lw':2, # line width \n 'direction':'forward', # direction of integration\n 'maxPropagation':1.2, # max length of propagation\n 'probes':probes, # custom list of point in space as seeds\n },\n c='white', # mesh color\n alpha=0.3, # mesh alpha\n lw=0, # mesh line width\n wireframe=True, # show as wireframe\n bg='blackboard', # background color\n newPlotter=True, # new window\n pos=(200,200), # window position on screen\n )\n"
]
| [
[
"numpy.zeros_like",
"numpy.linspace",
"numpy.ones_like"
]
]
|
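For reference, the forms `a` and `L` assembled in the demo above correspond to the standard Taylor-Hood weak formulation of the Stokes problem (restated here from the code; not part of the original script):

```latex
% Weak form assembled by the script, with Taylor-Hood (P2-P1) elements:
\text{find } (u, p) \in W \text{ such that, for all } (v, q) \in W:\quad
\int_\Omega \nabla u : \nabla v \,\mathrm{d}x
- \int_\Omega (\nabla \cdot v)\, p \,\mathrm{d}x
+ \int_\Omega q \,(\nabla \cdot u) \,\mathrm{d}x
= \int_\Omega f \cdot v \,\mathrm{d}x .
```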
tareqdandachi/qiskit-terra | [
"5221fe330adba5529bfa22dc25262ac8e6291aaf"
]
| [
"test/python/quantum_info/test_quaternions.py"
]
| [
"# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Tests quaternion conversion\"\"\"\n\nimport math\nimport numpy as np\nfrom numpy.testing import assert_allclose\nimport scipy.linalg as la\n\nfrom qiskit.quantum_info.operators.quaternion import \\\n quaternion_from_euler, Quaternion, quaternion_from_axis_rotation\n\nfrom qiskit.test import QiskitTestCase\n\n\nclass TestQuaternions(QiskitTestCase):\n \"\"\"Tests qiskit.quantum_info.operators.quaternion\"\"\"\n\n def setUp(self):\n self.rnd_array = np.array([0.5, 0.8, 0.9, -0.3])\n self.quat_unnormalized = Quaternion(self.rnd_array)\n axes = ['x', 'y', 'z']\n rnd = np.array([-0.92545003, -2.19985357, 6.01761209])\n idx = np.array([0, 2, 1])\n self.mat1 = rotation_matrix(rnd[0], axes[idx[0]]).dot(\n rotation_matrix(rnd[1], axes[idx[1]]).dot(\n rotation_matrix(rnd[2], axes[idx[2]])))\n axes_str = ''.join(axes[i] for i in idx)\n quat = quaternion_from_euler(rnd, axes_str)\n self.mat2 = quat.to_matrix()\n\n def test_str(self):\n \"\"\"Quaternion should have a correct string representation.\"\"\"\n self.assertEqual(self.quat_unnormalized.__str__(), self.rnd_array.__str__())\n\n def test_repr(self):\n \"\"\"Quaternion should have a correct string representation.\"\"\"\n self.assertEqual(self.quat_unnormalized.__repr__(), self.rnd_array.__str__())\n\n def test_norm(self):\n \"\"\"Quaternions should give correct norm.\"\"\"\n norm = la.norm(self.rnd_array)\n self.assertEqual(norm, self.quat_unnormalized.norm())\n\n def test_normalize(self):\n \"\"\"Quaternions should be normalizable\"\"\"\n self.assertAlmostEqual(self.quat_unnormalized.normalize().norm(), 1, places=5)\n\n def test_random_euler(self):\n \"\"\"Quaternion from Euler rotations.\"\"\"\n assert_allclose(self.mat1, self.mat2)\n\n def test_orthogonality(self):\n \"\"\"Quaternion rotation matrix orthogonality\"\"\"\n assert_allclose(self.mat2.dot(self.mat2.T), np.identity(3, dtype=float), atol=1e-8)\n\n def test_det(self):\n \"\"\"Quaternion det = 1\"\"\"\n assert_allclose(la.det(self.mat2), 1)\n\n def test_equiv_quaternions(self):\n \"\"\"Different Euler rotations give same quaternion, up to sign.\"\"\"\n # Check if euler angles from to_zyz return same quaternion\n # up to a sign (2pi rotation)\n rot = ['xyz', 'xyx', 'xzy', 'xzx', 'yzx', 'yzy', 'yxz', 'yxy', 'zxy', 'zxz', 'zyx', 'zyz']\n for value in rot:\n rnd = np.array([-1.57657536, 5.66384302, 2.91532185])\n quat1 = quaternion_from_euler(rnd, value)\n euler = quat1.to_zyz()\n quat2 = quaternion_from_euler(euler, 'zyz')\n assert_allclose(abs(quat1.data.dot(quat2.data)), 1)\n\n def test_mul_by_quat(self):\n \"\"\"Quaternions should multiply correctly.\"\"\"\n # multiplication of quaternions is equivalent to the\n # multiplication of corresponding rotation matrices.\n other_quat = Quaternion(np.array([0.4, 0.2, -0.7, 0.8]))\n other_mat = other_quat.to_matrix()\n product_quat = self.quat_unnormalized * other_quat\n product_mat = (self.quat_unnormalized.to_matrix()).dot(other_mat)\n assert_allclose(product_quat.to_matrix(), product_mat)\n\n def test_mul_by_array(self):\n 
\"\"\"Quaternions cannot be multiplied with an array.\"\"\"\n other_array = np.array([0.1, 0.2, 0.3, 0.4])\n self.assertRaises(Exception, self.quat_unnormalized.__mul__, other_array)\n\n def test_mul_by_scalar(self):\n \"\"\"Quaternions cannot be multiplied with a scalar.\"\"\"\n other_scalar = 0.123456789\n self.assertRaises(Exception, self.quat_unnormalized.__mul__, other_scalar)\n\n def test_rotation(self):\n \"\"\"Multiplication by -1 should give the same rotation.\"\"\"\n neg_quat = Quaternion(self.quat_unnormalized.data * -1)\n assert_allclose(neg_quat.to_matrix(), self.quat_unnormalized.to_matrix())\n\n def test_one_euler_angle(self):\n \"\"\"Quaternion should return a correct sequence of zyz representation\n in the case of rotations when there is only one non-zero Euler angle.\"\"\"\n rand_rot_angle = 0.123456789\n some_quat = quaternion_from_axis_rotation(rand_rot_angle, \"z\")\n assert_allclose(some_quat.to_zyz(), np.array([rand_rot_angle, 0, 0]))\n\n def test_two_euler_angle_0123456789(self):\n \"\"\"Quaternion should return a correct sequence of zyz representation\n in the case of rotations when there are only two non-zero Euler angle.\n angle = 0.123456789 \"\"\"\n rand_rot_angle = 0.123456789\n some_quat = (quaternion_from_axis_rotation(rand_rot_angle, \"z\")\n * quaternion_from_axis_rotation(np.pi, \"y\"))\n assert_allclose(some_quat.to_zyz(), np.array([rand_rot_angle, np.pi, 0]))\n\n def test_two_euler_angle_0987654321(self):\n \"\"\"Quaternion should return a correct sequence of zyz representation\n in the case of rotations when there are only two non-zero Euler angle.\n angle = 0.987654321 \"\"\"\n rand_rot_angle = 0.987654321\n some_quat = (quaternion_from_axis_rotation(rand_rot_angle, \"z\")\n * quaternion_from_axis_rotation(np.pi, \"y\"))\n assert_allclose(some_quat.to_zyz(), np.array([rand_rot_angle, np.pi, 0]))\n\n def test_quaternion_from_rotation_invalid_axis(self):\n \"\"\"Cannot generate quaternion from rotations around invalid axis.\"\"\"\n rand_axis = 'a'\n rand_angle = 0.123456789\n self.assertRaises(ValueError, quaternion_from_axis_rotation, rand_angle, rand_axis)\n\n\ndef rotation_matrix(angle, axis):\n \"\"\"Generates a rotation matrix for a given angle and axis.\n\n Args:\n angle (float): Rotation angle in radians.\n axis (str): Axis for rotation: 'x', 'y', 'z'\n\n Returns:\n ndarray: Rotation matrix.\n\n Raises:\n ValueError: Invalid input axis.\n \"\"\"\n direction = np.zeros(3, dtype=float)\n if axis == 'x':\n direction[0] = 1\n elif axis == 'y':\n direction[1] = 1\n elif axis == 'z':\n direction[2] = 1\n else:\n raise ValueError('Invalid axis.')\n direction = np.asarray(direction, dtype=float)\n sin_angle = math.sin(angle)\n cos_angle = math.cos(angle)\n rot = np.diag([cos_angle, cos_angle, cos_angle])\n rot += np.outer(direction, direction) * (1.0 - cos_angle)\n direction *= sin_angle\n rot += np.array([\n [0, -direction[2], direction[1]],\n [direction[2], 0, -direction[0]],\n [-direction[1], direction[0], 0]\n ])\n return rot\n"
]
| [
[
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.asarray",
"numpy.zeros",
"scipy.linalg.det",
"numpy.identity",
"numpy.outer",
"scipy.linalg.norm",
"numpy.diag"
]
]
|
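A usage sketch of the quaternion helpers these tests exercise, with the import path taken from the test file (valid at the pinned qiskit-terra commit; later releases reorganised this module):

```python
# Hypothetical usage sketch for the quaternion helpers under test.
import numpy as np
from qiskit.quantum_info.operators.quaternion import (
    quaternion_from_euler, quaternion_from_axis_rotation)

q = quaternion_from_axis_rotation(np.pi / 2, 'z')  # 90-degree rotation about z
print(np.round(q.to_matrix(), 6))                  # 3x3 rotation matrix

angles = np.array([0.1, 0.2, 0.3])
q2 = quaternion_from_euler(angles, 'zyz')
print(q2.to_zyz())                                 # recover a zyz Euler-angle sequence
```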
synabreu/addons | [
"a8316c74854350d112a4784da8fd5db65d0ec0e0"
]
| [
"tensorflow_addons/image/utils.py"
]
| [
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Image util ops.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n\ndef get_ndims(image):\n return image.get_shape().ndims or tf.rank(image)\n\n\[email protected]\ndef to_4D_image(image):\n \"\"\"Convert 2/3/4D image to 4D image.\n\n Args:\n image: 2/3/4D tensor.\n\n Returns:\n 4D tensor with the same type.\n \"\"\"\n # yapf:disable\n with tf.control_dependencies([\n tf.debugging.assert_rank_in(\n image, [2, 3, 4], message='`image` must be 2/3/4D tensor')\n ]):\n # yapf: enable\n ndims = image.get_shape().ndims\n if ndims is None:\n return _dynamic_to_4D_image(image)\n elif ndims == 2:\n return image[None, :, :, None]\n elif ndims == 3:\n return image[None, :, :, :]\n else:\n return image\n\n\ndef _dynamic_to_4D_image(image):\n shape = tf.shape(image)\n original_rank = tf.rank(image)\n # 4D image => [N, H, W, C] or [N, C, H, W]\n # 3D image => [1, H, W, C] or [1, C, H, W]\n # 2D image => [1, H, W, 1]\n left_pad = tf.cast(tf.less_equal(original_rank, 3), dtype=tf.int32)\n right_pad = tf.cast(tf.equal(original_rank, 2), dtype=tf.int32)\n # yapf: disable\n new_shape = tf.concat(\n [tf.ones(shape=left_pad, dtype=tf.int32),\n shape,\n tf.ones(shape=right_pad, dtype=tf.int32)],\n axis=0)\n # yapf: enable\n return tf.reshape(image, new_shape)\n\n\[email protected]\ndef from_4D_image(image, ndims):\n \"\"\"Convert back to an image with `ndims` rank.\n\n Args:\n image: 4D tensor.\n ndims: The original rank of the image.\n\n Returns:\n `ndims`-D tensor with the same type.\n \"\"\"\n # yapf:disable\n with tf.control_dependencies([\n tf.debugging.assert_rank(\n image, 4, message='`image` must be 4D tensor')\n ]):\n # yapf:enable\n if isinstance(ndims, tf.Tensor):\n return _dynamic_from_4D_image(image, ndims)\n elif ndims == 2:\n return tf.squeeze(image, [0, 3])\n elif ndims == 3:\n return tf.squeeze(image, [0])\n else:\n return image\n\n\ndef _dynamic_from_4D_image(image, original_rank):\n shape = tf.shape(image)\n # 4D image <= [N, H, W, C] or [N, C, H, W]\n # 3D image <= [1, H, W, C] or [1, C, H, W]\n # 2D image <= [1, H, W, 1]\n begin = tf.cast(tf.less_equal(original_rank, 3), dtype=tf.int32)\n end = 4 - tf.cast(tf.equal(original_rank, 2), dtype=tf.int32)\n new_shape = shape[begin:end]\n return tf.reshape(image, new_shape)\n"
]
| [
[
"tensorflow.rank",
"tensorflow.shape",
"tensorflow.debugging.assert_rank",
"tensorflow.equal",
"tensorflow.ones",
"tensorflow.reshape",
"tensorflow.squeeze",
"tensorflow.less_equal",
"tensorflow.debugging.assert_rank_in"
]
]
|
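A usage sketch for the rank-normalisation helpers above, assuming the module path from the file_path column:

```python
# Hypothetical usage sketch for to_4D_image / from_4D_image; import path assumed.
import tensorflow as tf
from tensorflow_addons.image.utils import get_ndims, to_4D_image, from_4D_image

img = tf.random.uniform((32, 32))       # a rank-2 image
ndims = get_ndims(img)

img4d = to_4D_image(img)                # -> shape (1, 32, 32, 1)
print(img4d.shape)

restored = from_4D_image(img4d, ndims)  # squeeze back to the original rank
print(restored.shape)                   # (32, 32)
```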
gmiaslab/DifferentialNetworks | [
"beaaef02e109bf4f4c78ff21147eae8b3108e889"
]
| [
"heatmaps.py"
]
| [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 22 14:15:31 2021\nThis script plots heatmaps. \n\n@author: Shuyue Xue\n\"\"\"\nimport os, matplotlib\nimport pandas as pd, matplotlib.pyplot as plt, seaborn as sns\nfrom preprocess_raw_data import configure_SLV_dataframe, configure_Bcell_datasets\nimport dir_conf\n\n# set filenames and their paths\n# dir_conf.initialize('SLV') # test code with SLV data\ncd = os.getcwd()\ndata_set = dir_conf.data_set\nnetwork_data_path = os.path.join(dir_conf.results_dir, 'network_data')\ncommunities_data_path = os.path.join(dir_conf.results_dir, 'reactome_analysis')\ncomm_gene_file_path = os.path.join(cd, network_data_path,\n 'M0.125_Qf0.75_Qc0.995_DN_full_name_communities.csv')\nsgnf_communities_file_path = os.path.join(cd, communities_data_path,\n 'M0.125_Qf0.75_Qc0.995_DN_community_reactome.xlsx')\n\n\n# load the gene expression files\nos.chdir(dir_conf.data_path)\nif 'SLV' == data_set:\n du, dt = configure_SLV_dataframe()\n treated_file = 'community_signals_h2(treated).xlsx'\n untreated_file = 'community_signals_h1(untreated).xlsx'\nif 'Bcell' == data_set:\n drt, dru, dt, du = configure_Bcell_datasets()\n treated_file = 'community_signals_treated.xlsx'\n untreated_file = 'community_signals_untreated.xlsx'\n \n# load the community files\ncomm_genes = pd.read_csv(comm_gene_file_path, header=0, index_col=0)\ncomm_xls = pd.ExcelFile(sgnf_communities_file_path)\ncomms_for_plot = comm_xls.sheet_names\n\n# change to the heatmap directory to export the plots\nos.chdir(os.path.join(cd, dir_conf.results_dir, 'network_plots'))\nif not os.path.exists('heatmaps'):\n os.makedirs('heatmaps')\nos.chdir('./heatmaps/')\n\n# group gene signals by communities with significant pathways (export in 2 files)\nwith pd.ExcelWriter(treated_file, engine='xlsxwriter') \\\n as writer:\n for comm in comms_for_plot:\n this_comm_genes = comm_genes[comm].dropna()\n \n if 'SLV' == data_set:\n comm_signals = dt.loc[ this_comm_genes ]\n \n if 'Bcell' == data_set:\n comm_signals_t = dt.loc[ this_comm_genes ]\n comm_signals_rt = drt.loc[ this_comm_genes ] \n comm_signals = 1/2*(comm_signals_t + comm_signals_rt)\n\n comm_signals.to_excel(writer, sheet_name=comm)\n\nwith pd.ExcelWriter(untreated_file, engine='xlsxwriter') \\\n as writer:\n for comm in comms_for_plot:\n this_comm_genes = comm_genes[comm].dropna()\n \n if 'SLV' == data_set:\n comm_signals = du.loc[ this_comm_genes ]\n \n if 'Bcell' == data_set:\n comm_signals_u = du.loc[ this_comm_genes ]\n comm_signals_ru = dru.loc[ this_comm_genes ] \n comm_signals = 1/2*(comm_signals_u + comm_signals_ru)\n \n comm_signals.to_excel(writer, sheet_name=comm)\n \n \ndef plot_heatmap_and_time_average(community_signal_normalized_df, raw_df, title):\n fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 13))\n plt.rcParams['font.size'] = 14\n plt.rcParams['font.family'] = 'serif'\n \n cmap = matplotlib.colors.LinearSegmentedColormap.from_list('CP',['steelblue','steelblue','steelblue','w','firebrick','firebrick','firebrick'], N=100)\n \n heatmap = sns.clustermap(community_signal_normalized_df,\n metric = 'euclidean',\n method = 'complete',\n yticklabels=False,\n col_cluster=False,\n cmap = cmap,\n vmin=-1, vmax=1,\n\n cbar_pos=(0.93, 0.06, 0.014, 0.91),\n )\n hm = heatmap.ax_heatmap.get_position()\n heatmap.ax_heatmap.set_position([hm.x0, hm.y0, hm.width*0.86, hm.height*1.25])\n \n row = heatmap.ax_row_dendrogram.get_position()\n heatmap.ax_row_dendrogram.set_position([row.x0, row.y0,\n row.width, row.height*1.25])\n \n file_name = 
title.replace(':','')\n # file_name = file_name.replace(' ','_')+'.svg'\n file_name = file_name.replace(' ','_')+'.jpeg'\n plt.savefig( file_name )\n plt.close()\n\n return\n\n\ndef normalize_df(raw_signal_df):\n \"\"\"normalize the df\"\"\"\n from sklearn.preprocessing import Normalizer\n # 1. subtract t=0 from all time points\n # normalized_df =raw_signal_df.iloc[:, 1:].sub(df[0], axis=0) # keep t=0\n normalized_df = raw_signal_df.sub(raw_signal_df[0], axis=0) # norm t=0\n\n # 2. normalize as vector norm\n normalized_df.iloc[:,:] = \\\n Normalizer(norm='l2').fit_transform(normalized_df)\n return normalized_df\n\n\ntreated_xls = pd.ExcelFile(treated_file)\nuntreated_xls = pd.ExcelFile(untreated_file)\nfor comm in treated_xls.sheet_names:\n # print('\\niteration %s' %comm)\n df_t = pd.read_excel(treated_xls, index_col=0, sheet_name=comm)\n df_ut = pd.read_excel(untreated_xls, index_col=0, sheet_name=comm)\n \n norm_df_t = normalize_df(df_t)\n norm_df_ut = normalize_df(df_ut)\n\n diff = norm_df_t - norm_df_ut \n diff.columns.name = 'Time (hours)'\n title = 'Differential Signal: ' + comm + ' Heatmap'\n plot_heatmap_and_time_average(diff, diff, title=title)\n\n\nos.chdir(cd) # Back to the Code directory"
]
| [
[
"sklearn.preprocessing.Normalizer",
"matplotlib.pyplot.savefig",
"pandas.read_excel",
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplots",
"pandas.ExcelFile",
"pandas.ExcelWriter",
"pandas.read_csv",
"matplotlib.colors.LinearSegmentedColormap.from_list"
]
]
|
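A standalone sketch of the `normalize_df` step from the script above (anchor each gene at t=0, then L2-normalise each row), using a toy DataFrame in place of the Excel sheets:

```python
# Self-contained sketch of normalize_df; the toy DataFrame is an assumption.
import pandas as pd
from sklearn.preprocessing import Normalizer

df = pd.DataFrame({0: [1.0, 2.0], 1: [2.0, 2.0], 2: [3.0, 4.0]})  # genes x time points
normalized = df.sub(df[0], axis=0)  # subtract the t=0 column from every time point
normalized.iloc[:, :] = Normalizer(norm='l2').fit_transform(normalized)
print(normalized)  # rows now have unit L2 norm (first row ~ [0, 0.447, 0.894])
```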
vcucu/wimblepong | [
"4c9353230c64e17ab38b6b46fc6e242f636e55ef"
]
| [
"train_dqn_gigiano.py"
]
| [
"import argparse\nimport sys\nimport gym\nimport time\nimport numpy as np\nimport wimblepong\nimport torch\nimport matplotlib.pyplot as plt\n\n\ndef parse_args(args=sys.argv[1:]):\n # TODO [nice-to-have] lag continue training taking a file of weights already pretrained\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--dir\", type=str, help=\"Directory to agent 1 to be tested.\")\n parser.add_argument(\"--env\", type=str, default=\"WimblepongVisualSimpleAI-v0\",\n help=\"Environment to use\")\n parser.add_argument(\"--train_episodes\", type=int, default=100000,\n help=\"Number of episodes to train for\")\n parser.add_argument(\"--print_stats\", type=bool, default=True)\n parser.add_argument(\"--run_id\", type=int, default=0)\n return parser.parse_args(args)\n\n\ndef main(args):\n # Create a Gym environment\n env = gym.make(args.env)\n TARGET_UPDATE = 4\n glie_a = 4200\n num_episodes = args.train_episodes\n total_timesteps = 0\n sys.path.append(args.dir)\n from agents import DQN as model\n agent = model.DQNAgentG()\n\n cumulative_rewards = []\n for ep in range(num_episodes):\n # Initialize the environment and state\n state = env.reset()\n done = False\n eps = glie_a / (glie_a + ep)\n cum_reward = 0\n timesteps = 0\n # start = time.time()\n while not done:\n timesteps += 1\n total_timesteps += 1\n # Select and perform an action\n action = agent.get_action(state, eps)\n next_state, reward, done, _ = env.step(action)\n cum_reward += reward\n # Update the DQN\n agent.store_transition(state, action, next_state, reward, done)\n # net_update_start = time.time()\n agent.update_network()\n # net_update_end = time.time()\n # print(\"This network update took\", net_update_end - net_update_start)\n # Move to the next state\n state = next_state\n # end = time.time()\n # print(\"This episode took\", end - start)\n\n print(\"Episode:\", ep, \"Reward: \", cum_reward, \"epsilon:\", eps, \"timesteps:\", timesteps)\n cumulative_rewards.append(cum_reward)\n\n # Update the target network, copying all weights and biases in DQN\n if ep % TARGET_UPDATE == 0:\n agent.update_target_network()\n\n # Save the policy\n if ep % 1000 == 0:\n torch.save(agent.policy_net.state_dict(),\n \"weights_gigi_%s_%d.mdl\" % (\"PongEnv\", ep))\n\n plot_rewards(cumulative_rewards, agent)\n print('Complete, ran ', total_timesteps, 'timesteps in total')\n plt.ioff()\n plt.show()\n\n\ndef plot_rewards(rewards, agent):\n plt.figure(2)\n plt.clf()\n rewards_t = torch.tensor(rewards, dtype=torch.float)\n plt.title('Training...')\n plt.xlabel('Episode')\n plt.ylabel('Cumulative reward')\n plt.grid(True)\n plt.plot(rewards_t.numpy())\n # Take 100 episode averages and plot them too\n if len(rewards_t) >= 100:\n means = rewards_t.unfold(0, 100, 1).mean(1).view(-1)\n means = torch.cat((torch.zeros(99), means))\n plt.plot(means.numpy())\n\n plt.pause(0.001) # pause a bit so that plots are updated\n plt.savefig('train_plot.png')\n plt.show()\n\n\ndef find_nearest(array, value):\n return np.argmin(abs(array - value))\n\n\ndef discretize(x, th, x_grid, th_grid):\n x_ = find_nearest(x_grid, x)\n th_ = find_nearest(th_grid, th)\n return x_, th_\n\n\n# Entry point of the script\nif __name__ == \"__main__\":\n args = parse_args()\n main(args=args)\n"
]
| [
[
"torch.zeros",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ioff",
"torch.tensor",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.clf"
]
]
|
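A sketch of the GLIE exploration schedule used in the training loop above: `eps = a / (a + episode)` decays from 1 toward 0 as episodes accumulate, so the agent explores early and exploits late.

```python
# Epsilon decay as computed in the training loop (glie_a = 4200 as in the script).
glie_a = 4200
for ep in (0, 1000, 10000, 100000):
    eps = glie_a / (glie_a + ep)
    print(f"episode {ep:>6}: epsilon = {eps:.3f}")
# episode      0: epsilon = 1.000
# episode   1000: epsilon = 0.808
# episode  10000: epsilon = 0.296
# episode 100000: epsilon = 0.040
```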
jsmz97/pennylane | [
"de7b7c0b452c8d59867d11f84b9c332a36e08ab1"
]
| [
"pennylane/optimize/rotoselect.py"
]
| [
"# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Rotoselect gradient free optimizer\"\"\"\n\nimport numpy as np\n\nimport pennylane as qml\nfrom pennylane.utils import _flatten, unflatten\n\n\nclass RotoselectOptimizer:\n r\"\"\"Rotoselect gradient-free optimizer.\n\n The Rotoselect optimizer minimizes an objective function with respect to the rotation gates and\n parameters of a quantum circuit without the need for calculating the gradient of the function.\n The algorithm works by updating the parameters :math:`\\theta = \\theta_1, \\dots, \\theta_D`\n and rotation gate choices :math:`R = R_1,\\dots,R_D` one at a time according to a closed-form\n expression for the optimal value of the :math:`d^{th}` parameter :math:`\\theta^*_d` when the\n other parameters and gate choices are fixed:\n\n .. math:: \\theta^*_d = \\underset{\\theta_d}{\\text{argmin}}\\left<H\\right>_{\\theta_d}\n = -\\frac{\\pi}{2} - \\text{arctan2}\\left(2\\left<H\\right>_{\\theta_d=0}\n - \\left<H\\right>_{\\theta_d=\\pi/2} - \\left<H\\right>_{\\theta_d=-\\pi/2},\n \\left<H\\right>_{\\theta_d=\\pi/2} - \\left<H\\right>_{\\theta_d=-\\pi/2}\\right),\n\n where :math:`\\left<H\\right>_{\\theta_d}` is the expectation value of the objective function\n optimized over the parameter :math:`\\theta_d`. :math:`\\text{arctan2}(x, y)` computes the\n element-wise arc tangent of :math:`x/y` choosing the quadrant correctly, avoiding, in\n particular, division-by-zero when :math:`y = 0`.\n\n Which parameters and gates that should be optimized over is decided in the user-defined cost\n function, where :math:`R` is a list of parametrized rotation gates in a quantum circuit, along\n with their respective parameters :math:`\\theta` for the circuit and its gates. Note that the\n number of generators should match the number of parameters.\n\n The algorithm is described in further detail in\n `Ostaszewski et al. (2019) <https://arxiv.org/abs/1905.09692>`_.\n\n Args:\n possible_generators (list[~.Operation]): List containing the possible\n ``pennylane.ops.qubit`` operators that are allowed in the circuit.\n Default is the set of Pauli rotations :math:`\\{R_x, R_y, R_z\\}`.\n\n **Example:**\n\n Initialize the Rotoselect optimizer, set the initial values of the weights ``x``,\n choose the initial generators, and set the number of steps to optimize over.\n\n >>> opt = qml.optimize.RotoselectOptimizer()\n >>> x = [0.3, 0.7]\n >>> generators = [qml.RX, qml.RY]\n >>> n_steps = 10\n\n Set up the PennyLane circuit using the ``default.qubit`` simulator device.\n\n >>> dev = qml.device(\"default.qubit\", shots=None, wires=2)\n >>> @qml.qnode(dev)\n ... def circuit(params, generators=None): # generators will be passed as a keyword arg\n ... generators[0](params[0], wires=0)\n ... generators[1](params[1], wires=1)\n ... qml.CNOT(wires=[0, 1])\n ... return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliX(1))\n\n Define a cost function based on the above circuit.\n\n >>> def cost(x, generators):\n ... 
Z_1, X_2 = circuit(x, generators=generators)\n ... return 0.2 * Z_1 + 0.5 * X_2\n\n Run the optimization step-by-step for ``n_steps`` steps.\n\n >>> cost_rotosel = []\n >>> for _ in range(n_steps):\n ... cost_rotosel.append(cost(x, generators))\n ... x, generators = opt.step(cost, x, generators)\n\n The optimized values for x should now be stored in ``x`` together with the optimal gates for\n the circuit, while steps-vs-cost can be seen by plotting ``cost_rotosel``.\n \"\"\"\n # pylint: disable=too-few-public-methods\n\n def __init__(self, possible_generators=None):\n self.possible_generators = possible_generators or [qml.RX, qml.RY, qml.RZ]\n\n def step_and_cost(self, objective_fn, x, generators, **kwargs):\n \"\"\"Update trainable arguments with one step of the optimizer and return the corresponding\n objective function value prior to the step.\n\n Args:\n objective_fn (function): The objective function for optimization. It must have the\n signature ``objective_fn(x, generators=None)`` with a sequence of the values ``x``\n and a list of the gates ``generators`` as inputs, returning a single value.\n x (Union[Sequence[float], float]): sequence containing the initial values of the\n variables to be optimized over or a single float with the initial value\n generators (list[~.Operation]): list containing the initial ``pennylane.ops.qubit``\n operators to be used in the circuit and optimized over\n **kwargs : variable length of keyword arguments for the objective function.\n\n Returns:\n tuple: the new variable values :math:`x^{(t+1)}`, the new generators, and the objective\n function output prior to the step\n \"\"\"\n x_new, generators = self.step(objective_fn, x, generators, **kwargs)\n\n return x_new, generators, objective_fn(x, generators, **kwargs)\n\n def step(self, objective_fn, x, generators, **kwargs):\n r\"\"\"Update trainable arguments with one step of the optimizer.\n\n Args:\n objective_fn (function): The objective function for optimization. It must have the\n signature ``objective_fn(x, generators=None)`` with a sequence of the values ``x``\n and a list of the gates ``generators`` as inputs, returning a single value.\n x (Union[Sequence[float], float]): sequence containing the initial values of the\n variables to be optimized over or a single float with the initial value\n generators (list[~.Operation]): list containing the initial ``pennylane.ops.qubit``\n operators to be used in the circuit and optimized over\n **kwargs : variable length of keyword arguments for the objective function.\n\n Returns:\n array: The new variable values :math:`x^{(t+1)}` as well as the new generators.\n \"\"\"\n x_flat = np.fromiter(_flatten(x), dtype=float)\n # wrap the objective function so that it accepts the flattened parameter array\n objective_fn_flat = lambda x_flat, gen: objective_fn(\n unflatten(x_flat, x), generators=gen, **kwargs\n )\n\n try:\n assert len(x_flat) == len(generators)\n except AssertionError as e:\n raise ValueError(\n f\"Number of parameters {x} must be equal to the number of generators.\"\n ) from e\n\n for d, _ in enumerate(x_flat):\n x_flat[d], generators[d] = self._find_optimal_generators(\n objective_fn_flat, x_flat, generators, d\n )\n\n return unflatten(x_flat, x), generators\n\n def _find_optimal_generators(self, objective_fn, x, generators, d):\n r\"\"\"Optimizer for the generators.\n\n Optimizes for the best generator at position ``d``.\n\n Args:\n objective_fn (function): The objective function for optimization. 
It must have the\n signature ``objective_fn(x, generators=None)`` with a sequence of the values ``x``\n and a list of the gates ``generators`` as inputs, returning a single value.\n x (Union[Sequence[float], float]): sequence containing the initial values of the\n variables to be optimized over or a single float with the initial value\n generators (list[~.Operation]): list containing the initial ``pennylane.ops.qubit``\n operators to be used in the circuit and optimized over\n d (int): the position in the input sequence ``x`` containing the value to be optimized\n\n Returns:\n tuple: tuple containing the parameter value and generator that, at position ``d`` in\n ``x`` and ``generators``, optimizes the objective function\n \"\"\"\n params_opt_d = x[d]\n generators_opt_d = generators[d]\n params_opt_cost = objective_fn(x, generators)\n\n for generator in self.possible_generators:\n generators[d] = generator\n\n x = self._rotosolve(objective_fn, x, generators, d)\n params_cost = objective_fn(x, generators)\n\n # save the best parameter and generator for position d\n if params_cost <= params_opt_cost:\n params_opt_d = x[d]\n params_opt_cost = params_cost\n generators_opt_d = generator\n return params_opt_d, generators_opt_d\n\n @staticmethod\n def _rotosolve(objective_fn, x, generators, d):\n r\"\"\"The rotosolve step for one parameter and one set of generators.\n\n Updates the parameter :math:`\\theta_d` based on Equation 1 in\n `Ostaszewski et al. (2019) <https://arxiv.org/abs/1905.09692>`_.\n\n Args:\n objective_fn (function): The objective function for optimization. It must have the\n signature ``objective_fn(x, generators=None)`` with a sequence of the values ``x``\n and a list of the gates ``generators`` as inputs, returning a single value.\n x (Union[Sequence[float], float]): sequence containing the initial values of the\n variables to be optimized over or a single float with the initial value\n generators (list[~.Operation]): list containing the initial ``pennylane.ops.qubit``\n operators to be used in the circuit and optimized over\n d (int): the position in the input sequence ``x`` containing the value to be optimized\n\n Returns:\n array: the input sequence ``x`` with the value at position ``d`` optimized\n \"\"\"\n # helper function for x[d] = theta\n def insert(x, d, theta):\n x[d] = theta\n return x\n\n H_0 = float(objective_fn(insert(x, d, 0), generators))\n H_p = float(objective_fn(insert(x, d, np.pi / 2), generators))\n H_m = float(objective_fn(insert(x, d, -np.pi / 2), generators))\n\n a = np.arctan2(2 * H_0 - H_p - H_m, H_p - H_m)\n\n x[d] = -np.pi / 2 - a\n\n if x[d] <= -np.pi:\n x[d] += 2 * np.pi\n return x\n"
]
| [
[
"numpy.arctan2"
]
]
|
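Note on the entry above: its Rotoselect optimizer hinges on the closed-form Rotosolve update `theta_d* = -pi/2 - arctan2(2<H>_0 - <H>_{pi/2} - <H>_{-pi/2}, <H>_{pi/2} - <H>_{-pi/2})`, evaluated from three probes of the objective. A minimal sketch of that update with no PennyLane dependency; `toy_cost` is a hypothetical stand-in for the circuit expectation value, not code from the entry:

```python
import numpy as np

def rotosolve_step(cost, theta, d):
    """One closed-form Rotosolve update of parameter d (Ostaszewski et al., 2019)."""
    def at(value):
        probe = theta.copy()
        probe[d] = value
        return cost(probe)

    h_0, h_p, h_m = at(0.0), at(np.pi / 2), at(-np.pi / 2)
    theta[d] = -np.pi / 2 - np.arctan2(2 * h_0 - h_p - h_m, h_p - h_m)
    if theta[d] <= -np.pi:  # wrap back into (-pi, pi]
        theta[d] += 2 * np.pi
    return theta

# toy separable objective: each parameter sees exactly the single sinusoid the
# closed-form solution assumes, so one sweep reaches the global minimum of -2
toy_cost = lambda t: np.cos(t[0] - 0.4) + np.cos(t[1] + 1.1)

theta = np.array([0.3, 0.7])
for d in range(len(theta)):
    theta = rotosolve_step(toy_cost, theta, d)
print(theta, toy_cost(theta))  # cost approaches -2.0
```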
andrewlstewart/CS231n-Convolutional-Neural-Networks- | [
"3a6066d790bd654d5fe3ad670c2308e8b2c05d93"
]
| [
"Assignment_03/test_saliency_pytorch.py"
]
| [
"import torch\nimport torchvision\nimport torchvision.transforms as T\nimport random\nimport numpy as np\nfrom scipy.ndimage.filters import gaussian_filter1d\nimport matplotlib.pyplot as plt\nfrom cs231n.image_utils import SQUEEZENET_MEAN, SQUEEZENET_STD\nfrom PIL import Image\n\ndef preprocess(img, size=224):\n transform = T.Compose([\n T.Resize(size),\n T.ToTensor(),\n T.Normalize(mean=SQUEEZENET_MEAN.tolist(),\n std=SQUEEZENET_STD.tolist()),\n T.Lambda(lambda x: x[None]),\n ])\n return transform(img)\n\ndef deprocess(img, should_rescale=True):\n transform = T.Compose([\n T.Lambda(lambda x: x[0]),\n T.Normalize(mean=[0, 0, 0], std=(1.0 / SQUEEZENET_STD).tolist()),\n T.Normalize(mean=(-SQUEEZENET_MEAN).tolist(), std=[1, 1, 1]),\n T.Lambda(rescale) if should_rescale else T.Lambda(lambda x: x),\n T.ToPILImage(),\n ])\n return transform(img)\n\ndef rescale(x):\n low, high = x.min(), x.max()\n x_rescaled = (x - low) / (high - low)\n return x_rescaled\n \ndef blur_image(X, sigma=1):\n X_np = X.cpu().clone().numpy()\n X_np = gaussian_filter1d(X_np, sigma, axis=2)\n X_np = gaussian_filter1d(X_np, sigma, axis=3)\n X.copy_(torch.Tensor(X_np).type_as(X))\n return X\n\n# Download and load the pretrained SqueezeNet model.\nmodel = torchvision.models.squeezenet1_1(pretrained=True)\n\n# We don't want to train the model, so tell PyTorch not to compute gradients\n# with respect to model parameters.\nfor param in model.parameters():\n param.requires_grad = False\n \n# you may see warning regarding initialization deprecated, that's fine, please continue to next steps\n\nfrom cs231n.data_utils import load_imagenet_val\nX, y, class_names = load_imagenet_val(num=5)\n\n# plt.figure(figsize=(12, 6))\n# for i in range(5):\n# plt.subplot(1, 5, i + 1)\n# plt.imshow(X[i])\n# plt.title(class_names[y[i]])\n# plt.axis('off')\n# plt.gcf().tight_layout()\n# plt.show()\n\n# Example of using gather to select one entry from each row in PyTorch\ndef gather_example():\n N, C = 4, 5\n s = torch.randn(N, C)\n y = torch.LongTensor([1, 2, 1, 3])\n print(s)\n print(y)\n print(s.gather(1, y.view(-1, 1)).squeeze())\ngather_example()\n\ndef compute_saliency_maps(X, y, model):\n \"\"\"\n Compute a class saliency map using the model for images X and labels y.\n\n Input:\n - X: Input images; Tensor of shape (N, 3, H, W)\n - y: Labels for X; LongTensor of shape (N,)\n - model: A pretrained CNN that will be used to compute the saliency map.\n\n Returns:\n - saliency: A Tensor of shape (N, H, W) giving the saliency maps for the input\n images.\n \"\"\"\n # Make sure the model is in \"test\" mode\n model.eval()\n \n # Make input tensor require gradient\n X.requires_grad_()\n \n saliency = None\n ##############################################################################\n # TODO: Implement this function. Perform a forward and backward pass through #\n # the model to compute the gradient of the correct class score with respect #\n # to each input image. You first want to compute the loss over the correct #\n # scores (we'll combine losses across a batch by summing), and then compute #\n # the gradients with a backward pass. 
#\n ##############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n scores = model(X)\n correct_scores = scores.gather(1, y.view(-1, 1)).squeeze()\n loss = correct_scores.sum()\n loss.backward()\n saliency = torch.max(X.grad.data.abs(), dim=1)[0]\n\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ##############################################################################\n # END OF YOUR CODE #\n ##############################################################################\n return saliency\n\ndef show_saliency_maps(X, y):\n # Convert X and y from numpy arrays to Torch Tensors\n X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0)\n y_tensor = torch.LongTensor(y)\n\n # Compute saliency maps for images in X\n saliency = compute_saliency_maps(X_tensor, y_tensor, model)\n\n # Convert the saliency map from Torch Tensor to numpy array and show images\n # and saliency maps together.\n saliency = saliency.numpy()\n N = X.shape[0]\n for i in range(N):\n plt.subplot(2, N, i + 1)\n plt.imshow(X[i])\n plt.axis('off')\n plt.title(class_names[y[i]])\n plt.subplot(2, N, N + i + 1)\n plt.imshow(saliency[i], cmap=plt.cm.hot)\n plt.axis('off')\n plt.gcf().set_size_inches(12, 5)\n plt.show()\n\n# show_saliency_maps(X, y)\n\ndef make_fooling_image(X, target_y, model):\n \"\"\"\n Generate a fooling image that is close to X, but that the model classifies\n as target_y.\n\n Inputs:\n - X: Input image; Tensor of shape (1, 3, 224, 224)\n - target_y: An integer in the range [0, 1000)\n - model: A pretrained CNN\n\n Returns:\n - X_fooling: An image that is close to X, but that is classifed as target_y\n by the model.\n \"\"\"\n # Initialize our fooling image to the input image, and make it require gradient\n X_fooling = X.clone()\n X_fooling = X_fooling.requires_grad_()\n \n learning_rate = 1\n ##############################################################################\n # TODO: Generate a fooling image X_fooling that the model will classify as #\n # the class target_y. You should perform gradient ascent on the score of the #\n # target class, stopping when the model is fooled. #\n # When computing an update step, first normalize the gradient: #\n # dX = learning_rate * g / ||g||_2 #\n # #\n # You should write a training loop. #\n # #\n # HINT: For most examples, you should be able to generate a fooling image #\n # in fewer than 100 iterations of gradient ascent. #\n # You can print your progress over iterations to check your algorithm. 
#\n ##############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n for _ in range(100):\n score = model(X_fooling)\n if torch.argmax(score) == target_y:\n break\n correct_score = score[0, target_y]\n correct_score.backward()\n dX = learning_rate * (X_fooling.grad / torch.norm(X_fooling.grad, p=2))\n X_fooling.data += dX\n\n model.zero_grad()\n X_fooling.grad.data.zero_()\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ##############################################################################\n # END OF YOUR CODE #\n ##############################################################################\n return X_fooling\n\nidx = 0\ntarget_y = 6\n\nX_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0)\nX_fooling = make_fooling_image(X_tensor[idx:idx+1], target_y, model)\n\nscores = model(X_fooling)\n# assert target_y == scores.data.max(1)[1][0].item(), 'The model is not fooled!'\n\nX_fooling_np = deprocess(X_fooling.clone())\nX_fooling_np = np.asarray(X_fooling_np).astype(np.uint8)\n\ndef jitter(X, ox, oy):\n \"\"\"\n Helper function to randomly jitter an image.\n \n Inputs\n - X: PyTorch Tensor of shape (N, C, H, W)\n - ox, oy: Integers giving number of pixels to jitter along W and H axes\n \n Returns: A new PyTorch Tensor of shape (N, C, H, W)\n \"\"\"\n if ox != 0:\n left = X[:, :, :, :-ox]\n right = X[:, :, :, -ox:]\n X = torch.cat([right, left], dim=3)\n if oy != 0:\n top = X[:, :, :-oy]\n bottom = X[:, :, -oy:]\n X = torch.cat([bottom, top], dim=2)\n return X\n\ndef create_class_visualization(target_y, model, dtype, **kwargs):\n \"\"\"\n Generate an image to maximize the score of target_y under a pretrained model.\n \n Inputs:\n - target_y: Integer in the range [0, 1000) giving the index of the class\n - model: A pretrained CNN that will be used to generate the image\n - dtype: Torch datatype to use for computations\n \n Keyword arguments:\n - l2_reg: Strength of L2 regularization on the image\n - learning_rate: How big of a step to take\n - num_iterations: How many iterations to use\n - blur_every: How often to blur the image as an implicit regularizer\n - max_jitter: How much to gjitter the image as an implicit regularizer\n - show_every: How often to show the intermediate result\n \"\"\"\n model.type(dtype)\n l2_reg = kwargs.pop('l2_reg', 1e-3)\n learning_rate = kwargs.pop('learning_rate', 25)\n num_iterations = kwargs.pop('num_iterations', 100)\n blur_every = kwargs.pop('blur_every', 10)\n max_jitter = kwargs.pop('max_jitter', 16)\n show_every = kwargs.pop('show_every', 25)\n\n # Randomly initialize the image as a PyTorch Tensor, and make it requires gradient.\n img = torch.randn(1, 3, 224, 224).mul_(1.0).type(dtype).requires_grad_()\n\n for t in range(num_iterations):\n # Randomly jitter the image a bit; this gives slightly nicer results\n ox, oy = random.randint(0, max_jitter), random.randint(0, max_jitter)\n img.data.copy_(jitter(img.data, ox, oy))\n\n ########################################################################\n # TODO: Use the model to compute the gradient of the score for the #\n # class target_y with respect to the pixels of the image, and make a #\n # gradient step on the image using the learning rate. Don't forget the #\n # L2 regularization term! #\n # Be very careful about the signs of elements in your code. 
#\n ########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n score = model(img)\n correct_score = score[0, target_y]\n not_image_i_star = correct_score - (l2_reg * torch.norm(img, p=2))\n # i_star = correct_score - l2_reg * img / torch.norm(img, p=2) # Isn't this the way they say to do it in the equation provided?\n not_image_i_star.backward()\n dX = learning_rate * img.grad / torch.norm(img.grad, p=2)\n img.data += dX\n\n model.zero_grad()\n img.grad.data.zero_()\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ########################################################################\n # END OF YOUR CODE #\n ########################################################################\n \n # Undo the random jitter\n img.data.copy_(jitter(img.data, -ox, -oy))\n\n # As regularizer, clamp and periodically blur the image\n for c in range(3):\n lo = float(-SQUEEZENET_MEAN[c] / SQUEEZENET_STD[c])\n hi = float((1.0 - SQUEEZENET_MEAN[c]) / SQUEEZENET_STD[c])\n img.data[:, c].clamp_(min=lo, max=hi)\n if t % blur_every == 0:\n blur_image(img.data, sigma=0.5)\n \n # Periodically show the image\n if t == 0 or (t + 1) % show_every == 0 or t == num_iterations - 1:\n plt.imshow(deprocess(img.data.clone().cpu()))\n class_name = class_names[target_y]\n plt.title('%s\\nIteration %d / %d' % (class_name, t + 1, num_iterations))\n plt.gcf().set_size_inches(4, 4)\n plt.axis('off')\n plt.show()\n\n return deprocess(img.data.cpu())\n\ndtype = torch.FloatTensor\n# dtype = torch.cuda.FloatTensor # Uncomment this to use GPU\nmodel.type(dtype)\n\ntarget_y = 76 # Tarantula\n# target_y = 78 # Tick\n# target_y = 187 # Yorkshire Terrier\n# target_y = 683 # Oboe\n# target_y = 366 # Gorilla\n# target_y = 604 # Hourglass\nout = create_class_visualization(target_y, model, dtype)\n\n# target_y = 78 # Tick\n# target_y = 187 # Yorkshire Terrier\n# target_y = 683 # Oboe\n# target_y = 366 # Gorilla\n# target_y = 604 # Hourglass\ntarget_y = np.random.randint(1000)\nprint(class_names[target_y])\nX = create_class_visualization(target_y, model, dtype)\n\nprint('stall')"
]
| [
[
"torch.cat",
"torch.argmax",
"numpy.asarray",
"torch.norm",
"matplotlib.pyplot.title",
"torch.LongTensor",
"scipy.ndimage.filters.gaussian_filter1d",
"numpy.random.randint",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"torch.Tensor",
"torch.randn",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.subplot"
]
]
|
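Note on the entry above: the filled-in saliency TODO reduces to four steps, forward pass, gather the correct-class scores, backward on their sum, then take the channel-wise max of the absolute input gradient. A self-contained sketch of that pattern; the tiny linear `model` is a hypothetical untrained stand-in for the pretrained SqueezeNet, and the image size is shrunk for speed:

```python
import torch
import torch.nn as nn

# hypothetical stand-in for the pretrained CNN used in the entry above
model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 8 * 8, 10))
model.eval()

X = torch.randn(4, 3, 8, 8, requires_grad=True)  # N=4 tiny "images"
y = torch.tensor([1, 0, 3, 7])                   # correct class per image

scores = model(X)                                    # (N, 10)
correct = scores.gather(1, y.view(-1, 1)).squeeze()  # correct-class scores, (N,)
correct.sum().backward()   # summing is safe: each image only affects its own score
saliency = X.grad.abs().max(dim=1)[0]                # (N, 8, 8), max over channels
print(saliency.shape)
```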
JokerYan/ZOO-Attack | [
"f81ca0d7611e697f7d5e937da8a64c4b80ad5dbc"
]
| [
"l0_attack.py"
]
| [
"## l0_attack.py -- attack a network optimizing for l_0 distance\n##\n## Copyright (C) IBM Corp, 2017-2018\n## Copyright (C) 2016, Nicholas Carlini <[email protected]>.\n##\n## This program is licenced under the BSD 2-Clause licence,\n## contained in the LICENCE file in this directory.\n\nimport sys\nimport tensorflow as tf\nimport numpy as np\n\nMAX_ITERATIONS = 1000 # number of iterations to perform gradient descent\nABORT_EARLY = True # abort gradient descent upon first valid solution\nLEARNING_RATE = 1e-2 # larger values converge faster to less accurate results\nINITIAL_CONST = 1e-3 # the first value of c to start at\nLARGEST_CONST = 2e6 # the largest value of c to go up to before giving up\nREDUCE_CONST = False # try to lower c each iteration; faster to set to false\nTARGETED = True # should we target one specific class? or just be wrong?\nCONST_FACTOR = 2.0 # f>1, rate at which we increase constant, smaller better\n\nclass CarliniL0:\n def __init__(self, sess, model,\n targeted = TARGETED, learning_rate = LEARNING_RATE,\n max_iterations = MAX_ITERATIONS, abort_early = ABORT_EARLY,\n initial_const = INITIAL_CONST, largest_const = LARGEST_CONST,\n reduce_const = REDUCE_CONST, const_factor = CONST_FACTOR,\n independent_channels = False):\n \"\"\"\n The L_0 optimized attack. \n\n Returns adversarial examples for the supplied model.\n\n targeted: True if we should perform a targetted attack, False otherwise.\n learning_rate: The learning rate for the attack algorithm. Smaller values\n produce better results but are slower to converge.\n max_iterations: The maximum number of iterations. Larger values are more\n accurate; setting too small will require a large learning rate and will\n produce poor results.\n abort_early: If true, allows early aborts if gradient descent gets stuck.\n initial_const: The initial tradeoff-constant to use to tune the relative\n importance of distance and confidence. Should be set to a very small\n value (but positive).\n largest_const: The largest constant to use until we report failure. Should\n be set to a very large value.\n const_factor: The rate at which we should increase the constant, when the\n previous constant failed. 
Should be greater than one, smaller is better.\n independent_channels: set to false optimizes for number of pixels changed,\n set to true (not recommended) returns number of channels changed.\n \"\"\"\n\n self.model = model\n self.sess = sess\n\n self.TARGETED = targeted\n self.LEARNING_RATE = learning_rate\n self.MAX_ITERATIONS = max_iterations\n self.ABORT_EARLY = abort_early\n self.INITIAL_CONST = initial_const\n self.LARGEST_CONST = largest_const\n self.REDUCE_CONST = reduce_const\n self.const_factor = const_factor\n self.independent_channels = independent_channels\n\n self.grad = self.gradient_descent(sess, model)\n\n def gradient_descent(self, sess, model):\n def compare(x,y):\n if self.TARGETED:\n return x == y\n else:\n return x != y\n shape = (1,model.image_size,model.image_size,model.num_channels)\n \n # the variable to optimize over\n modifier = tf.Variable(np.zeros(shape,dtype=np.float32))\n\n # the variables we're going to hold, use for efficiency\n canchange = tf.Variable(np.zeros(shape),dtype=np.float32)\n simg = tf.Variable(np.zeros(shape,dtype=np.float32))\n original = tf.Variable(np.zeros(shape,dtype=np.float32))\n timg = tf.Variable(np.zeros(shape,dtype=np.float32))\n tlab = tf.Variable(np.zeros((1,model.num_labels),dtype=np.float32))\n const = tf.placeholder(tf.float32, [])\n\n # and the assignment to set the variables\n assign_modifier = tf.placeholder(np.float32,shape)\n assign_canchange = tf.placeholder(np.float32,shape)\n assign_simg = tf.placeholder(np.float32,shape)\n assign_original = tf.placeholder(np.float32,shape)\n assign_timg = tf.placeholder(np.float32,shape)\n assign_tlab = tf.placeholder(np.float32,(1,self.model.num_labels))\n\n # these are the variables to initialize when we run\n set_modifier = tf.assign(modifier, assign_modifier)\n setup = []\n setup.append(tf.assign(canchange, assign_canchange))\n setup.append(tf.assign(timg, assign_timg))\n setup.append(tf.assign(original, assign_original))\n setup.append(tf.assign(simg, assign_simg))\n setup.append(tf.assign(tlab, assign_tlab))\n \n newimg = (tf.tanh(modifier + simg)/2)*canchange+(1-canchange)*original\n \n output = model.predict(newimg)\n \n real = tf.reduce_sum((tlab)*output,1)\n other = tf.reduce_max((1-tlab)*output - (tlab*10000),1)\n if self.TARGETED:\n # if targetted, optimize for making the other class most likely\n loss1 = tf.maximum(0.0, other-real+.01)\n else:\n # if untargeted, optimize for making this class least likely.\n loss1 = tf.maximum(0.0, real-other+.01)\n\n # sum up the losses\n loss2 = tf.reduce_sum(tf.square(newimg-tf.tanh(timg)/2))\n loss = const*loss1+loss2\n \n outgrad = tf.gradients(loss, [modifier])[0]\n \n # setup the adam optimizer and keep track of variables we're creating\n start_vars = set(x.name for x in tf.global_variables())\n optimizer = tf.train.AdamOptimizer(self.LEARNING_RATE)\n train = optimizer.minimize(loss, var_list=[modifier])\n\n end_vars = tf.global_variables()\n new_vars = [x for x in end_vars if x.name not in start_vars]\n init = tf.variables_initializer(var_list=[modifier,canchange,simg,\n original,timg,tlab]+new_vars)\n\n \n def doit(oimgs, labs, starts, valid, CONST):\n # convert to tanh-space\n imgs = np.arctanh(np.array(oimgs)*1.999999)\n starts = np.arctanh(np.array(starts)*1.999999)\n\n # initialize the variables\n sess.run(init)\n sess.run(setup, {assign_timg: imgs, \n assign_tlab:labs, \n assign_simg: starts, \n assign_original: oimgs,\n assign_canchange: valid})\n\n while CONST < self.LARGEST_CONST:\n # try solving for each value of the 
constant\n print('try const', CONST)\n for step in range(self.MAX_ITERATIONS):\n feed_dict={const: CONST}\n\n # remember the old value\n oldmodifier = self.sess.run(modifier)\n\n if step%(self.MAX_ITERATIONS//10) == 0:\n print(step,*sess.run((loss1,loss2),feed_dict=feed_dict))\n\n # perform the update step\n _, works = sess.run([train, loss1], feed_dict=feed_dict)\n \n if works < .0001 and (self.ABORT_EARLY or step == CONST-1):\n # it worked previously, restore the old value and finish\n self.sess.run(set_modifier, {assign_modifier: oldmodifier})\n grads, scores, nimg = sess.run((outgrad, output,newimg),\n feed_dict=feed_dict)\n l2s=np.square(nimg-np.tanh(imgs)/2).sum(axis=(1,2,3))\n return grads, scores, nimg, CONST\n\n # we didn't succeed, increase constant and try again\n CONST *= self.const_factor\n return doit\n \n def attack(self, imgs, targets):\n \"\"\"\n Perform the L_0 attack on the given images for the given targets.\n\n If self.targeted is true, then the targets represents the target labels.\n If self.targeted is false, then targets are the original class labels.\n \"\"\"\n r = []\n for i,(img,target) in enumerate(zip(imgs, targets)):\n print(\"Attack iteration\",i)\n r.extend(self.attack_single(img, target))\n return np.array(r)\n\n def attack_single(self, img, target):\n \"\"\"\n Run the attack on a single image and label\n \"\"\"\n\n # the pixels we can change\n valid = np.ones((1,self.model.image_size,self.model.image_size,self.model.num_channels))\n\n # the previous image\n prev = np.copy(img).reshape((1,self.model.image_size,self.model.image_size,\n self.model.num_channels))\n last_solution = None\n const = self.INITIAL_CONST\n \n while True:\n # try to solve given this valid map\n res = self.grad([np.copy(img)], [target], np.copy(prev), \n valid, const)\n if res == None:\n # the attack failed, we return this as our final answer\n print(\"Final answer\",equal_count)\n return last_solution\n \n # the attack succeeded, now we pick new pixels to set to 0\n restarted = False\n gradientnorm, scores, nimg, const = res\n if self.REDUCE_CONST: const /= 2\n \n equal_count = self.model.image_size**2-np.sum(np.all(np.abs(img-nimg[0])<.0001,axis=2))\n print(\"Forced equal:\",np.sum(1-valid),\n \"Equal count:\",equal_count)\n if np.sum(valid) == 0:\n # if no pixels changed, return \n return [img]\n \n if self.independent_channels:\n # we are allowed to change each channel independently\n valid = valid.flatten()\n totalchange = abs(nimg[0]-img)*np.abs(gradientnorm[0])\n else:\n # we care only about which pixels change, not channels independently\n # compute total change as sum of change for each channel\n valid = valid.reshape((self.model.image_size**2,self.model.num_channels))\n totalchange = abs(np.sum(nimg[0]-img,axis=2))*np.sum(np.abs(gradientnorm[0]),axis=2)\n totalchange = totalchange.flatten()\n\n # set some of the pixels to 0 depending on their total change\n did = 0\n for e in np.argsort(totalchange):\n if np.all(valid[e]):\n did += 1\n valid[e] = 0\n\n if totalchange[e] > .01:\n # if this pixel changed a lot, skip\n break\n if did >= .3*equal_count**.5:\n # if we changed too many pixels, skip\n break\n\n valid = np.reshape(valid,(1,self.model.image_size,self.model.image_size,-1))\n print(\"Now forced equal:\",np.sum(1-valid))\n \n last_solution = prev = nimg\n"
]
| [
[
"numpy.copy",
"tensorflow.gradients",
"tensorflow.tanh",
"tensorflow.global_variables",
"numpy.tanh",
"numpy.array",
"tensorflow.train.AdamOptimizer",
"numpy.reshape",
"numpy.zeros",
"tensorflow.placeholder",
"tensorflow.reduce_sum",
"numpy.argsort",
"tensorflow.assign",
"numpy.sum",
"numpy.ones",
"tensorflow.reduce_max",
"tensorflow.variables_initializer",
"numpy.abs",
"numpy.all",
"tensorflow.maximum"
]
]
|
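Note on the entry above: the L0 attack optimizes in tanh space, `newimg = (tanh(modifier + simg)/2)*canchange + (1-canchange)*original`, so that any unconstrained perturbation maps back into the valid pixel box while a per-pixel mask freezes eliminated pixels. A NumPy-only sketch of that change of variables, assuming pixel values in (-0.5, 0.5) as in the attack code; the mask choice is illustrative:

```python
import numpy as np

# pixels assumed in (-0.5, 0.5), matching the attack code above
img = np.random.uniform(-0.4, 0.4, size=(1, 4, 4, 3))

# forward map into unconstrained space; the 1.999999 factor keeps
# arctanh away from its singularities at +-1
w = np.arctanh(img * 1.999999)

# any perturbation of w maps back strictly inside the valid box
adv = np.tanh(w + np.random.normal(scale=0.5, size=w.shape)) / 2
assert adv.min() > -0.5 and adv.max() < 0.5

# the "canchange" mask then pins pixels the attack has forced to stay equal
mask = np.ones_like(img)
mask[0, 0, 0, :] = 0  # e.g. freeze the first pixel at its original value
mixed = adv * mask + (1 - mask) * img
```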
BoscoHan/ECE493 | [
"ce02e7c1366575c6a2f2192a86f51166d732473e"
]
| [
"training_scripts/train_ga_selfplay.py"
]
| [
"# Trains an agent from scratch (no existing AI) using evolution\n# GA with no cross-over, just mutation, and random tournament selection\n# Not optimized for speed, and just uses a single CPU (mainly for simplicity)\n\nimport os\nimport json\nimport numpy as np\nimport gym\nimport slimevolleygym\nimport slimevolleygym.mlp as mlp\nfrom slimevolleygym.mlp import Model\nfrom slimevolleygym import multiagent_rollout as rollout\n\n# Settings\nrandom_seed = 612\npopulation_size = 128\ntotal_tournaments = 500000\nsave_freq = 1000\n\ndef mutate(length, mutation_rate, mutation_sigma):\n # (not used, in case I wanted to do partial mutations)\n # create an additive mutation vector of some size\n mask = np.random.randint(int(1/mutation_rate), size=length)\n mask = 1-np.minimum(mask, 1)\n noise = np.random.normal(size=length) * mutation_sigma\n return mask * noise\n\n# Log results\nlogdir = \"ga_selfplay\"\nif not os.path.exists(logdir):\n os.makedirs(logdir)\n\n\n# Create two instances of a feed forward policy we may need later.\npolicy_left = Model(mlp.games['slimevolleylite'])\npolicy_right = Model(mlp.games['slimevolleylite'])\nparam_count = policy_left.param_count\nprint(\"Number of parameters of the neural net policy:\", param_count) # 273 for slimevolleylite\n\n# store our population here\npopulation = np.random.normal(size=(population_size, param_count)) * 0.5 # each row is an agent.\nwinning_streak = [0] * population_size # store the number of wins for this agent (including mutated ones)\n\n# create the gym environment, and seed it\nenv = gym.make(\"SlimeVolley-v0\")\nenv.seed(random_seed)\nnp.random.seed(random_seed)\n\nhistory = []\nfor tournament in range(1, total_tournaments+1):\n\n m, n = np.random.choice(population_size, 2, replace=False)\n\n policy_left.set_model_params(population[m])\n policy_right.set_model_params(population[n])\n\n # the match between the mth and nth member of the population\n score, length = rollout(env, policy_right, policy_left)\n\n history.append(length)\n # if score is positive, it means policy_right won.\n if score == 0: # if the game is tied, add noise to the left agent.\n population[m] += np.random.normal(size=param_count) * 0.1\n if score > 0:\n population[m] = population[n] + np.random.normal(size=param_count) * 0.1\n winning_streak[m] = winning_streak[n]\n winning_streak[n] += 1\n if score < 0:\n population[n] = population[m] + np.random.normal(size=param_count) * 0.1\n winning_streak[n] = winning_streak[m]\n winning_streak[m] += 1\n\n if tournament % save_freq == 0:\n model_filename = os.path.join(logdir, \"ga_\"+str(tournament).zfill(8)+\".json\")\n with open(model_filename, 'wt') as out:\n record_holder = np.argmax(winning_streak)\n record = winning_streak[record_holder]\n json.dump([population[record_holder].tolist(), record], out, sort_keys=True, indent=0, separators=(',', ': '))\n\n if (tournament ) % 100 == 0:\n record_holder = np.argmax(winning_streak)\n record = winning_streak[record_holder]\n print(\"tournament:\", tournament,\n \"best_winning_streak:\", record,\n \"mean_duration\", np.mean(history),\n \"stdev:\", np.std(history),\n )\n history = []"
]
| [
[
"numpy.random.normal",
"numpy.random.choice",
"numpy.minimum",
"numpy.random.seed",
"numpy.mean",
"numpy.std",
"numpy.argmax"
]
]
|
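Note on the entry above: the GA loop is random tournament selection with mutation only, the loser of a random pairing is replaced by a noisy copy of the winner, which also inherits the winner's streak counter. A stripped-down sketch of that selection rule on a scalar fitness; `fitness` is a hypothetical stand-in for the slime volleyball match rollout, and the population and noise sizes are illustrative:

```python
import numpy as np

rng = np.random.default_rng(612)              # seed mirrors the entry's random_seed
population = rng.normal(size=(32, 8)) * 0.5   # 32 agents, 8 parameters each
fitness = lambda p: -np.sum(p ** 2)           # stand-in for the match rollout score

for _ in range(2000):
    m, n = rng.choice(len(population), 2, replace=False)
    # in the entry, "score > 0" means the right-side agent won; here we compare directly
    winner, loser = (n, m) if fitness(population[n]) > fitness(population[m]) else (m, n)
    # the loser becomes a mutated copy of the winner (mutation only, no crossover)
    population[loser] = population[winner] + rng.normal(size=8) * 0.1

best = max(population, key=fitness)
print(fitness(best))  # climbs toward the optimum at 0
```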
YANHAOsTHINGSdb/QUANTAXIS | [
"791e7d6d368ab2581b6c32f5ad9918f44cd4065f"
]
| [
"QUANTAXIS/QAARP/QARisk.py"
]
| [
"# coding:utf-8\n#\n# The MIT License (MIT)\n#\n# Copyright (c) 2016-2017 yutiansut/QUANTAXIS\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom QUANTAXIS.QABacktest import QAAnalysis\nfrom QUANTAXIS.QAUtil import QA_util_log_expection, QA_util_log_info\nimport math\nimport numpy\nimport pandas\n\"\"\"收益性的包括年化收益率、净利润、总盈利、总亏损、有效年化收益率、资金使用率。\n\n风险性主要包括胜率、平均盈亏比、最大回撤比例、最大连续亏损次数、最大连续盈利次数、持仓时间占比、贝塔。\n\n综合性指标主要包括风险收益比,夏普比例,波动率,VAR,偏度,峰度等\"\"\"\n\n\"\"\"\nthe account datastruct should be a standard struct which can be directly sended to another function\n\"\"\"\n\n\ndef QA_risk_eva_account(message, days, client):\n cookie = message['header']['cookie']\n account = message['body']['account']\n # 绩效表现指标分析\n \"\"\" \n message= {\n 'annualized_returns':annualized_returns,\n 'benchmark_annualized_returns':benchmark_annualized_returns,\n 'benchmark_assest':benchmark_assest,\n 'vol':volatility_year,\n 'benchmark_vol':benchmark_volatility_year,\n 'sharpe':sharpe,\n 'alpha':alpha,\n 'beta':beta,\n 'max_drop':max_drop,\n 'win_rate':win_rate}\n \"\"\"\n try:\n # 1.可用资金占当前总资产比重\n risk_account_freeCash_currentAssest = QA_risk_account_freeCash_currentAssest(\n float(account['assest_free']), float(account['assest_now']))\n # 2.当前策略速动比率(流动资产/流动负债)\n risk_account_freeCash_initAssest = QA_risk_account_freeCash_initAssest(\n account['assest_free'], account['init_assest'])\n risk_account_freeCash_frozenAssest = QA_risk_account_freeCash_frozenAssest(\n float(account['assest_free']), float(account['assest_fix']))\n\n return {\"\"}\n\n except:\n QA_util_log_expection('error in risk evaluation')\n\n\ndef QA_risk_account_freeCash_initAssest(freeCash, initAssest):\n try:\n result = float(freeCash) / float(initAssest)\n return result\n except:\n return 0\n QA_util_log_expection('error in QA_risk_account_freeCash_initAssest')\n QA_util_log_expection('freeCash: ' + str(freeCash))\n QA_util_log_expection('currentAssest: ' + str(initAssest))\n QA_util_log_expection('expected result: ' +\n str(float(freeCash) / float(initAssest)))\n\n\ndef QA_risk_account_freeCash_currentAssest(freeCash, currentAssest):\n try:\n result = float(freeCash) / float(currentAssest)\n return result\n except:\n return 0\n QA_util_log_expection(\n 'error in QA_risk_account_freeCash_currentAssest')\n QA_util_log_expection('freeCash: ' + str(freeCash))\n QA_util_log_expection('currentAssest: ' + str(currentAssest))\n QA_util_log_expection('expected result: ' +\n str(float(freeCash) / float(currentAssest)))\n\n\ndef 
QA_risk_account_freeCash_frozenAssest(freeCash, frozenAssest):\n try:\n result = float(freeCash) / float(frozenAssest)\n return result\n except:\n return 0\n QA_util_log_expection('error in QA_risk_account_freeCash_frozenAssest')\n QA_util_log_expection('freeCash: ' + str(freeCash))\n QA_util_log_expection('currentAssest: ' + str(frozenAssest))\n QA_util_log_expection('expected result: ' +\n str(float(freeCash) / float(frozenAssest)))\n\n\ndef QA_risk_calc_assets(trade_history, assets):\n assets_d = []\n trade_date = []\n for i in range(0, len(trade_history), 1):\n if trade_history[i][0] not in trade_date:\n trade_date.append(trade_history[i][0])\n assets_d.append(assets[i])\n else:\n assets_d.pop(-1)\n assets_d.append(assets[i])\n\n return assets_d\n\n\ndef QA_risk_result_check(datelist, message):\n pass\n\n\ndef QA_risk_calc_benchmark(benchmark_data, init_assets):\n\n return list(benchmark_data['close'] / float(benchmark_data['open'][0]) * float(init_assets))\n\n\ndef QA_risk_calc_alpha(annualized_returns, benchmark_annualized_returns, beta, r):\n\n alpha = (annualized_returns - r) - (beta) * \\\n (benchmark_annualized_returns - r)\n return alpha\n\n\ndef QA_risk_calc_beta(assest_profit, benchmark_profit):\n if len(assest_profit) < len(benchmark_profit):\n for i in range(0, len(benchmark_profit) - len(assest_profit), 1):\n assest_profit.append(0)\n elif len(assest_profit) > len(benchmark_profit):\n for i in range(0, len(assest_profit) - len(benchmark_profit), 1):\n benchmark_profit.append(0)\n calc_cov = numpy.cov(assest_profit, benchmark_profit)\n beta = calc_cov[0, 1] / calc_cov[1, 1]\n return beta\n\n\ndef QA_risk_calc_profit(assest_history):\n return (assest_history[-1] / assest_history[1]) - 1\n\n\ndef QA_risk_calc_profit_per_year(assest_history, days):\n return math.pow(float(assest_history[-1]) / float(assest_history[0]), 250.0 / float(days)) - 1.0\n\n\ndef QA_risk_calc_profit_matrix(assest_history):\n assest_profit = []\n if len(assest_history) > 1:\n assest_profit = [assest_history[i + 1] / assest_history[i] -\n 1.0 for i in range(len(assest_history) - 1)]\n return assest_profit\n\n\ndef QA_risk_calc_volatility(assest_profit_matrix):\n # 策略每日收益的年化标准差\n assest_profit = assest_profit_matrix\n\n volatility_day = numpy.std(assest_profit)\n volatility_year = volatility_day * math.sqrt(250)\n return volatility_year\n\n\ndef QA_risk_calc_dropback_max(history):\n drops = []\n for i in range(1, len(history), 1):\n maxs = max(history[:i])\n cur = history[i - 1]\n drop = 1 - cur / maxs\n drops.append(drop)\n max_drop = max(drops)\n return max_drop\n\n\ndef QA_risk_calc_sharpe(annualized_returns, r, volatility_year):\n '计算夏普比率'\n return (annualized_returns - r) / volatility_year\n\n\ndef QA_risk_calc_trade_date(history):\n '计算交易日期'\n trade_date = []\n\n # trade_date_sse.index(history[-1][0])-trade_date_sse.index(history[0][0])\n for i in range(0, len(history), 1):\n if history[i][0] not in trade_date:\n trade_date.append(history[i][0])\n return trade_date\n\n\ndef QA_risk_calc_trade_time_profit():\n pass\n\n\ndef QA_risk_calc_trade_time_loss():\n pass\n\n\ndef QA_risk_calc_win_rate(profit_day):\n # 大于0的次数\n abovez = 0\n belowz = 0\n for i in range(0, len(profit_day) - 1, 1):\n if profit_day[i] > 0:\n abovez = abovez + 1\n elif profit_day[i] < 0:\n belowz = belowz + 1\n if belowz == 0:\n belowz = 1\n if abovez == 0:\n abovez = 1\n win_rate = abovez / (abovez + belowz)\n return win_rate\n\n\nclass QA_Risk():\n pass\n"
]
| [
[
"numpy.std",
"numpy.cov"
]
]
|
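Note on the entry above: `QA_risk_calc_dropback_max` scans running peaks of the equity curve, and `QA_risk_calc_sharpe` annualizes with 250 trading days and a sqrt(250) volatility scaling. A vectorized sketch of those two headline metrics, equivalent in spirit but not taken from the file:

```python
import numpy as np

def max_drawdown(equity):
    """Largest peak-to-trough decline of an equity curve, as a fraction."""
    equity = np.asarray(equity, dtype=float)
    running_peak = np.maximum.accumulate(equity)
    return float(np.max(1.0 - equity / running_peak))

def annualized_sharpe(daily_returns, risk_free=0.0, periods=250):
    """Sharpe ratio from daily simple returns, annualized over `periods` days."""
    r = np.asarray(daily_returns, dtype=float)
    return (r.mean() * periods - risk_free) / (r.std() * np.sqrt(periods))

equity = [100, 104, 101, 98, 105, 110, 99]
print(max_drawdown(equity))  # (110 - 99) / 110 = 0.1
print(annualized_sharpe(np.diff(equity) / np.asarray(equity[:-1], dtype=float)))
```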
ZDisket/TensorflowTTS | [
"ca5032422b1436320ac2954fee0eae82c1a4218d"
]
| [
"examples/multiband_melgan/train_multiband_melgan.py"
]
| [
"# -*- coding: utf-8 -*-\n# Copyright 2020 Minh Nguyen (@dathudeptrai)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Train Multi-Band MelGAN.\"\"\"\n\nimport tensorflow as tf\n\nphysical_devices = tf.config.list_physical_devices(\"GPU\")\nfor i in range(len(physical_devices)):\n tf.config.experimental.set_memory_growth(physical_devices[i], True)\n\nimport sys\n\nsys.path.append(\".\")\n\nimport argparse\nimport logging\nimport os\n\nimport numpy as np\nimport soundfile as sf\nimport yaml\nfrom tensorflow.keras.mixed_precision import experimental as mixed_precision\n\nimport tensorflow_tts\nfrom examples.melgan.audio_mel_dataset import AudioMelDataset\nfrom examples.melgan.train_melgan import MelganTrainer, collater\nfrom tensorflow_tts.configs import (\n MultiBandMelGANDiscriminatorConfig,\n MultiBandMelGANGeneratorConfig,\n)\nfrom tensorflow_tts.losses import TFMultiResolutionSTFT\nfrom tensorflow_tts.models import (\n TFPQMF,\n TFMelGANGenerator,\n TFMelGANMultiScaleDiscriminator,\n)\nfrom tensorflow_tts.utils import calculate_2d_loss, calculate_3d_loss, return_strategy\n\n\nclass MultiBandMelganTrainer(MelganTrainer):\n \"\"\"Multi-Band MelGAN Trainer class based on MelganTrainer.\"\"\"\n\n def __init__(\n self,\n config,\n strategy,\n steps=0,\n epochs=0,\n is_generator_mixed_precision=False,\n is_discriminator_mixed_precision=False,\n ):\n \"\"\"Initialize trainer.\n\n Args:\n steps (int): Initial global steps.\n epochs (int): Initial global epochs.\n config (dict): Config dict loaded from yaml format configuration file.\n is_generator_mixed_precision (bool): Use mixed precision for generator or not.\n is_discriminator_mixed_precision (bool): Use mixed precision for discriminator or not.\n\n \"\"\"\n super(MultiBandMelganTrainer, self).__init__(\n config=config,\n steps=steps,\n epochs=epochs,\n strategy=strategy,\n is_generator_mixed_precision=is_generator_mixed_precision,\n is_discriminator_mixed_precision=is_discriminator_mixed_precision,\n )\n\n # define metrics to aggregates data and use tf.summary logs them\n self.list_metrics_name = [\n \"adversarial_loss\",\n \"subband_spectral_convergence_loss\",\n \"subband_log_magnitude_loss\",\n \"fullband_spectral_convergence_loss\",\n \"fullband_log_magnitude_loss\",\n \"gen_loss\",\n \"real_loss\",\n \"fake_loss\",\n \"dis_loss\",\n ]\n\n self.init_train_eval_metrics(self.list_metrics_name)\n self.reset_states_train()\n self.reset_states_eval()\n\n def compile(self, gen_model, dis_model, gen_optimizer, dis_optimizer, pqmf):\n super().compile(gen_model, dis_model, gen_optimizer, dis_optimizer)\n # define loss\n self.sub_band_stft_loss = TFMultiResolutionSTFT(\n **self.config[\"subband_stft_loss_params\"]\n )\n self.full_band_stft_loss = TFMultiResolutionSTFT(\n **self.config[\"stft_loss_params\"]\n )\n\n # define pqmf module\n self.pqmf = pqmf\n\n def compute_per_example_generator_losses(self, batch, outputs):\n \"\"\"Compute per example generator losses and return dict_metrics_losses\n Note that all element of the loss MUST has 
a shape [batch_size] and \n the keys of dict_metrics_losses MUST be in self.list_metrics_name.\n\n Args:\n batch: dictionary batch input return from dataloader\n outputs: outputs of the model\n \n Returns:\n per_example_losses: per example losses for each GPU, shape [B]\n dict_metrics_losses: dictionary loss.\n \"\"\"\n dict_metrics_losses = {}\n per_example_losses = 0.0\n\n audios = batch[\"audios\"]\n y_mb_hat = outputs\n y_hat = self.pqmf.synthesis(y_mb_hat)\n\n y_mb = self.pqmf.analysis(tf.expand_dims(audios, -1))\n y_mb = tf.transpose(y_mb, (0, 2, 1)) # [B, subbands, T//subbands]\n y_mb = tf.reshape(y_mb, (-1, tf.shape(y_mb)[-1])) # [B * subbands, T']\n\n y_mb_hat = tf.transpose(y_mb_hat, (0, 2, 1)) # [B, subbands, T//subbands]\n y_mb_hat = tf.reshape(\n y_mb_hat, (-1, tf.shape(y_mb_hat)[-1])\n ) # [B * subbands, T']\n\n # calculate sub/full band spectral_convergence and log mag loss.\n sub_sc_loss, sub_mag_loss = calculate_2d_loss(\n y_mb, y_mb_hat, self.sub_band_stft_loss\n )\n sub_sc_loss = tf.reduce_mean(\n tf.reshape(sub_sc_loss, [-1, self.pqmf.subbands]), -1\n )\n sub_mag_loss = tf.reduce_mean(\n tf.reshape(sub_mag_loss, [-1, self.pqmf.subbands]), -1\n )\n full_sc_loss, full_mag_loss = calculate_2d_loss(\n audios, tf.squeeze(y_hat, -1), self.full_band_stft_loss\n )\n\n # define generator loss\n gen_loss = 0.5 * (sub_sc_loss + sub_mag_loss) + 0.5 * (\n full_sc_loss + full_mag_loss\n )\n\n # init adv_loss\n adv_loss = tf.zeros(shape=tf.shape(gen_loss), dtype=tf.float32)\n\n if self._gen_optimizer.iterations >= self.config[\"discriminator_train_start_steps\"]:\n p_hat = self._discriminator(y_hat)\n p = self._discriminator(tf.expand_dims(audios, 2))\n adv_loss = 0.0\n for i in range(len(p_hat)):\n adv_loss += calculate_3d_loss(\n tf.ones_like(p_hat[i][-1]), p_hat[i][-1], loss_fn=self.mse_loss\n )\n adv_loss /= i + 1\n gen_loss += self.config[\"lambda_adv\"] * adv_loss\n\n dict_metrics_losses.update({\"gen_loss\": gen_loss})\n dict_metrics_losses.update({\"subband_spectral_convergence_loss\": sub_sc_loss})\n dict_metrics_losses.update({\"subband_log_magnitude_loss\": sub_mag_loss})\n dict_metrics_losses.update({\"fullband_spectral_convergence_loss\": full_sc_loss})\n dict_metrics_losses.update({\"fullband_log_magnitude_loss\": full_mag_loss})\n dict_metrics_losses.update({\"adversarial_loss\": adv_loss})\n\n per_example_losses = gen_loss\n return per_example_losses, dict_metrics_losses\n\n def compute_per_example_discriminator_losses(self, batch, gen_outputs):\n \"\"\"Compute per example discriminator losses and return dict_metrics_losses\n Note that all element of the loss MUST has a shape [batch_size] and \n the keys of dict_metrics_losses MUST be in self.list_metrics_name.\n\n Args:\n batch: dictionary batch input return from dataloader\n outputs: outputs of the model\n \n Returns:\n per_example_losses: per example losses for each GPU, shape [B]\n dict_metrics_losses: dictionary loss.\n \"\"\"\n y_mb_hat = gen_outputs\n y_hat = self.pqmf.synthesis(y_mb_hat)\n (\n per_example_losses,\n dict_metrics_losses,\n ) = super().compute_per_example_discriminator_losses(batch, y_hat)\n return per_example_losses, dict_metrics_losses\n\n def generate_and_save_intermediate_result(self, batch):\n \"\"\"Generate and save intermediate result.\"\"\"\n import matplotlib.pyplot as plt\n\n y_mb_batch_ = self.one_step_predict(batch) # [B, T // subbands, subbands]\n y_batch = batch[\"audios\"]\n utt_ids = batch[\"utt_ids\"]\n\n # convert to tensor.\n # here we just take a sample at first replica.\n 
try:\n y_mb_batch_ = y_mb_batch_.values[0].numpy()\n y_batch = y_batch.values[0].numpy()\n utt_ids = utt_ids.values[0].numpy()\n except Exception:\n y_mb_batch_ = y_mb_batch_.numpy()\n y_batch = y_batch.numpy()\n utt_ids = utt_ids.numpy()\n\n y_batch_ = self.pqmf.synthesis(y_mb_batch_).numpy() # [B, T, 1]\n\n # check directory\n dirname = os.path.join(self.config[\"outdir\"], f\"predictions/{self.steps}steps\")\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\n for idx, (y, y_) in enumerate(zip(y_batch, y_batch_), 0):\n # convert to ndarray\n y, y_ = tf.reshape(y, [-1]).numpy(), tf.reshape(y_, [-1]).numpy()\n\n # plot figure and save it\n utt_id = utt_ids[idx]\n figname = os.path.join(dirname, f\"{utt_id}.png\")\n plt.subplot(2, 1, 1)\n plt.plot(y)\n plt.title(\"groundtruth speech\")\n plt.subplot(2, 1, 2)\n plt.plot(y_)\n plt.title(f\"generated speech @ {self.steps} steps\")\n plt.tight_layout()\n plt.savefig(figname)\n plt.close()\n\n # save as wavefile\n y = np.clip(y, -1, 1)\n y_ = np.clip(y_, -1, 1)\n sf.write(\n figname.replace(\".png\", \"_ref.wav\"),\n y,\n self.config[\"sampling_rate\"],\n \"PCM_16\",\n )\n sf.write(\n figname.replace(\".png\", \"_gen.wav\"),\n y_,\n self.config[\"sampling_rate\"],\n \"PCM_16\",\n )\n\n\ndef main():\n \"\"\"Run training process.\"\"\"\n parser = argparse.ArgumentParser(\n description=\"Train MultiBand MelGAN (See detail in examples/multiband_melgan/train_multiband_melgan.py)\"\n )\n parser.add_argument(\n \"--train-dir\",\n default=None,\n type=str,\n help=\"directory including training data. \",\n )\n parser.add_argument(\n \"--dev-dir\",\n default=None,\n type=str,\n help=\"directory including development data. \",\n )\n parser.add_argument(\n \"--use-norm\", default=1, type=int, help=\"use norm mels for training or raw.\"\n )\n parser.add_argument(\n \"--outdir\", type=str, required=True, help=\"directory to save checkpoints.\"\n )\n parser.add_argument(\n \"--config\", type=str, required=True, help=\"yaml format configuration file.\"\n )\n parser.add_argument(\n \"--resume\",\n default=\"\",\n type=str,\n nargs=\"?\",\n help='checkpoint file path to resume training. (default=\"\")',\n )\n parser.add_argument(\n \"--verbose\",\n type=int,\n default=1,\n help=\"logging level. higher is more logging. 
(default=1)\",\n )\n parser.add_argument(\n \"--generator_mixed_precision\",\n default=0,\n type=int,\n help=\"using mixed precision for generator or not.\",\n )\n parser.add_argument(\n \"--discriminator_mixed_precision\",\n default=0,\n type=int,\n help=\"using mixed precision for discriminator or not.\",\n )\n parser.add_argument(\n \"--pretrained\",\n default=\"\",\n type=str,\n nargs=\"?\",\n help=\"path of .h5 mb-melgan generator to load weights from\",\n )\n args = parser.parse_args()\n\n # return strategy\n STRATEGY = return_strategy()\n\n # set mixed precision config\n if args.generator_mixed_precision == 1 or args.discriminator_mixed_precision == 1:\n tf.config.optimizer.set_experimental_options({\"auto_mixed_precision\": True})\n\n args.generator_mixed_precision = bool(args.generator_mixed_precision)\n args.discriminator_mixed_precision = bool(args.discriminator_mixed_precision)\n\n args.use_norm = bool(args.use_norm)\n\n # set logger\n if args.verbose > 1:\n logging.basicConfig(\n level=logging.DEBUG,\n stream=sys.stdout,\n format=\"%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s\",\n )\n elif args.verbose > 0:\n logging.basicConfig(\n level=logging.INFO,\n stream=sys.stdout,\n format=\"%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s\",\n )\n else:\n logging.basicConfig(\n level=logging.WARN,\n stream=sys.stdout,\n format=\"%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s\",\n )\n logging.warning(\"Skip DEBUG/INFO messages\")\n\n # check directory existence\n if not os.path.exists(args.outdir):\n os.makedirs(args.outdir)\n\n # check arguments\n if args.train_dir is None:\n raise ValueError(\"Please specify --train-dir\")\n if args.dev_dir is None:\n raise ValueError(\"Please specify either --valid-dir\")\n\n # load and save config\n with open(args.config) as f:\n config = yaml.load(f, Loader=yaml.Loader)\n config.update(vars(args))\n config[\"version\"] = tensorflow_tts.__version__\n with open(os.path.join(args.outdir, \"config.yml\"), \"w\") as f:\n yaml.dump(config, f, Dumper=yaml.Dumper)\n for key, value in config.items():\n logging.info(f\"{key} = {value}\")\n\n # get dataset\n if config[\"remove_short_samples\"]:\n mel_length_threshold = config[\"batch_max_steps\"] // config[\n \"hop_size\"\n ] + 2 * config[\"multiband_melgan_generator_params\"].get(\"aux_context_window\", 0)\n else:\n mel_length_threshold = None\n\n if config[\"format\"] == \"npy\":\n audio_query = \"*-wave.npy\"\n mel_query = \"*-raw-feats.npy\" if args.use_norm is False else \"*-norm-feats.npy\"\n audio_load_fn = np.load\n mel_load_fn = np.load\n else:\n raise ValueError(\"Only npy are supported.\")\n\n # define train/valid dataset\n train_dataset = AudioMelDataset(\n root_dir=args.train_dir,\n audio_query=audio_query,\n mel_query=mel_query,\n audio_load_fn=audio_load_fn,\n mel_load_fn=mel_load_fn,\n mel_length_threshold=mel_length_threshold,\n ).create(\n is_shuffle=config[\"is_shuffle\"],\n map_fn=lambda items: collater(\n items,\n batch_max_steps=tf.constant(config[\"batch_max_steps\"], dtype=tf.int32),\n hop_size=tf.constant(config[\"hop_size\"], dtype=tf.int32),\n ),\n allow_cache=config[\"allow_cache\"],\n batch_size=config[\"batch_size\"]\n * STRATEGY.num_replicas_in_sync\n * config[\"gradient_accumulation_steps\"],\n )\n\n valid_dataset = AudioMelDataset(\n root_dir=args.dev_dir,\n audio_query=audio_query,\n mel_query=mel_query,\n audio_load_fn=audio_load_fn,\n mel_load_fn=mel_load_fn,\n mel_length_threshold=mel_length_threshold,\n ).create(\n 
is_shuffle=config[\"is_shuffle\"],\n map_fn=lambda items: collater(\n items,\n batch_max_steps=tf.constant(\n config[\"batch_max_steps_valid\"], dtype=tf.int32\n ),\n hop_size=tf.constant(config[\"hop_size\"], dtype=tf.int32),\n ),\n allow_cache=config[\"allow_cache\"],\n batch_size=config[\"batch_size\"] * STRATEGY.num_replicas_in_sync,\n )\n\n # define trainer\n trainer = MultiBandMelganTrainer(\n steps=0,\n epochs=0,\n config=config,\n strategy=STRATEGY,\n is_generator_mixed_precision=args.generator_mixed_precision,\n is_discriminator_mixed_precision=args.discriminator_mixed_precision,\n )\n\n with STRATEGY.scope():\n # define generator and discriminator\n generator = TFMelGANGenerator(\n MultiBandMelGANGeneratorConfig(\n **config[\"multiband_melgan_generator_params\"]\n ),\n name=\"multi_band_melgan_generator\",\n )\n\n discriminator = TFMelGANMultiScaleDiscriminator(\n MultiBandMelGANDiscriminatorConfig(\n **config[\"multiband_melgan_discriminator_params\"]\n ),\n name=\"multi_band_melgan_discriminator\",\n )\n\n pqmf = TFPQMF(\n MultiBandMelGANGeneratorConfig(\n **config[\"multiband_melgan_generator_params\"]\n ),\n dtype=tf.float32,\n name=\"pqmf\",\n )\n\n # dummy input to build model.\n fake_mels = tf.random.uniform(shape=[1, 100, 80], dtype=tf.float32)\n y_mb_hat = generator(fake_mels)\n y_hat = pqmf.synthesis(y_mb_hat)\n discriminator(y_hat)\n\n if len(args.pretrained) > 1:\n generator.load_weights(args.pretrained)\n logging.info(\n f\"Successfully loaded pretrained weight from {args.pretrained}.\"\n )\n\n generator.summary()\n discriminator.summary()\n\n # define optimizer\n generator_lr_fn = getattr(\n tf.keras.optimizers.schedules, config[\"generator_optimizer_params\"][\"lr_fn\"]\n )(**config[\"generator_optimizer_params\"][\"lr_params\"])\n discriminator_lr_fn = getattr(\n tf.keras.optimizers.schedules,\n config[\"discriminator_optimizer_params\"][\"lr_fn\"],\n )(**config[\"discriminator_optimizer_params\"][\"lr_params\"])\n\n gen_optimizer = tf.keras.optimizers.Adam(\n learning_rate=generator_lr_fn,\n amsgrad=config[\"generator_optimizer_params\"][\"amsgrad\"],\n )\n dis_optimizer = tf.keras.optimizers.Adam(\n learning_rate=discriminator_lr_fn,\n amsgrad=config[\"discriminator_optimizer_params\"][\"amsgrad\"],\n )\n\n _ = gen_optimizer.iterations\n _ = dis_optimizer.iterations\n\n trainer.compile(\n gen_model=generator,\n dis_model=discriminator,\n gen_optimizer=gen_optimizer,\n dis_optimizer=dis_optimizer,\n pqmf=pqmf,\n )\n\n # start training\n try:\n trainer.fit(\n train_dataset,\n valid_dataset,\n saved_path=os.path.join(config[\"outdir\"], \"checkpoints/\"),\n resume=args.resume,\n )\n except KeyboardInterrupt:\n trainer.save_checkpoint()\n logging.info(f\"Successfully saved checkpoint @ {trainer.steps}steps.\")\n\n\nif __name__ == \"__main__\":\n main()\n"
]
| [
[
"tensorflow.ones_like",
"tensorflow.reshape",
"tensorflow.shape",
"matplotlib.pyplot.savefig",
"tensorflow.transpose",
"tensorflow.constant",
"tensorflow.squeeze",
"matplotlib.pyplot.tight_layout",
"tensorflow.keras.optimizers.Adam",
"matplotlib.pyplot.subplot",
"tensorflow.expand_dims",
"matplotlib.pyplot.title",
"tensorflow.config.experimental.set_memory_growth",
"tensorflow.random.uniform",
"matplotlib.pyplot.close",
"numpy.clip",
"matplotlib.pyplot.plot",
"tensorflow.config.list_physical_devices",
"tensorflow.config.optimizer.set_experimental_options"
]
]
|
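Note on the entry above: the trainer combines sub-band and full-band multi-resolution STFT losses, each built from a spectral-convergence term and a log-STFT-magnitude term, weighted 0.5/0.5 before the gated adversarial term. A single-resolution sketch of that loss pair in plain TF2, not the `TFMultiResolutionSTFT` class from the entry; the FFT, hop, and window sizes are illustrative rather than the entry's config values:

```python
import tensorflow as tf

def stft_losses(y, y_hat, frame_length=1024, frame_step=256, fft_length=1024):
    """Spectral-convergence and log-STFT-magnitude losses at one resolution."""
    mag = lambda x: tf.abs(tf.signal.stft(x, frame_length, frame_step, fft_length)) + 1e-7
    s, s_hat = mag(y), mag(y_hat)
    sc = tf.norm(s - s_hat) / tf.norm(s)  # spectral convergence
    log_mag = tf.reduce_mean(tf.abs(tf.math.log(s) - tf.math.log(s_hat)))
    return sc, log_mag

y = tf.random.uniform([2, 8192], -1.0, 1.0)      # reference audio batch
y_hat = tf.random.uniform([2, 8192], -1.0, 1.0)  # generated audio batch
sub_sc, sub_mag = stft_losses(y, y_hat)
# the trainer weighs sub- and full-band terms as
#   gen_loss = 0.5 * (sub_sc + sub_mag) + 0.5 * (full_sc + full_mag)
print(float(sub_sc), float(sub_mag))
```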
enricopisoni/SHERPA-training | [
"f27ef7b0b69d45e3e637061023c3932c8cfff296"
]
| [
"sherpa/read_scenarios/computeDistanceRatio.py"
]
| [
"import geopy.distance as gpd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef computeDistanceRatio(conf):\n if conf.distance == 1:\n #compute step for lat and lon\n lat = np.unique(conf.y)\n lon = np.unique(conf.x)\n step_lat = lat[1] - lat[0]\n step_lon = lon[1] - lon[0]\n #overwrite lon variable to compute how delta lat and lon change\n lon = np.repeat(10, np.size(lat)) # overwtie lonrepeat a given value, as lon does not affect result\n res_delta_lat = [gpd.distance((lat[i],lon[i]),(lat[i]+step_lat,lon[i])).km for i in range(0,np.size(lat))]\n res_delta_lon = [gpd.distance((lat[i],lon[i]),(lat[i],lon[i]+step_lon)).km for i in range(0,np.size(lat))]\n #compute ratio lat lon\n res_delta_lat_array = np.array(res_delta_lat).reshape(-1, 1)\n res_delta_lon_array = np.array(res_delta_lon).reshape(-1, 1)\n ratio_lat_lon = res_delta_lat_array / res_delta_lon_array\n #compute polynomial to approximate how ratio_lat_lon changes with lat, and save it\n ratio_pol = np.polyfit(lat, ratio_lat_lon,5)\n conf.ratioPoly = ratio_pol\n elif conf.distance == 0:\n conf.ratioPoly = np.array([0,0,0,0,0,1])\n return conf\n"
]
| [
[
"numpy.array",
"numpy.size",
"numpy.polyfit",
"numpy.unique"
]
]
|
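Note on the entry above: it fits a degree-5 polynomial to the ratio of km-per-degree-latitude over km-per-degree-longitude across the grid. On a sphere with equal lat/lon steps that ratio is approximately 1/cos(lat), which gives a quick, geopy-free sanity check of the fit; the latitude band here is illustrative:

```python
import numpy as np

lat = np.linspace(30.0, 70.0, 81)        # illustrative latitude band, degrees
ratio = 1.0 / np.cos(np.radians(lat))    # km/deg-lat over km/deg-lon, spherical approx.

coeffs = np.polyfit(lat, ratio, 5)       # degree-5 fit, as in the entry above
residual = np.max(np.abs(np.polyval(coeffs, lat) - ratio))
print(residual)                          # tiny over this band

# conf.distance == 0 in the entry corresponds to the constant polynomial 1
flat = np.array([0, 0, 0, 0, 0, 1])
assert np.allclose(np.polyval(flat, lat), 1.0)
```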
SijanC147/Msc | [
"08a6ae6c8755a9a2392d441d8b84cfbb83bee0bf"
]
| [
"tsaplay/scripts/client.py"
]
| [
"from argparse import ArgumentParser\nfrom csv import DictReader\nfrom grpc import insecure_channel\n\nfrom tensorflow import string as tf_string\nfrom tensorflow.contrib.util import make_tensor_proto # pylint: disable=E0611\nfrom tensorflow.train import Example, Features, Feature, BytesList\nfrom tensorflow_serving.apis.input_pb2 import Input, ExampleList\nfrom tensorflow_serving.apis.predict_pb2 import PredictRequest\nfrom tensorflow_serving.apis.classification_pb2 import ClassificationRequest\nfrom tensorflow_serving.apis import prediction_service_pb2_grpc\n\nfrom tsaplay.features import FeatureProvider\n\n\ndef parse_args():\n\n parser = ArgumentParser(\n description=\"Run targeted sentiment analysis on a sentence.\"\n )\n\n parser.add_argument(\n \"--model\",\n \"-m\",\n dest=\"model\",\n type=str,\n required=True,\n help=\"Which model to use\",\n )\n parser.add_argument(\n \"--signature\",\n \"-sig\",\n dest=\"signature\",\n default=\"inspect\",\n type=str,\n required=False,\n help=\"Specific TF Serving signature to query.\",\n )\n parser.add_argument(\n \"--batch_file\",\n \"-b\",\n dest=\"batch_file\",\n type=str,\n required=False,\n help=\"Process batch of targets,phrases in file\",\n )\n parser.add_argument(\n \"--sentence\",\n \"-s\",\n dest=\"sentence\",\n type=str,\n required=False,\n help=\"Sentence to analyze, must contain the target\",\n )\n parser.add_argument(\n \"--target\",\n \"-t\",\n dest=\"target\",\n type=str,\n required=False,\n help=\"Target to focus on, must be in the sentence\",\n )\n\n args = parser.parse_args()\n\n return (\n args.model,\n args.signature,\n args.batch_file,\n args.sentence,\n args.target,\n )\n\n\ndef byte_encode_array(array):\n return [a.encode() for a in array]\n\n\ndef main():\n\n model, signature, batch_file_path, sentence, target = parse_args()\n\n feat_dict = {\"sentences\": [], \"targets\": []}\n\n if batch_file_path is not None:\n with open(batch_file_path, \"r\") as batch_file:\n fieldnames = [\"target\", \"sentence\"]\n csvreader = DictReader(batch_file, fieldnames=fieldnames)\n for row in csvreader:\n feat_dict[\"targets\"].append(row[\"target\"].strip())\n feat_dict[\"sentences\"].append(row[\"sentence\"].strip())\n else:\n feat_dict[\"targets\"].append(target)\n feat_dict[\"sentences\"].append(sentence)\n\n l_ctxts, trgs, r_ctxts = FeatureProvider.partition_sentences(\n sentences=feat_dict[\"sentences\"],\n targets=feat_dict[\"targets\"],\n offsets=FeatureProvider.get_target_offset_array(feat_dict),\n )\n l_enc = [\n FeatureProvider.tf_encode_tokens(tokens)\n for tokens in FeatureProvider.tokenize_phrases(l_ctxts)\n ]\n trg_enc = [\n FeatureProvider.tf_encode_tokens(tokens)\n for tokens in FeatureProvider.tokenize_phrases(trgs)\n ]\n r_enc = [\n FeatureProvider.tf_encode_tokens(tokens)\n for tokens in FeatureProvider.tokenize_phrases(r_ctxts)\n ]\n\n tf_examples = []\n\n for left, target, right in zip(l_enc, trg_enc, r_enc):\n features = Features(\n feature={\n \"left\": Feature(bytes_list=BytesList(value=left)),\n \"target\": Feature(bytes_list=BytesList(value=target)),\n \"right\": Feature(bytes_list=BytesList(value=right)),\n }\n )\n tf_example = Example(features=features)\n tf_examples.append(tf_example.SerializeToString())\n\n tensor_proto = make_tensor_proto(\n tf_examples, dtype=tf_string, shape=[len(tf_examples)]\n )\n\n channel = insecure_channel(\"127.0.0.1:8500\")\n stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)\n\n # CLASSIFICATION\n classification_req = ClassificationRequest()\n inputs = 
Input(example_list=ExampleList(examples=[tf_example]))\n classification_req.input.CopyFrom(inputs) # pylint: disable=E1101\n classification_req.model_spec.name = \"lg\" # pylint: disable=E1101\n classification = stub.Classify(classification_req, 60.0)\n print(classification)\n\n # PREDICTION\n prediction_req = PredictRequest()\n prediction_req.inputs[\"instances\"].CopyFrom( # pylint: disable=E1101\n tensor_proto\n )\n prediction_req.model_spec.signature_name = ( # pylint: disable=E1101\n signature\n )\n prediction_req.model_spec.name = model # pylint: disable=E1101\n prediction = stub.Predict(prediction_req, 60.0)\n print(prediction)\n\n\nif __name__ == \"__main__\":\n main()\n"
]
| [
[
"tensorflow.train.Example",
"tensorflow.train.BytesList"
]
]
|
MontrealAI/recsim | [
"55e50e4be736d222ffe8c2477ed1981b40f91605",
"55e50e4be736d222ffe8c2477ed1981b40f91605"
]
| [
"recsim/agents/random_agent_test.py",
"recsim/choice_model_test.py"
]
| [
"# coding=utf-8\n# coding=utf-8\n# Copyright 2019 The RecSim Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for recsim.agents.random_agent.\"\"\"\n\nfrom gym import spaces\nimport numpy as np\nfrom recsim import choice_model\nfrom recsim.agents import random_agent\nfrom recsim.environments import interest_evolution as iev\nfrom recsim.environments import interest_exploration as ie\nfrom recsim.simulator import environment\nimport tensorflow.compat.v1 as tf\n\n\nclass RandomAgentTest(tf.test.TestCase):\n\n def setUp(self):\n super(RandomAgentTest, self).setUp()\n # The maximum length of videos in response\n iev.IEvResponse.MAX_VIDEO_LENGTH = 100.0\n\n # The number of features used to represent user state.\n iev.IEvUserState.NUM_FEATURES = 10\n\n # The number of features used to represent video.\n iev.IEvVideo.NUM_FEATURES = 10\n # The maximum length of videos\n iev.IEvVideo.MAX_VIDEO_LENGTH = 100.0\n\n def test_step(self):\n # Create a simple user\n slate_size = 2\n user_model = iev.IEvUserModel(\n slate_size,\n choice_model_ctor=choice_model.MultinomialLogitChoiceModel,\n response_model_ctor=iev.IEvResponse)\n\n # Create a candidate_set with 5 items\n num_candidates = 5\n document_sampler = iev.IEvVideoSampler()\n ievsim = environment.Environment(user_model, document_sampler,\n num_candidates, slate_size)\n\n # Create agent\n action_space = spaces.MultiDiscrete(num_candidates * np.ones((slate_size,)))\n agent = random_agent.RandomAgent(action_space, random_seed=0)\n\n # This agent doesn't use the previous user response\n observation, documents = ievsim.reset()\n slate = agent.step(1, dict(user=observation, doc=documents))\n self.assertAllEqual(slate, [2, 0])\n\n def test_slate_indices_and_length(self):\n # Initialize agent\n slate_size = 2\n num_candidates = 100\n action_space = spaces.MultiDiscrete(num_candidates * np.ones((slate_size,)))\n\n user_model = iev.IEvUserModel(\n slate_size,\n choice_model_ctor=choice_model.MultinomialLogitChoiceModel,\n response_model_ctor=iev.IEvResponse)\n agent = random_agent.RandomAgent(action_space, random_seed=0)\n\n # Create a set of documents\n document_sampler = iev.IEvVideoSampler()\n ievenv = environment.Environment(user_model, document_sampler,\n num_candidates, slate_size)\n\n # Test that slate indices in correct range and length is correct\n observation, documents = ievenv.reset()\n slate = agent.step(1, dict(user=observation, doc=documents))\n self.assertLen(slate, slate_size)\n self.assertAllInSet(slate, range(num_candidates))\n\n def test_bundle_and_unbundle_trivial(self):\n action_space = spaces.MultiDiscrete(np.ones((1,)))\n agent = random_agent.RandomAgent(action_space, random_seed=0)\n self.assertFalse(agent.unbundle('', 0, {}))\n self.assertEqual({\n 'episode_num': 0\n }, agent.bundle_and_checkpoint('', 0))\n\n def test_bundle_and_unbundle(self):\n # Initialize agent\n slate_size = 1\n num_candidates = 3\n action_space = spaces.MultiDiscrete(num_candidates * np.ones((slate_size,)))\n\n user_model = 
ie.IEUserModel(\n slate_size,\n user_state_ctor=ie.IEUserState,\n response_model_ctor=ie.IEResponse)\n agent = random_agent.RandomAgent(action_space, random_seed=0)\n\n # Create a set of documents\n document_sampler = ie.IETopicDocumentSampler()\n documents = {}\n for i in range(num_candidates):\n video = document_sampler.sample_document()\n documents[i] = video.create_observation()\n\n # Test that slate indices in correct range and length is correct\n observation = dict(user=user_model.create_observation(), doc=documents)\n agent.step(1, observation)\n\n bundle_dict = agent.bundle_and_checkpoint('', 0)\n self.assertTrue(agent.unbundle('', 0, bundle_dict))\n self.assertEqual(bundle_dict, agent.bundle_and_checkpoint('', 0))\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# coding=utf-8\n# coding=utf-8\n# Copyright 2019 The RecSim Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for recsim.choice_model.\"\"\"\n\nimport numpy as np\nfrom recsim import choice_model\nfrom recsim.environments import interest_evolution as evolution\nimport tensorflow.compat.v1 as tf\n\n\nclass SoftmaxTest(tf.test.TestCase):\n\n def test_softmax_single_int(self):\n self.assertAllClose(choice_model.softmax([0]), [1.0])\n\n def test_softmax_equal_ints(self):\n self.assertAllClose(\n choice_model.softmax(np.ones(4)), np.array([0.25, 0.25, 0.25, 0.25]))\n\n def test_softmax_positive_floats(self):\n self.assertAllClose(\n choice_model.softmax(np.log(np.arange(1, 5))),\n np.array([0.1, 0.2, 0.3, 0.4]))\n\n def test_softmax_negative_floats(self):\n self.assertAllClose(\n choice_model.softmax(-1.0 * np.log(np.arange(1, 5))),\n np.array([0.48, 0.24, 0.16, 0.12]))\n\n\nclass MultinomialChoiceModelTest(tf.test.TestCase):\n\n def setUp(self):\n super(MultinomialChoiceModelTest, self).setUp()\n np.random.seed(0)\n self._user_state = evolution.IEvUserState(np.array([0.8, 0.6]))\n\n def test_multinomial_logit_default(self):\n mnl_model = choice_model.MultinomialLogitChoiceModel(choice_features={})\n mnl_model.score_documents(\n self._user_state, np.array([[0.8, 0.6], [0.6, 0.8]]))\n # The logits for two documents are 1 and 0.96 respectively. When computing\n # softmax logits, we subtract the largest value, which is 1 here. So the\n # score is softmax([0, -0.04]) = [0.51, 0.49]\n self.assertAlmostEqual(mnl_model._scores[0], 0.510, delta=0.001)\n self.assertAlmostEqual(mnl_model._scores[1], 0.490, delta=0.001)\n self.assertEqual(mnl_model._score_no_click, 0)\n\n def test_multinomial_logit_no_click_mass(self):\n choice_features = dict(no_click_mass=1.0)\n mnl_model = choice_model.MultinomialLogitChoiceModel(\n choice_features=choice_features)\n # The logits for two documents are 1 and 0.96 respectively. No click mass\n # is 1.0. When computing\n # softmax logits, we subtract the largest value, which is 1 here. 
So the\n # score is softmax([0, -0.04, 0]) = [0.337, 0.325, 0.338]\n mnl_model.score_documents(\n self._user_state, np.array([[0.8, 0.6], [0.6, 0.8]]))\n self.assertAlmostEqual(mnl_model._scores[0], 0.338, delta=0.001)\n self.assertAlmostEqual(mnl_model._scores[1], 0.325, delta=0.001)\n self.assertAlmostEqual(mnl_model._score_no_click, 0.338, delta=0.001)\n\n def test_multinomial_proportion_choice_model_default(self):\n choice_features = dict(min_normalizer=0)\n mnp_model = choice_model.MultinomialProportionalChoiceModel(\n choice_features=choice_features)\n mnp_model.score_documents(\n self._user_state, np.array([[0.8, 0.6], [0.6, 0.8]]))\n # The scores are the dot product between user features and doc features.\n self.assertAlmostEqual(mnp_model._scores[0], 1, delta=0.001)\n self.assertAlmostEqual(mnp_model._scores[1], 0.96, delta=0.001)\n self.assertEqual(mnp_model._score_no_click, 0)\n\n def test_multinomial_proportion_min_normalizer(self):\n choice_features = dict(min_normalizer=0.5, no_click_mass=0.5)\n mnp_model = choice_model.MultinomialProportionalChoiceModel(\n choice_features=choice_features)\n mnp_model.score_documents(\n self._user_state, np.array([[0.8, 0.6], [0.6, 0.8]]))\n # The scores the dot product user features and doc features minus the\n # min_normalizer.\n self.assertAlmostEqual(mnp_model._scores[0], 0.5, delta=0.001)\n self.assertAlmostEqual(mnp_model._scores[1], 0.46, delta=0.001)\n self.assertAlmostEqual(mnp_model._score_no_click, 0, delta=0.001)\n\n\nclass CascadeChoiceModelTest(tf.test.TestCase):\n\n def setUp(self):\n super(CascadeChoiceModelTest, self).setUp()\n np.random.seed(0)\n self._user_state = evolution.IEvUserState(np.array([1.0]))\n\n def test_exponential_cascade_invalid_score_scaling(self):\n with self.assertRaises(ValueError):\n choice_features = {'attention_prob': 0.8, 'score_scaling': -1.0}\n choice_model.ExponentialCascadeChoiceModel(choice_features)\n\n def test_exponential_cascade_invalid_attenion_prob(self):\n with self.assertRaises(ValueError):\n choice_features = {'attention_prob': 2.0}\n choice_model.ExponentialCascadeChoiceModel(choice_features)\n\n def test_exponential_cascade(self):\n choice_features = {'score_scaling': 0.04}\n model = choice_model.ExponentialCascadeChoiceModel(choice_features)\n model.score_documents(self._user_state, np.array([[3.0], [2.0], [1.0]]))\n self.assertEqual(model.choose_item(), 0)\n\n def test_exponential_cascade_with_no_click(self):\n choice_features = {'attention_prob': 1.0, 'score_scaling': 0.0}\n model = choice_model.ExponentialCascadeChoiceModel(choice_features)\n model.score_documents(self._user_state, np.array([[3.0], [2.0], [1.0]]))\n self.assertEqual(model.choose_item(), None)\n\n def test_proportional_cascade_invalid_attenion_prob(self):\n with self.assertRaises(ValueError):\n choice_features = {\n 'attention_prob': 2.0,\n 'min_normalizer': -2.0,\n 'score_scaling': 0.1\n }\n choice_model.ProportionalCascadeChoiceModel(choice_features)\n\n def test_proportional_cascade_invalid_score_scaling(self):\n with self.assertRaises(ValueError):\n choice_features = {\n 'attention_prob': 0.5,\n 'min_normalizer': -2.0,\n 'score_scaling': -1.0\n }\n choice_model.ProportionalCascadeChoiceModel(choice_features)\n\n def test_proportional_cascade(self):\n choice_features = {\n 'attention_prob': 1.0,\n 'min_normalizer': -4.0,\n 'score_scaling': 0.07\n }\n model = choice_model.ProportionalCascadeChoiceModel(choice_features)\n model.score_documents(self._user_state,\n np.array([[-3.0], [-3.0], [10.0], [1.0], [-4.0]]))\n 
self.assertEqual(model.choose_item(), 2)\n\n def test_proportional_cascade_with_no_click(self):\n choice_features = {\n 'attention_prob': 0.5,\n 'min_normalizer': -1.0,\n 'score_scaling': 0.1\n }\n model = choice_model.ProportionalCascadeChoiceModel(choice_features)\n model.score_documents(self._user_state, np.array([[0.0], [0.0], [0.0]]))\n self.assertEqual(model.choose_item(), None)\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
]
| [
[
"numpy.ones",
"tensorflow.compat.v1.test.main"
],
[
"numpy.array",
"numpy.random.seed",
"numpy.ones",
"tensorflow.compat.v1.test.main",
"numpy.arange"
]
]
|
poldrack/psych10.github.io | [
"113a4fa96998383951f94829d53227ce0fdf7ba5"
]
| [
"syllabus/mk_syllabus.py"
]
| [
"\"\"\"\ngenerate markdown version of syllabus\n\"\"\"\n\nimport os\nimport collections\nimport re\nimport time\nimport pandas\n\nfrom get_syllabus2 import get_syllabus\n\n\ndef replacemany(adict, astring):\n # from https://stackoverflow.com/questions/2392623/replace-multiple-string-in-a-file # noqa\n pat = '|'.join(re.escape(s) for s in adict)\n there = re.compile(pat)\n def onerepl(mo): return adict[mo.group()]\n return there.sub(onerepl, astring)\n\n\nlecturebase = '../lectures'\nif not os.path.exists(lecturebase):\n os.mkdir(lecturebase)\n\nsyll = get_syllabus()\n\ndf = pandas.DataFrame(syll[1:],columns=syll[0])\ndf = df.loc[df.Week!='', :] # remove empty rows\n\n# columns to use for syllabus\nsyll_columns = ['Date', 'Topic', 'Reading']\n\n# save objectives to write to a separate file listing all of them\nobjectives = collections.OrderedDict()\n\noutfile = 'index.md'\nwith open(outfile, 'w') as f:\n f.write('---\\nlayout: default\\ntitle: Psych 10: Syllabus\\n---\\n') # noqa\n f.write('## Syllabus\\n\\nClick on the date for more information about each lecture\\n\\n') # noqa\n f.write('Detailed version of the full syllabus is available [here](../full_syllabus)\\n\\n') # noqa\n f.write('| '+'|'.join(syll_columns) + '|\\n')\n\n # create separator\n sep = []\n for i in range(len(syll_columns)):\n sep.append('---')\n f.write('| ' + '|'.join(sep) + '|\\n')\n\n # loop through rows\n lecturectr = 1\n for i in df.index:\n df_row = df.loc[i,:]\n\n if df_row.Topic.lower().find('no class') > -1:\n noclass = True\n else:\n noclass = False\n\n date = df_row.Date\n topic = '**' + df_row.Topic.replace('\\n', '<br>') + '**'\n if df_row.Reading is None:\n reading = ''\n else:\n reading = df_row.Reading.replace('\\n', '<br>')\n\n # create foldout detail\n\n # add expandable section with learning objectives and links\n details = ''\n if df_row['Learning Objectives'] is not None:\n learnobj = df_row['Learning Objectives'].split('\\n')\n if len(learnobj) > 0:\n details += '<details><br>Learning Objectives:<br><br>After this lecture, you should be able to:<br>' # noqa\n groupname = df_row.Topic.split(',')[0]\n if not groupname in objectives:\n objectives[groupname] = []\n for li, l in enumerate(learnobj):\n if len(l) == 0:\n continue\n objectives[groupname].append(l)\n details += '* %s<br>' % l\n print(details)\n\n if df_row['Links'] is not None:\n links = df_row['Links'].split('\\n')\n if len(links[0]) > 0:\n details += '<br>Links:<br>'\n for li, l in enumerate(links):\n details += '* %s<br>' % l\n if details is not '':\n details += '</details><br>'\n\n if noclass:\n rowcontent = [df_row.Date, '**' + df_row.Topic + '**', '']\n else:\n rowcontent = [\n df_row.Date,\n '**' + df_row.Topic + '**' + details,\n reading]\n\n f.write('| ' + '|'.join(rowcontent) + '|\\n')\n\n# make a fully expanded version of the syllabus\n\nadict = {'<details>': '<br>', '</details>': ''}\n\nshort_syllabus = open('index.md').readlines()\nif not os.path.exists('../full_syllabus'):\n os.mkdir('../full_syllabus')\n\nprospectus = open('../prospectus/index.md').readlines()\nssyl = open('index.md').readlines()\n\nwith open('../full_syllabus/index.md', 'w') as f:\n f.write('---\\nlayout: default\\ntitle: Psych 10: Full Syllabus\\n---\\n') # noqa\n f.write('Revised %s' % time.strftime(\"%m/%d/%Y\"))\n for l in prospectus[4:]:\n f.write(l)\n f.write('## Class Schedule\\n')\n for l in short_syllabus[9:]:\n f.write(replacemany(adict, l))\n\nif not os.path.exists(\"../objectives\"):\n os.mkdir('../objectives')\nwith 
open('../objectives/index.md', 'w') as f:\n f.write('---\\nlayout: default\\ntitle: Psych 10: Learning Objectives\\n---\\n') # noqa\n f.write('## Learning objectives\\n\\n')\n f.write('Students should be able to do each of the following by the end of this course:\\n\\n') # noqa\n\n for k in objectives.keys():\n if len(objectives[k]) == 0:\n continue\n f.write('\\n### %s\\n' % k)\n for o in objectives[k]:\n f.write('* %s\\n' % o)\n"
]
| [
[
"pandas.DataFrame"
]
]
|
hustvl/DGCN | [
"1baa977e27aa71992923861113f5cea9bb1fb98a"
]
| [
"dsrg_cues.py"
]
| [
"import numpy as np\r\nimport os, sys\r\nimport os.path as osp\r\nimport pylab\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.ndimage import zoom\r\nimport argparse\r\nimport cPickle\r\n# import pyDRFI\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.autograd import Variable\r\nimport cv2\r\n\r\n# from model import novelmodel,FeatureExtractor\r\n\r\n# RESTORE_FROM = './model_vgg_cam_rdc.pth.tar'\r\nSAVE_PATH = './cues-sal/'\r\ndef parse_args():\r\n \"\"\"\r\n Parse input arguments\r\n \"\"\"\r\n parser = argparse.ArgumentParser(description='evaluate segmentation result')\r\n parser.add_argument('--voc', dest='voc_dir',\r\n help='ground truth dir',\r\n default='/workspace2/fengjp/data/JPEGImages/', type=str)\r\n parser.add_argument('--images', dest='image_ids',\r\n help='test ids file path',\r\n default='dataset/list/input_list.txt', type=str)\r\n # parser.add_argument(\"--restore-from\", type=str, default=RESTORE_FROM,\r\n # help=\"Where restore model parameters from.\") \r\n\r\n args = parser.parse_args()\r\n return args\r\n\r\n\r\n\r\ndef preprocess(image, size):\r\n mean_pixel = np.array([104.008, 116.669, 122.675])\r\n\r\n image = np.array(image)\r\n H, W, _ = image.shape\r\n image = zoom(image.astype('float32'), (size / H, size / W, 1.0), order=1)\r\n image = image - mean_pixel\r\n image = image.transpose([2, 0, 1])\r\n return image\r\n\r\ndef generate_cues(localization, cam_cues, labels):\r\n cues = np.zeros_like(cam_cues)\r\n cues[0, :, :] = cam_cues[0, :, :]\r\n # cues[0, :, :] = bg\r\n\r\n present_class_index = np.where(labels[1:] == 1)[0]\r\n sum_of_calss = np.sum(localization, axis=(1,2))\r\n sum_of_present_class = sum_of_calss[labels[1:]==1]\r\n index = sorted(range(len(sum_of_present_class)), key=lambda k: sum_of_present_class[k], reverse=True)\r\n for i in index:\r\n local_index = present_class_index[i]\r\n # index_map = np.where(localization[local_index] == 1)\r\n # cues[:, index_map[0], index_map[1]] = 0\r\n # cues[local_index+1, index_map[0], index_map[1]] = 1.0\r\n\r\n index_map = np.where(cam_cues[local_index+1] == 1)\r\n cues[:, index_map[0], index_map[1]] = 0\r\n cues[local_index+1, index_map[0], index_map[1]] = 1.0\r\n\r\n return cues\r\n\r\ndef save_to_pickle(cues_dict, filename):\r\n with open(filename, 'wb') as handle:\r\n cPickle.dump(cues_dict, handle, protocol=cPickle.HIGHEST_PROTOCOL)\r\n\r\n\r\nif __name__ == '__main__':\r\n args = parse_args()\r\n\r\n # model = novelmodel()\r\n # model_weights = torch.load(args.restore_from)\r\n # model.load_state_dict(model_weights)\r\n # model.cuda()\r\n\r\n # DRFI = pyDRFI.pyDRFI()\r\n # DRFI.load('../drfi_cpp/drfiModelCpp.data')\r\n\r\n cues_dict = {}\r\n data_file = cPickle.load(open('dataset/localization_cues-sal.pickle'))\r\n image_ids = [i.strip().split() for i in open(args.image_ids) if not i.strip() == '']\r\n for (img_name, index) in image_ids:\r\n if int(index) % 100 == 0:\r\n print('%s processd'%(index))\r\n img_id = osp.splitext(img_name)[0]\r\n img_path = os.path.join(args.voc_dir, img_id+'.jpg')\r\n \r\n image = cv2.imread(img_path, cv2.IMREAD_COLOR)\r\n H, W, _ = image.shape\r\n # sal = DRFI.getSalMap(image)\r\n # sal = zoom(sal, (41.0 / H, 41.0 / W), order=1)\r\n # threshold = 0.07\r\n # bg = (sal < threshold)\r\n\r\n # image = Variable(torch.from_numpy(preprocess(image, 321.0)).cuda()).unsqueeze(0).float()\r\n # myexactor = FeatureExtractor(model)\r\n # feature,params = myexactor(image)\r\n\r\n heat_maps = np.zeros((4, 20, 41, 41))\r\n localization = np.zeros((20, 41, 41))\r\n\r\n # for i in 
range(4): \r\n # feature[i] = feature[i].squeeze(0)\r\n # for j in range(20):\r\n # w = params[i][j].cpu().detach().numpy()\r\n # heat_maps[i,j, :, :] = np.sum((feature[i][j].cpu().detach().numpy()) * w[:, None, None], axis=0)\r\n # heat_maps[i,j] = heat_maps[i,j] / np.max(heat_maps[i,j].flat)\r\n\r\n # heat_maps_final = np.zeros((20, 41, 41))\r\n\r\n # for i in range(20):\r\n # heat_maps_final[i] = heat_maps[0][i] + (heat_maps[1][i]+heat_maps[2][i]+heat_maps[3][i])/3.0\r\n # localization[i, :, :] = (heat_maps_final[i, :, :] > 0.7 * np.max(heat_maps_final[i]))\r\n cam_cues = np.zeros((21, 41, 41))\r\n labels = np.zeros((21,))\r\n labels_i = data_file['%i_labels' % int(index)]\r\n labels[labels_i] = 1.0\r\n cues_i = data_file['%i_cues' % int(index)]\r\n cam_cues[cues_i[0], cues_i[1], cues_i[2]] = 1.0\r\n\r\n cues = generate_cues(localization, cam_cues, labels)\r\n\r\n cues_dict['%i_labels' % int(index)] = labels_i\r\n cues_dict['%i_cues' % int(index)] = np.where(cues==1)\r\n # cues\r\n markers_new = np.zeros((41, 41))\r\n markers_new.fill(21)\r\n pos = np.where(cues == 1)\r\n markers_new[pos[1], pos[2]] = pos[0]\r\n markers_new = zoom(markers_new, (float(H)/41.0, float(W)/41.0), order=0)\r\n save_path = osp.join(SAVE_PATH,img_id+'.png')\r\n cv2.imwrite(save_path, markers_new)\r\n # save_to_pickle(cues_dict, 'localization_cues-0.7-0.07.pickle')\r\n"
]
| [
[
"numpy.zeros_like",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.where"
]
]
|
transientlunatic/heron | [
"6b4951af3a74e69f0adaf1a339a1d4c460c6fae9"
]
| [
"tests/test_tests.py"
]
| [
"\"\"\"\nTests for the science testing code.\n\"\"\"\n\nimport unittest\nimport numpy as np\n\nfrom heron import testing\nfrom elk.waveform import NRWaveform, Timeseries\nfrom elk.catalogue import NRCatalogue\n\nfrom heron.models.georgebased import HeronHodlr\n\nclass MockWaveform(NRWaveform):\n def timeseries(self,\n total_mass,\n sample_rate=4096,\n f_low=None,\n distance=1,\n coa_phase=0,\n ma=None,\n t_min=None,\n t_max=None,\n f_ref=None,\n t_align=True):\n return (Timeseries(data=np.random.randn(1000)*1e-19, times=np.linspace(t_min, t_max, 1000)),\n Timeseries(data=np.random.randn(1000)*1e-19, times=np.linspace(t_min, t_max, 1000)))\n\nclass TestTests(unittest.TestCase):\n \"\"\"\n Test the science testing code.\n \"\"\"\n\n def setUp(self):\n self.model = HeronHodlr()\n self.samples_catalogue = NRCatalogue(\"GeorgiaTech\")\n mock_waveforms = [\n MockWaveform(\"spam\", {\"q\": 1.0,\n \"tag\": \"test\",\n \"mass_ratio\": 1.0,\n \"spin_1x\": 0, \"spin_1y\": 0, \"spin_1z\": 0,\n \"spin_2x\": 0, \"spin_2y\": 0, \"spin_2z\": 0,\n \"s1x\": 0, \"s1y\": 0, \"s1z\": 0,\n \"s2x\": 0, \"s2y\": 0, \"s2z\": 0\n }),\n MockWaveform(\"eggs\", {\"q\": 0.8,\n \"tag\": \"test2\",\n \"mass_ratio\": 1.0,\n \"spin_1x\": 0, \"spin_1y\": 0, \"spin_1z\": 0,\n \"spin_2x\": 0, \"spin_2y\": 0, \"spin_2z\": 0,\n \"s1x\": 0, \"s1y\": 0, \"s1z\": 0,\n \"s2x\": 0, \"s2y\": 0, \"s2z\": 0\n\n })\n ]\n \n self.samples_catalogue.waveforms = mock_waveforms\n\n def test_nrcat_match(self):\n \"\"\"Test the NR catalogue matcher.\"\"\"\n matches = testing.nrcat_match(self.model, self.samples_catalogue)\n self.assertEqual(len(matches.values()), 2)\n"
]
| [
[
"numpy.linspace",
"numpy.random.randn"
]
]
|
NVIDIA/transformer-ls | [
"9badc4b6a611abb73f4d1a4e9708fc56ba7c5924"
]
| [
"autoregressive/model_lib/layer.py"
]
| [
"# Copyright (c) 2021 NVIDIA CORPORATION. Licensed under the MIT license.\n\nimport math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.checkpoint as cp\n\nfrom fairseq.modules.layer_norm import LayerNorm\n\nimport pdb\n\n\nclass ChunkedLSAttention(nn.Module):\n def __init__(self, d_model, n_head, chunk_size, chunk_rank, window_len, dropout,\n grad_chk=False, use_bias=False, dp_attn=0,\n probing=False):\n nn.Module.__init__(self)\n\n self.dropout = nn.Dropout(dropout)\n self.dp_attn = nn.Dropout(dp_attn)\n\n assert d_model % n_head == 0\n assert chunk_size > 0\n self.n_head = n_head\n self.head_dim = d_model // n_head\n self.window_len = window_len\n\n self.chunk_rank = chunk_rank\n self.chunk_size = chunk_size\n self.n_head = n_head\n self.d_h = d_model // n_head\n self.d_model = d_model\n\n self.dconv_1 = nn.Linear(d_model, n_head * chunk_rank)\n\n self.r_net = nn.Linear(d_model, d_model, bias=False)\n self.r_net_chunk = nn.Linear(d_model, d_model)\n self.d_head = d_model // self.n_head\n # Positional bias as in Transformer-XL.\n self.r_r_bias = nn.Parameter(torch.FloatTensor(1, self.n_head, 1, self.d_head))\n self.r_w_bias = nn.Parameter(torch.FloatTensor(1, self.n_head, 1, 1, self.d_head))\n\n self.grad_chk = grad_chk\n\n self.proj_query = nn.Linear(d_model, d_model, bias=use_bias)\n nn.init.xavier_normal_(self.proj_query.weight)\n self.proj_out = nn.Linear(d_model, d_model, bias=use_bias)\n nn.init.xavier_normal_(self.proj_out.weight)\n self.proj_val = nn.Linear(d_model, d_model, bias=use_bias)\n nn.init.xavier_normal_(self.proj_val.weight)\n self.proj_key = nn.Linear(d_model, d_model, bias=use_bias)\n nn.init.xavier_normal_(self.proj_key.weight)\n\n self.dual_ln_dproj = LayerNorm(d_model, export=probing)\n self.dual_ln_win = LayerNorm(d_model, export=probing)\n\n nn.init.zeros_(self.r_r_bias)\n nn.init.zeros_(self.r_w_bias)\n if use_bias:\n nn.init.zeros_(self.proj_query.bias)\n nn.init.zeros_(self.proj_out.bias)\n nn.init.zeros_(self.proj_val.bias)\n nn.init.zeros_(self.proj_key.bias)\n\n def head_reshape(self, x):\n K = self.n_head\n D = self.head_dim\n x = x.view(x.size()[:-1] + (K, D)) # B x (M+L) x K x D\n x = x.transpose(1, 2).contiguous() # B x K x (M+L) x D\n return x\n\n def compute_scores(self, h_vecs):\n # h_vecs: B x L x H\n bsz = h_vecs.shape[0]\n n_chunks = h_vecs.shape[1] // self.chunk_size\n h_scores = self.dconv_1(h_vecs).view(bsz, n_chunks, self.chunk_size, self.n_head, self.chunk_rank)\n # bsz x num_heads x n_chunks x chunk_rank x chunk_size\n h_scores = h_scores.permute(0, 3, 1, 4, 2)\n h_scores = F.softmax(h_scores.float(), dim=-1).type_as(h_scores)\n return h_scores\n\n def compress_chunks(self, h_vecs, h_scores):\n # Reshape hvecs to be compatible with the weights\n # h_vecs: B x L x H\n bsz = h_vecs.shape[0]\n n_chunks = h_vecs.shape[1] // self.chunk_size\n # bsz x n_heads x n_chunks x chunk_size x d_h\n h_vecs = h_vecs.view(-1, n_chunks, self.chunk_size, self.n_head, self.d_h).permute(0, 3, 1, 2, 4)\n # bsz x n_heads x n_chunks x chunk_rank x d_h\n h_vecs = h_scores.matmul(h_vecs).view(bsz, self.n_head, n_chunks * self.chunk_rank, self.d_h)\n return h_vecs\n\n def get_tiles(self, x, n_queries, transpose=False):\n # input: bsz x win_bp_len x d\n bsz, win_bp_len, d = x.shape\n in_strides = x.stride()\n out_strides = (in_strides[0], self.window_len*in_strides[1], in_strides[1], d//self.n_head, 1)\n out_shape = (bsz, n_queries//self.window_len, 2*self.window_len, self.n_head, d//self.n_head)\n x = 
x.as_strided(size=out_shape, stride=out_strides)\n if transpose:\n # shape: bsz x n_heads x n_queries//wlen x d//n_heads x 2*wlen\n return x.permute(0, 3, 1, 4, 2)\n else:\n # shape: bsz x n_heads x n_queries//wlen x 2*wlen x d//n_heads\n return x.permute(0, 3, 1, 2, 4)\n\n def put_tiles(self, x):\n # input: bsz x n_heads x bp_len x self.window_len\n bsz, n_heads, bp_len, window_len = x.shape\n if bp_len > window_len:\n x = x.view(bsz, n_heads, bp_len//window_len, window_len, window_len)\n out_size = (bsz, n_heads, bp_len//window_len, window_len, 2*window_len)\n x = F.pad(x, (1, window_len))\n else:\n x = x.view(bsz, n_heads, 1, bp_len, window_len)\n out_size = (bsz, n_heads, 1, bp_len, window_len + bp_len)\n x = F.pad(x, (1, bp_len))\n\n stride = x.stride()\n out_stride = (stride[0], stride[1], stride[2], stride[3]-1, stride[4])\n return x.as_strided(size=out_size, stride=out_stride)\n\n def compute_pv(self, attn, val):\n # attn: bsz x n_head x seqlen//wlen x wlen x 2*wlen\n # val: bsz x n_head x seqlen//wlen x 2*wlen x d_h\n bsz, n_head, chunks, wlen, _ = attn.shape\n out = attn.matmul(val)\n return out.view(bsz, n_head, int(chunks*wlen), -1)\n\n def get_diagonals(self, attn):\n # attn: bsz x n_heads x bp_len//self.window_len x self.window_len x 2*self.window_len\n # takes the upper diagonal with length self.window_len from attn, ignoring the diagonal\n bsz, n_heads, n_tiles, n_query, _ = attn.shape\n out_size = (bsz, n_heads, n_tiles, n_query, self.window_len)\n in_stride = attn.stride()\n out_stride = (in_stride[0], in_stride[1], in_stride[2], in_stride[3]+1, 1)\n return attn.as_strided(size=out_size, stride=out_stride, storage_offset=1).contiguous().view(\n bsz, n_heads, -1, self.window_len)\n\n def _rel_shift_chunked(self, x, chunk_size, chunk_rank):\n # x: bsz x n_head x n_query x (n_chunks * chunk_rank)\n # out: same size but shifted to the left, relative position encoding\n bsz, n_head, n_query, n_c_vecs = x.shape\n n_q_chunks = n_query // chunk_size\n x = x.view(bsz, n_head, n_q_chunks, chunk_size, n_c_vecs).transpose(2, 3).contiguous()\n x = F.pad(x, [0, chunk_rank])\n p_stride = x.stride()\n out_shape = list(x.shape)\n out_shape[-1] -= chunk_rank\n out_strides = (p_stride[0], p_stride[1], p_stride[2], p_stride[3]-chunk_rank, p_stride[4])\n\n x = x.as_strided(size=out_shape, stride=out_strides, storage_offset=n_q_chunks*chunk_rank)\n return x.transpose(2, 3).contiguous().view(bsz, n_head, n_query, n_c_vecs)\n\n def attn(self, query, key_window, val_window, key_compressed, value_compressed,\n pos_embed_chunks, pos_embed_window, chunk_attn_mask=None):\n # query size = bsz x n_heads x M x H\n # key, value sizes = bsz x (seq_len + cache_len) x (n_heads * H)\n # key_compressed: bsz x n_heads x (M+L)//chunk_size*chunk_rank x H\n bsz, n_heads, seq_len, d_h = query.shape\n assert (self.window_len > 0 or self.chunk_size > 1)\n\n query = query / math.sqrt(self.d_model // self.n_head)\n\n # get the keys, values for the local window attention\n if seq_len > self.window_len:\n query_tile = query.view(bsz, n_heads, seq_len // self.window_len, self.window_len, d_h)\n key_window = self.get_tiles(key_window, seq_len, transpose=True)\n val_window = self.get_tiles(val_window, seq_len,\n transpose=False) # bsz x n_heads x n_queries//wlen x 2*wlen x d//n_heads\n else:\n query_tile = query.view(bsz, n_heads, 1, seq_len, d_h)\n key_window = key_window.view(bsz, -1, self.n_head, d_h).permute(0, 2, 3, 1)[:, :, None, :, :]\n val_window = val_window.view(bsz, -1, self.n_head, d_h).permute(0, 2, 1, 3)[:, 
:, None, :, :]\n # bsz x n_heads x bp_len//self.window_len x self.window_len x 2*self.window_len\n attn_window = (query_tile+self.r_w_bias).matmul(key_window)\n attn_window = self.get_diagonals(attn_window)\n\n pos_trans = self.r_net(pos_embed_window).view(1, self.window_len, self.n_head, self.d_head).permute(0, 2, 3, 1)\n attn_window_pos = (query+self.r_r_bias).matmul(pos_trans)\n attn_window = attn_window + attn_window_pos\n\n # Compute the long-range attention.\n n_chunks = key_compressed.shape[2]\n # compute attention from context\n # bsz x n_heads x seq_len x (n_chunks*chunk_rank)\n attn_cont = torch.matmul(query, key_compressed.transpose(-1, -2))\n pos_chunks = self.r_net_chunk(pos_embed_chunks).view(1, n_chunks, self.n_head, self.d_head).permute(0, 2, 3, 1)\n\n attn_pos = torch.matmul(query, pos_chunks) # B x H x M x L_pos\n attn_pos = self._rel_shift_chunked(attn_pos, self.chunk_size, self.chunk_rank)\n\n attn_compress = attn_cont + attn_pos\n if chunk_attn_mask is not None:\n attn_compress = attn_compress.view(\n bsz, n_heads, seq_len//self.chunk_size, self.chunk_size, -1)\n attn_compress = attn_compress.masked_fill(chunk_attn_mask, float('-inf'))\n attn_compress = attn_compress.view(bsz, n_heads, seq_len, -1)\n\n # Get the softmax score of both short-term and long-range attentions.\n full_attn = torch.cat([attn_compress, attn_window], dim=3)\n full_attn = F.softmax(full_attn.float(), dim=-1).type_as(full_attn)\n full_attn = self.dp_attn(full_attn)\n\n attn_compress = full_attn[:, :, :, :attn_compress.shape[3]]\n attn_window = full_attn[:, :, :, attn_compress.shape[3]:]\n\n attn_window = self.put_tiles(attn_window)\n out = torch.matmul(attn_compress, value_compressed) \\\n + self.compute_pv(attn_window, val_window)\n\n return out\n\n def forward(self, h, h_cache, key_pe, pos_embed_window, chunked_attn_mask=None):\n if self.grad_chk:\n out = cp.checkpoint(self.forward_, *[\n h, h_cache, key_pe, pos_embed_window, chunked_attn_mask\n ])\n else:\n out = self.forward_(h, h_cache, key_pe, pos_embed_window, chunked_attn_mask)\n return out\n\n def forward_(self, h, h_cache, key_pe, pos_embed_window, chunked_attn_mask=None):\n # h = bsz x seq_len x H\n # h_cache = bsz x cache_len x H\n bsz = h.size(0)\n seqlen = h.size(1)\n\n query = self.proj_query(h)\n query = self.head_reshape(query)\n\n # sequence length and cache length should be divisible by the chunk size\n assert seqlen % self.chunk_size == 0 and h_cache.shape[1] % self.chunk_size == 0\n\n cache_scores = self.compute_scores(h_cache)\n h_cache_compressed = self.compress_chunks(h_cache, cache_scores)\n\n # The projection for the cache can be compressed using dynamic projection\n h_cache_merge = h_cache_compressed.view(\n bsz, self.n_head, -1, self.d_h).transpose(1, 2).contiguous().view(\n bsz, -1, self.d_model)\n # Apply projections to the compressed sequence.\n val_cache = self.proj_val(h_cache_merge)\n key_cache = self.proj_key(h_cache_merge)\n # DualLN (dproj)\n key_cache = self.dual_ln_dproj(key_cache)\n val_cache = self.dual_ln_dproj(val_cache)\n val_cache = self.head_reshape(val_cache)\n key_cache = self.head_reshape(key_cache)\n\n # Apply window attention\n val_window_bp = self.proj_val(h)\n key_window_bp = self.proj_key(h)\n\n # better using multipliers of 8\n h_cache_win = h_cache[:, -self.window_len:]\n key_cache_win = self.proj_key(h_cache_win)\n val_cache_win = self.proj_val(h_cache_win)\n key_window = torch.cat([key_cache_win, key_window_bp], dim=1)\n val_window = torch.cat([val_cache_win, val_window_bp], dim=1)\n\n # 
DualLN (window)\n key_window = self.dual_ln_win(key_window)\n val_window = self.dual_ln_win(val_window)\n\n bp_scores = self.compute_scores(h)\n # Compress the projeced keys and values.\n val_bp_compressed = self.compress_chunks(val_window_bp, bp_scores)\n key_bp_compressed = self.compress_chunks(key_window_bp, bp_scores)\n\n # DualLN (dproj)\n val_bp_compressed = self.dual_ln_dproj(\n val_bp_compressed.transpose(1, 2).contiguous().view(bsz, -1, self.d_model))\n key_bp_compressed = self.dual_ln_dproj(\n key_bp_compressed.transpose(1, 2).contiguous().view(bsz, -1, self.d_model))\n val_bp_compressed = self.head_reshape(val_bp_compressed)\n key_bp_compressed = self.head_reshape(key_bp_compressed)\n\n val_compressed = torch.cat([val_cache, val_bp_compressed], dim=2)\n key_compressed = torch.cat([key_cache, key_bp_compressed], dim=2)\n\n out = self.attn(query, key_window, val_window, key_compressed, val_compressed, key_pe, pos_embed_window, chunked_attn_mask) # B_K x M x D\n\n out = out.transpose(1, 2).contiguous() # B x M x K x D\n out = out.view(bsz, seqlen, -1) # B x M x K_D\n out = self.proj_out(out)\n out = self.dropout(out)\n return out\n"
]
| [
[
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.cat",
"torch.FloatTensor",
"torch.nn.Module.__init__",
"torch.utils.checkpoint.checkpoint",
"torch.nn.functional.pad",
"torch.nn.init.zeros_",
"torch.nn.init.xavier_normal_",
"torch.matmul"
]
]
|
GavinatorK/pssummitwkshp | [
"5244364e74e11e535b023691e3f38696b3b342c3"
]
| [
"tensorflow/tfModelCode.py"
]
| [
"print(\"starting imports\")\nimport tensorflow as tf\nimport json\nimport numpy as np\nimport os\nfrom tensorflow.keras.callbacks import ReduceLROnPlateau\n# new additions\nfrom smdebug.tensorflow import KerasHook\nimport time\n\nprint(\"done with imports\")\nprint(tf.__version__)\n\nINPUT_TENSOR_NAME = \"input_16_input\" # Watch out, it needs to match the name of the first layer + \"_input\"\nHEIGHT = 250\nWIDTH = 250\nDEPTH = 3\nIM_SIZE = (250, 250)\nNUM_CLASSES = 3\nBATCH_SIZE = 8\nCLASSES = [\"Priority\", \"Roundabout\", \"Signal\"]\n\n# new additions star\ndef between_steps_bottleneck():\n time.sleep(1)\n\n\nclass CustomCallback(tf.keras.callbacks.Callback):\n def on_train_batch_begin(self, batch, logs=None):\n if 10 <= batch < 20:\n between_steps_bottleneck()\n \n#END\n \ndef keras_model_fn(train_batches, val_batches, enable_bottleneck):\n \n data_augmentation = tf.keras.Sequential([\n tf.keras.layers.experimental.preprocessing.RandomFlip('horizontal'),\n tf.keras.layers.experimental.preprocessing.RandomRotation(0.2),\n ])\n \n preprocess_input = tf.keras.applications.efficientnet.preprocess_input \n \n IMG_SIZE=IM_SIZE + (3,)\n \n base_model = tf.keras.applications.efficientnet.EfficientNetB7(\n include_top=False,\n weights=\"imagenet\",\n input_shape=IMG_SIZE,classes=3\n )\n \n image_batch, label_batch = next(iter(train_dataset))\n feature_batch = base_model(image_batch)\n\n global_average_layer = tf.keras.layers.GlobalAveragePooling2D()\n feature_batch_average = global_average_layer(feature_batch)\n \n prediction_layer = tf.keras.layers.Dense(len(CLASSES), activation='softmax',name='softmax')\n prediction_batch = prediction_layer(feature_batch_average)\n inputs = tf.keras.Input(shape=(250, 250, 3))\n x = data_augmentation(inputs)\n x = preprocess_input(x)\n x = base_model(x, training=False)\n x = global_average_layer(x)\n x = tf.keras.layers.Dropout(0.2)(x)\n outputs = prediction_layer(x)\n model = tf.keras.Model(inputs, outputs)\n base_learning_rate = 0.0002\n model.compile(loss=tf.keras.losses.CategoricalCrossentropy(),\n optimizer = tf.keras.optimizers.SGD(),\n metrics=['accuracy'])\n\n # Estimate class weights for unbalanced dataset\n # class_weights = class_weight.compute_class_weight(\n # 'balanced',\n # np.unique(train_batches.classes),\n # train_batches.classes)\n\n ReduceLR = ReduceLROnPlateau(monitor='val_loss', factor=0.2,\n patience=5, min_lr=3e-4)\n\n if enable_bottleneck:\n callbacks = [CustomCallback()] if enable_bottleneck else []\n callbacks.append(ReduceLR)\n\n model.fit(train_batches,\n validation_data=val_batches,\n epochs=1,\n callbacks=callbacks)\n return model\n\n\ndef train_input_fn(training_dir, hyperparameters):\n return _input(tf.estimator.ModeKeys.TRAIN, batch_size=BATCH_SIZE, data_dir=training_dir)\n\n\ndef eval_input_fn(training_dir, hyperparameters):\n return _input(tf.estimator.ModeKeys.EVAL, batch_size=BATCH_SIZE, data_dir=training_dir)\n\n\nimport os\n# from tensorflow.python.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.preprocessing import image_dataset_from_directory\n\n\n\ndef _input(mode, batch_size, data_dir):\n assert os.path.exists(data_dir), (\"Unable to find images resources for input, are you sure you downloaded them ?\")\n\n\n train_dataset = image_dataset_from_directory(data_dir + '/train',\n shuffle=True,\n batch_size=BATCH_SIZE,\n image_size=IM_SIZE,label_mode='categorical')\n \n images, labels = next(iter(train_dataset))\n\n return {INPUT_TENSOR_NAME: images}, labels\n\n\ndef 
serving_input_fn(hyperparameters):\n # Here it concerns the inference case where we just need a placeholder to store\n # the incoming images ...\n tensor = tf.placeholder(tf.float32, shape=[None, HEIGHT, WIDTH, DEPTH])\n inputs = {INPUT_TENSOR_NAME: tensor}\n return tf.estimator.export.ServingInputReceiver(inputs, inputs)\n\n\ndef _parse_args():\n import argparse\n\n parser = argparse.ArgumentParser()\n\n # Data, model, and output directories\n # model_dir is always passed in from SageMaker. By default this is a S3 path under the default bucket.\n parser.add_argument('--model_dir', type=str)\n parser.add_argument('--sm-model-dir', type=str, default=os.environ.get('SM_MODEL_DIR'))\n parser.add_argument('--train', type=str, default=os.environ.get('SM_CHANNEL_TRAINING'))\n parser.add_argument('--hosts', type=list, default=json.loads(os.environ.get('SM_HOSTS')))\n parser.add_argument('--current-host', type=str, default=os.environ.get('SM_CURRENT_HOST'))\n\n return parser.parse_known_args()\n\n\nif __name__ == \"__main__\":\n print(\"starting in main\")\n args, unknown = _parse_args()\n\n data_dir = args.train\n \n train_dataset=image_dataset_from_directory(data_dir + '/train',\n shuffle=True,\n batch_size=BATCH_SIZE,\n image_size=IM_SIZE,label_mode='categorical')\n\n\n valid_dataset = image_dataset_from_directory(data_dir + '/test',\n image_size=IM_SIZE,\n label_mode='categorical', shuffle=False,\n batch_size=BATCH_SIZE)\n\n # Create the Estimator\n print(\"calling model fit\")\n #added True to enable_bottleneck\n junction_classifier = keras_model_fn(train_dataset, valid_dataset, True)\n print(\"about to save\")\n\n if args.current_host == args.hosts[0]:\n \n # save model to an S3 directory with version number '00000001'\n # sound_classifier.save(os.path.join(args.sm_model_dir, '000000001'), 'sound_model.h5')\n tf.keras.models.save_model(junction_classifier, os.path.join(args.sm_model_dir, 'tf000000001/1'))\n"
]
| [
[
"tensorflow.keras.optimizers.SGD",
"tensorflow.keras.applications.efficientnet.EfficientNetB7",
"tensorflow.keras.layers.experimental.preprocessing.RandomFlip",
"tensorflow.keras.preprocessing.image_dataset_from_directory",
"tensorflow.keras.layers.experimental.preprocessing.RandomRotation",
"tensorflow.keras.layers.Dropout",
"tensorflow.estimator.export.ServingInputReceiver",
"tensorflow.keras.Model",
"tensorflow.placeholder",
"tensorflow.keras.layers.GlobalAveragePooling2D",
"tensorflow.keras.losses.CategoricalCrossentropy",
"tensorflow.keras.Input",
"tensorflow.keras.callbacks.ReduceLROnPlateau"
]
]
|
prateekstark/openai-gym-games | [
"7b25938cf7bb59f1e9197e8a040893a7011687ff"
]
| [
"Bipedal Walker/bipedal_walker_gradient_monte_carlo.py"
]
| [
"import gym\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\n# Gradient Monte Carlo Algorithm Implementation\n\ndef epsilon_greedy_action(Q, state, e):\n\tif(np.random.uniform(0, 1) < e):\n\t\taction = env.action_space.sample()\n\telse:\n\t\taction = np.argmax(Q[state, :])\n\treturn action\n\ndef get_epsilon(episode, num_episodes):\n\treturn math.exp(-2.4*episode/num_episodes)\n\n# Algorithm parameters: step size alpha from (0, 1], small epsilon > 0\nalpha = 0.5\nepsilon = 1\nnum_episodes =50000\ngamma = 0.95\n\nenv = gym.make('BipedalWalker-v3')\n\n# Initialize Q(s, a), for all s belongs to S+, a belongs to A(s), arbitarily except that Q(terminal, .) = 0\n# Q = np.zeros((env.observation_space.n, env.action_space.n))\nw = np.zeros((24, 4))\nprint((env.action_space))\n# score_list = []\n# timesteps = []\n# # Loop for each episode\nfor episode in range(num_episodes):\n# \t# Initialize S\n\tstate = env.reset()\n\n# \t# Loop for each step of episode\n\twhile(True):\n\t\tenv.render()\n\t\t# print(env.observation_space.high.shape)\n\t\tstate = state.reshape(1, 24)\n\t\t# print(state)\n# \t\t# Decaying epsilon\n# \t\tepsilon = get_epsilon(episode, num_episodes)\n# \t\t# print(epsilon)\n# \t\t# Choose A from S using policy derived from Q (epsilon greedy)\n# \t\taction = epsilon_greedy_action(Q, state, epsilon)\n\t\taction = env.action_space.sample()\n# \t\t# Take action A, observe R and S`\n\t\tobservation, reward, done, info = env.step(action)\n\n# \t\t# Q(S, A) = Q(S, A) + alpha[R + gamma*maxa(Q(S`, A)) - Q(S, A)]\n# \t\tQ[state, action] = Q[state, action] + alpha*(reward + (gamma*max(Q[observation, :])) - Q[state, action])\n# \t\t# S <- S`\n# \t\tstate = observation\n\t\t\n# \t\t# Terminal State\n\t\tif(done):\n# \t\t\tprint(\"Episode finished after {} timesteps\".format(t+1))\n# \t\t\tscore_list.append(reward)\n# \t\t\ttimesteps.append(t)\n\t\t\tprint(\"Reward: \" + str(reward))\n# \t\t\t# print(Q)\n\t\t\tbreak\n\n# performance_list = []\n# for i in range(num_episodes):\n# \tif(i%100 == 0):\n# \t\tif(i != 0):\n# \t\t\tperformance_list.append(sum(temp_score)/100.0)\n# \t\ttemp_score = [score_list[i]]\n# \telse:\n# \t\ttemp_score.append(score_list[i])\n\n\n# print(\"Printing Q-table: \")\n# print(Q)\n# plt.plot(performance_list)\n# # plt.plot(score_list)\n# # plt.plot(timesteps)\n# plt.ylabel('Performance')\n# plt.xlabel('Episodes')\n# plt.savefig('FrozenLake-v0_q_learning.png')\n# plt.show()\nenv.close()"
]
| [
[
"numpy.random.uniform",
"numpy.argmax",
"numpy.zeros"
]
]
|
TopGun666/FlowVO | [
"382bf31e9acc49dcb448713cb8e7e79eb4ae9e8b"
]
| [
"FlowNet2_src/flownet_sd/flownet_sd.py"
]
| [
"from ..net import Net, Mode\nfrom ..utils import LeakyReLU, average_endpoint_error, pad, antipad, crop_features\nfrom ..downsample import downsample\nimport math\nimport tensorflow as tf\nslim = tf.contrib.slim\n\n\n# (Yuliang) Replace antipad() with crop_features()\nclass FlowNetSD(Net):\n\n def __init__(self, mode=Mode.TRAIN, debug=False):\n super(FlowNetSD, self).__init__(mode=mode, debug=debug)\n\n def model(self, inputs, training_schedule, trainable=True):\n _, height, width, _ = inputs['input_a'].shape.as_list()\n with tf.variable_scope('FlowNetSD'):\n concat_inputs = tf.concat([inputs['input_a'], inputs['input_b']], axis=3)\n with slim.arg_scope([slim.conv2d, slim.conv2d_transpose],\n # Only backprop this network if trainable\n trainable=trainable,\n # He (aka MSRA) weight initialization\n weights_initializer=slim.variance_scaling_initializer(),\n activation_fn=LeakyReLU,\n # We will do our own padding to match the original Caffe code\n padding='VALID'):\n\n weights_regularizer = slim.l2_regularizer(training_schedule['weight_decay'])\n with slim.arg_scope([slim.conv2d], weights_regularizer=weights_regularizer):\n conv0 = slim.conv2d(pad(concat_inputs), 64, 3, scope='conv0')\n conv1 = slim.conv2d(pad(conv0), 64, 3, stride=2, scope='conv1')\n conv1_1 = slim.conv2d(pad(conv1), 128, 3, scope='conv1_1')\n conv2 = slim.conv2d(pad(conv1_1), 128, 3, stride=2, scope='conv2')\n conv2_1 = slim.conv2d(pad(conv2), 128, 3, scope='conv2_1')\n conv3 = slim.conv2d(pad(conv2_1), 256, 3, stride=2, scope='conv3')\n conv3_1 = slim.conv2d(pad(conv3), 256, 3, scope='conv3_1')\n conv4 = slim.conv2d(pad(conv3_1), 512, 3, stride=2, scope='conv4')\n conv4_1 = slim.conv2d(pad(conv4), 512, 3, scope='conv4_1')\n conv5 = slim.conv2d(pad(conv4_1), 512, 3, stride=2, scope='conv5')\n conv5_1 = slim.conv2d(pad(conv5), 512, 3, scope='conv5_1')\n conv6 = slim.conv2d(pad(conv5_1), 1024, 3, stride=2, scope='conv6')\n conv6_1 = slim.conv2d(pad(conv6), 1024, 3, scope='conv6_1')\n\n \"\"\" START: Refinement Network \"\"\"\n with slim.arg_scope([slim.conv2d_transpose], biases_initializer=None):\n # H/64 x W/64\n predict_flow6 = slim.conv2d(pad(conv6_1), 2, 3, scope='predict_flow6', activation_fn=None)\n #deconv5 = antipad(slim.conv2d_transpose(conv6_1, 512, 4, stride=2, scope='deconv5'))\n #upsample_flow6to5 = antipad(slim.conv2d_transpose(predict_flow6, 2, 4, stride=2, scope='upsample_flow6to5', activation_fn=None))\n deconv5 = slim.conv2d_transpose(conv6_1, 512, 4, stride=2, scope='deconv5')\n upsample_flow6to5 = slim.conv2d_transpose(predict_flow6, 2, 4, stride=2, scope='upsample_flow6to5', activation_fn=None)\n deconv5 = crop_features(deconv5, tf.shape(conv5_1))\n upsample_flow6to5 = crop_features(upsample_flow6to5, tf.shape(conv5_1))\n concat5 = tf.concat([conv5_1, deconv5, upsample_flow6to5], axis=3)\n interconv5 = slim.conv2d(pad(concat5), 512, 3, activation_fn=None, scope='interconv5')\n\n # H/32 x W/32\n predict_flow5 = slim.conv2d(pad(interconv5), 2, 3, scope='predict_flow5', activation_fn=None)\n #deconv4 = antipad(slim.conv2d_transpose(concat5, 256, 4, stride=2, scope='deconv4'))\n #upsample_flow5to4 = antipad(slim.conv2d_transpose(predict_flow5, 2, 4, stride=2, scope='upsample_flow5to4', activation_fn=None))\n deconv4 = slim.conv2d_transpose(concat5, 256, 4, stride=2, scope='deconv4')\n upsample_flow5to4 = slim.conv2d_transpose(predict_flow5, 2, 4, stride=2, scope='upsample_flow5to4', activation_fn=None)\n deconv4 = crop_features(deconv4, tf.shape(conv4_1))\n upsample_flow5to4 = crop_features(upsample_flow5to4, 
tf.shape(conv4_1))\n concat4 = tf.concat([conv4_1, deconv4, upsample_flow5to4], axis=3)\n interconv4 = slim.conv2d(pad(concat4), 256, 3, activation_fn=None, scope='interconv4')\n\n # H/16 x W/16\n predict_flow4 = slim.conv2d(pad(interconv4), 2, 3, scope='predict_flow4', activation_fn=None)\n #deconv3 = antipad(slim.conv2d_transpose(concat4, 128, 4, stride=2, scope='deconv3'))\n #upsample_flow4to3 = antipad(slim.conv2d_transpose(predict_flow4, 2, 4, stride=2, scope='upsample_flow4to3', activation_fn=None))\n deconv3 = slim.conv2d_transpose(concat4, 128, 4, stride=2, scope='deconv3')\n upsample_flow4to3 = slim.conv2d_transpose(predict_flow4, 2, 4, stride=2, scope='upsample_flow4to3', activation_fn=None)\n deconv3 = deconv3 = crop_features(deconv3, tf.shape(conv3_1))\n upsample_flow4to3 = crop_features(upsample_flow4to3, tf.shape(conv3_1))\n concat3 = tf.concat([conv3_1, deconv3, upsample_flow4to3], axis=3)\n interconv3 = slim.conv2d(pad(concat3), 128, 3, activation_fn=None, scope='interconv3')\n\n # H/8 x W/8\n predict_flow3 = slim.conv2d(pad(interconv3), 2, 3, scope='predict_flow3', activation_fn=None)\n #deconv2 = antipad(slim.conv2d_transpose(concat3, 64, 4, stride=2, scope='deconv2'))\n #upsample_flow3to2 = antipad(slim.conv2d_transpose(predict_flow3, 2, 4, stride=2, scope='upsample_flow3to2', activation_fn=None))\n deconv2 = slim.conv2d_transpose(concat3, 64, 4, stride=2, scope='deconv2')\n upsample_flow3to2 = slim.conv2d_transpose(predict_flow3, 2, 4, stride=2, scope='upsample_flow3to2', activation_fn=None)\n deconv2 = crop_features(deconv2, tf.shape(conv2))\n upsample_flow3to2 = crop_features(upsample_flow3to2, tf.shape(conv2))\n concat2 = tf.concat([conv2, deconv2, upsample_flow3to2], axis=3)\n interconv2 = slim.conv2d(pad(concat2), 64, 3, activation_fn=None, scope='interconv2')\n\n predict_flow2 = slim.conv2d(pad(interconv2), 2, 3, scope='predict_flow2', activation_fn=None)\n \"\"\" END: Refinement Network \"\"\"\n\n flow = predict_flow2 * 0.05\n # TODO: Look at Accum (train) or Resample (deploy) to see if we need to do something different\n flow = tf.image.resize_bilinear(flow,\n tf.stack([height, width]),\n align_corners=True)\n\n return {\n 'predict_flow6': predict_flow6,\n 'predict_flow5': predict_flow5,\n 'predict_flow4': predict_flow4,\n 'predict_flow3': predict_flow3,\n 'predict_flow2': predict_flow2,\n 'flow': flow,\n }\n\n def loss(self, flow, predictions):\n flow = flow * 20.0\n\n losses = []\n INPUT_HEIGHT, INPUT_WIDTH = float(flow.shape[1].value), float(flow.shape[2].value)\n\n # L2 loss between predict_flow6, blob23 (weighted w/ 0.32)\n predict_flow6 = predictions['predict_flow6']\n size = [predict_flow6.shape[1], predict_flow6.shape[2]]\n downsampled_flow6 = downsample(flow, size)\n losses.append(average_endpoint_error(downsampled_flow6, predict_flow6))\n\n # L2 loss between predict_flow5, blob28 (weighted w/ 0.08)\n predict_flow5 = predictions['predict_flow5']\n size = [predict_flow5.shape[1], predict_flow5.shape[2]]\n downsampled_flow5 = downsample(flow, size)\n losses.append(average_endpoint_error(downsampled_flow5, predict_flow5))\n\n # L2 loss between predict_flow4, blob33 (weighted w/ 0.02)\n predict_flow4 = predictions['predict_flow4']\n size = [predict_flow4.shape[1], predict_flow4.shape[2]]\n downsampled_flow4 = downsample(flow, size)\n losses.append(average_endpoint_error(downsampled_flow4, predict_flow4))\n\n # L2 loss between predict_flow3, blob38 (weighted w/ 0.01)\n predict_flow3 = predictions['predict_flow3']\n size = [predict_flow3.shape[1], 
predict_flow3.shape[2]]\n downsampled_flow3 = downsample(flow, size)\n losses.append(average_endpoint_error(downsampled_flow3, predict_flow3))\n\n # L2 loss between predict_flow2, blob43 (weighted w/ 0.005)\n predict_flow2 = predictions['predict_flow2']\n size = [predict_flow2.shape[1], predict_flow2.shape[2]]\n downsampled_flow2 = downsample(flow, size)\n losses.append(average_endpoint_error(downsampled_flow2, predict_flow2))\n\n loss = tf.losses.compute_weighted_loss(losses, [0.32, 0.08, 0.02, 0.01, 0.005])\n\n # Return the 'total' loss: loss fns + regularization terms defined in the model\n return tf.losses.get_total_loss()\n"
]
| [
[
"tensorflow.losses.compute_weighted_loss",
"tensorflow.shape",
"tensorflow.concat",
"tensorflow.losses.get_total_loss",
"tensorflow.variable_scope",
"tensorflow.stack"
]
]
|
British-Oceanographic-Data-Centre/NEMO-ENTRUST | [
"41ed278e56428404ab8ec41d74a9a3a761e308ae"
]
| [
"unit_testing/test_eof_methods.py"
]
| [
"\"\"\"\n\n\"\"\"\n\n# IMPORT modules. Must have unittest, and probably coast.\nimport coast\nimport unittest\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport unit_test_files as files\nimport datetime\n\n\nclass test_eof_methods(unittest.TestCase):\n def test_compute_regular_eof(self):\n nemo_t = coast.Gridded(\n fn_data=files.fn_nemo_grid_t_dat, fn_domain=files.fn_nemo_dom, config=files.fn_config_t_grid\n )\n eofs = coast.compute_eofs(nemo_t.dataset.ssh)\n\n ssh_reconstruction = (eofs.EOF * eofs.temporal_proj).sum(dim=\"mode\").sum(dim=[\"x_dim\", \"y_dim\"])\n ssh_anom = (nemo_t.dataset.ssh - nemo_t.dataset.ssh.mean(dim=\"t_dim\")).sum(dim=[\"x_dim\", \"y_dim\"])\n\n # Check ssh anomaly is reconstructed at each time point\n check1 = np.allclose(ssh_reconstruction, ssh_anom, rtol=0.0001)\n var_cksum = eofs.variance.sum(dim=\"mode\").compute().item()\n check2 = np.isclose(var_cksum, 100)\n\n self.assertTrue(check1, \"check1\")\n self.assertTrue(check2, \"check2\")\n\n def test_compute_heofs(self):\n nemo_t = coast.Gridded(\n fn_data=files.fn_nemo_grid_t_dat, fn_domain=files.fn_nemo_dom, config=files.fn_config_t_grid\n )\n heofs = coast.compute_hilbert_eofs(nemo_t.dataset.ssh)\n\n ssh_reconstruction = (\n (heofs.EOF_amp * heofs.temporal_amp * np.exp(1j * np.radians(heofs.EOF_phase + heofs.temporal_phase)))\n .sum(dim=\"mode\")\n .real.sum(dim=[\"x_dim\", \"y_dim\"])\n )\n\n ssh_anom = (nemo_t.dataset.ssh - nemo_t.dataset.ssh.mean(dim=\"t_dim\")).sum(dim=[\"x_dim\", \"y_dim\"])\n\n # Check ssh anomaly is reconstructed at each time point\n check1 = np.allclose(ssh_reconstruction, ssh_anom, rtol=0.0001)\n var_cksum = heofs.variance.sum(dim=\"mode\").item()\n check2 = np.isclose(var_cksum, 100)\n\n self.assertTrue(check1, \"check1\")\n self.assertTrue(check2, \"check2\")\n"
]
| [
[
"numpy.allclose",
"numpy.isclose",
"numpy.radians"
]
]
|
danaraujocr/trendfitter | [
"b91f0c4fb0be8479d6a1a385b65196cf537fcad5"
]
| [
"trendfitter/models/MLSMB_PLS.py"
]
| [
"from numpy import array, isnan, nansum, nan_to_num, multiply, sum, sqrt, append, zeros, place, nan, concatenate, mean, nanvar, std, unique, where, nanmean, identity\nfrom numpy.linalg import norm, pinv\nfrom sklearn.model_selection import KFold\nfrom pandas import DataFrame, Series\nfrom trendfitter.auxiliary.tf_aux import scores_with_missing_values\n\nclass MLSMB_PLS:\n \"\"\"\n A sklearn-like class for the Sequential Multi-Block Projection to Latent Structures.\n\n Parameters\n ----------\n cv_splits_number : int, optional\n number of splits used for cross validation in case latent_variables is None\n tol : float, optional\n value used to decide if model has converged\n loop_limit : int, optional\n maximum number of loops for the extraction of one latent variable\n missing_values_method : str, optional\n string to define the method that the model will deal with missing values\n\n Attributes\n ----------\n latent_variables : [int]\n list of number of latent variables deemed relevant from each block. \n block_divs : [int]\n list with the index of every block final position. ex. [2,4] means two blocks with \n columns 0 and 1 on the first block and columns 2 and 3 on the second block. \n Assigned when fit method runs.\n p_loadings_block : [array_like]\n list of p loadings arrays of every block with all the extracted latent variables \n superlevel_p_loadings : array_like\n array of all latent variables extracted p loadings for the super level\n weights_block : [array_like]\n list of weights arrays of every block with all the extracted latent variables \n weights_super : array_like\n array of all latent variables extracted weights for the super level\n c_loadings : array_like\n array of c_loadings for all extracted latent variables\n q2 : [float]\n list of r² coefficients extracted during cross validation\n feature_importances : [float]\n list of values that represent how important is each variable in the same order \n of the X columns on the first matrix\n\n Methods\n -------\n fit(X, blocks_sep, Y)\n Applies the NIPALS like method to find the best parameters that adjust the model \n to the data\n transform(X)\n Transforms the original data to the latent variable space in the super level \n transform_inv(scores)\n Returns the superlevel scores to the original data space\n transform_b(X, block)\n Transforms the original data to the latent variable space in the block level for\n all blocks\n predict(X)\n Predicts Y values \n score(X, Y)\n calculates the r² value for Y\n Hotellings_T2(X)\n Calculates Hotellings_T2 values for the X data in the super level\n Hotellings_T2_blocks(X)\n Calculates Hotellings_T2 values for in the block level for all blocks\n SPEs_X(X)\n Calculates squared prediction errors on the X side for the super level\n SPEs_X_blocks(X)\n Calculates squared prediction errors for the block level for all blocks\n SPEs_Y(X, Y)\n Calculates squared prediction errors for the predictions\n contributions_scores(X)\n calculates the contributions of each variable to the scores on the super level\n contributions_scores_b(X)\n calculates the contributions of each variable to the scores on the super level\n contributions_SPE(X)\n calculates the contributions of each variable to the SPE on the X side for\n the super level\n contributions_SPE_b(X)\n calculates the contributions of each variable to the SPE on the X side for\n the block level for all blocks\n\n \"\"\"\n def __init__(self, cv_splits_number = 7, tol = 1e-16, loop_limit = 1000, missing_values_method = 'TSM'):\n \n # Parameters\n 
\n self.cv_splits_number = cv_splits_number # number of splits for latent variable cross-validation\n self.tol = tol # criteria for convergence\n self.loop_limit = loop_limit # maximum number of loops before convergence is decided to be not attainable\n self.q2y = [] # list of cross validation scores\n self.missing_values_method = missing_values_method\n \n # Attributes\n\n self.latent_variables = None # number of principal components to be extracted\n self.third_level_divs = None\n self.second_level_divs = None\n self.p_loadings_3 = None\n self.p_loadings_2 = None\n self.p_loadings_1 = None\n self.weights_3 = None\n self.weights_2 = None\n self.weights_1 = None\n self.x_weights_star = None\n self.x_weights = None\n self.c_loadings = None \n self.VIPs = None\n self.coefficients = None\n self.omega = None # score covariance matrix for missing value estimation\n self.training_scores_3 = None\n self.training_scores_2 = None\n self.training_scores_1 = None\n self.training_scores_y = None\n \n def fit(self, X, third_level_divs, second_level_divs, Y, latent_variables = None, deflation = 'both', int_call = False, random_state = None):\n \"\"\"\n Adjusts the model parameters to best fit the Y using the algorithm defined in \n \n\n Parameters\n ----------\n X : array_like\n Matrix with all the data to be used as predictors in one only object\n block_divs : [int]\n list with the index of every block final position. ex. [2,4] means two blocks with \n columns 0 and 1 on the first block and columns 2 and 3 on the second block.\n Y : array_like\n Matrix with all the data to be predicted in one only object\n latent_variables : [int], optional\n list of number of latent variables deemed relevant from each block. If left unspecified\n a cross validation routine will define the number during fitting\n deflation : str, optional\n string defining method of deflation, only Y or both X and Y \n int_call : Boolean, optional\n Flag to define if it is an internal call on the cross validation routine and decide\n if it is necessary to calculate the VIP values\n\n References \n ----------\n \n\n \"\"\"\n\n \n # check if X and Y make sense\n # check if block_divs make sense \n # check if latent_variables make sense\n\n\n self.third_level_divs = third_level_divs\n self.second_level_divs = second_level_divs\n third_level_pairs = (*zip([0] + third_level_divs[:-1], third_level_divs),)\n second_level_pairs = (*zip(second_level_divs[:-1], second_level_divs[1:]),)\n\n if isinstance(X, DataFrame):# If X data is a pandas Dataframe\n X = array(X.to_numpy(), ndmin = 2)\n \n if isinstance(Y, DataFrame) or isinstance(Y, Series): # If Y data is a pandas Dataframe or Series \n Y = array(Y.to_numpy(), ndmin = 2).T\n \n Orig_X = X.copy()\n Orig_Y = Y.copy()\n \n \"\"\"------------------- model Calculation -------------\"\"\"\n\n all_blocks = set(third_level_pairs).union(set(second_level_pairs))\n max_LVs = [min([pair[1] - pair[0] for pair in third_level_pairs])] + [pair[1] - pair[0] for pair in second_level_pairs]\n\n for block, LVs in enumerate(max_LVs):\n for LV in range(LVs):\n if LV >= latent_variables[block]: break\n\n if block == 0: \n result = self._MLSMBPLS_1LV(X,third_level_pairs, second_level_pairs, Y, 0)\n else:\n missing_values_list = [0 for _ in second_level_pairs]\n result = self._SMBPLS_1LV(X, second_level_pairs[block-1:], Y, missing_values_list, block)\n\n \n #------------------------------------deflation----------------------------\n\n if deflation == 'both' :\n X -= result['t1l'] @ result['p1l']\n Y -= result['t1l'] 
@ result['c']\n elif deflation == 'Y':\n Y -= result['t1l'] @ result['c']\n else : raise Exception(f'Deflation method \"{deflation}\" non-existing.')\n \n #--------------------------Property assignment section--------------------\n if LV == 0 and block == 0 :\n self.superlevel_weights = result['w1l']\n #self.x_weights = result['w']\n #self.block_weights = result['wb']\n #self.block_p_loadings = result['pb']\n self.superlevel_p_loadings = result['p1l']\n self.c_loadings = result['c']\n self.training_superlevel_scores = result['t1l']\n #self.training_block_scores = result['tb']\n self.training_y_scores = result['u']\n #self.x_weights_star2 = result['w']\n #for bl, (start, end) in enumerate(block_coord_pairs):\n #G[bl] = G[bl] - result['w'][:, start:end].T @ result['p'][:, start:end]\n\n \n else: \n self.superlevel_weights = append(self.superlevel_weights, result['w1l'], axis = 0)\n #self.x_weights = append(self.x_weights, result['w'], axis = 0)\n #self.block_weights = append(self.block_weights, result['wb'], axis = 0)\n #self.block_p_loadings = append(self.block_p_loadings, result['pb'], axis = 0)\n self.superlevel_p_loadings = append(self.superlevel_p_loadings, result['p1l'], axis = 0)\n self.c_loadings = append(self.c_loadings, result['c'], axis = 0)\n self.training_superlevel_scores = append(self.training_superlevel_scores, result['t1l'], axis = 1)\n #self.training_block_scores = append(self.training_block_scores, result['tb'], axis = 1)\n self.training_y_scores = append(self.training_y_scores, result['u'], axis = 1)\n\n #for bl, (start, end) in enumerate(block_coord_pairs[block:]):\n #if bl == 0: x_weights_star2 = array(G[block + bl] @ result['w'][:, start:end].T, ndmin = 2).T\n #else: x_weights_star2 = append(x_weights_star2, array(G[block + bl] @ result['w'][:, start:end].T, ndmin = 2 ).T, axis = 1)\n #if block > 0 and bl == 0: x_weights_star2 = append(zeros((1, start)), x_weights_star2, axis = 1)\n\n #G[block + bl] = G[block + bl] - result['w'][:, start:end].T @ result['p'][:, start:end]\n \n #self.x_weights_star2 = append(self.x_weights_star2, x_weights_star2, axis = 0)\n \n #self.block_weights_star = (self.block_weights @ pinv(self.block_p_loadings.T @ self.block_weights))\n #self.x_weights_star = (self.x_weights @ pinv(self.superlevel_p_loadings.T @ self.x_weights))\n\n return\n\n def transform(self, X, latent_variables = None): \n \n \"\"\"\n Transforms the X matrix to the model-fitted space returning scores\n of the super level.\n\n Parameters\n ----------\n X : array_like\n Matrix with all the data to be used as predictors in one only object\n latent_variables : [int], optional\n list with number of latent variables to be used. 
\n\n Returns\n -------\n result : array_like of shape (X.shape[0], sum(latent_variables))\n Scores for the X values transformed on the super level\n\n \"\"\"\n\n if isinstance(X, DataFrame): \n X_values = X.to_numpy()\n else:\n X_values = X \n\n if latent_variables == None : \n latent_variables = sum(self.latent_variables)\n else:\n latent_variables = sum(latent_variables)\n \n if isnan(sum(X_values)):\n \n result = zeros((X_values.shape[0], latent_variables))\n X_nan = isnan(X_values)\n variables_missing_mask = unique(X_nan, axis = 0)\n\n for row_mask in variables_missing_mask:\n \n rows_indexes = where((X_nan == row_mask).all(axis = 1)) \n \n if sum(row_mask) == 0: \n\n result[rows_indexes, :] = X[rows_indexes, :] @ self.x_weights_star[:latent_variables, :].T \n \n else:\n \n result[rows_indexes, :] = scores_with_missing_values(self.omega, self.x_weights_star[:, ~row_mask], X[rows_indexes[0][:, None], ~row_mask], \n LVs = latent_variables, method = self.missing_values_method)\n \n else : result = X_values @ self.x_weights_star[:latent_variables, :].T\n\n # TO DO : check if X makes sense with latent variables\n return result\n\n def transform_inv(self, scores, latent_variables = None):\n\n \"\"\"\n Transforms the superlevel scores matrix to the original X.\n\n Parameters\n ----------\n scores : array_like\n Matrix with all the scores to be used to rebuild X\n latent_variables : int, optional\n number of latent variables to be used. \n\n Returns\n -------\n result : array_like \n matrix of rebuilt X from scores\n \"\"\"\n \n if latent_variables == None : \n latent_variables = sum(self.latent_variables)\n else:\n latent_variables = sum(latent_variables)\n\n result = scores @ self.p[:latent_variables, :] \n \n return result\n \n def predict( self, X, latent_variables = None): \n\n \"\"\"\n Predicts Y values using X array.\n\n Parameters\n ----------\n X : array_like\n Samples Matrix\n latent_variables : [int], optional\n number of latent variables to be used. \n\n Returns\n -------\n preds : array_like \n returns predictions\n \"\"\"\n\n if isinstance( X, DataFrame ) : X = X.to_numpy() \n if latent_variables == None : \n latent_variables = sum(self.latent_variables)\n else:\n latent_variables = sum(latent_variables)\n\n preds = self.transform(X, latent_variables = latent_variables) @ self.c_loadings[ :latent_variables, :]\n \n return preds\n\n if latent_variables == None : latent_variables = self.latent_variables\n \n Y_hat = self.transform(X, latent_variables = latent_variables) @ self.c_loadings[:, :sum(latent_variables)].T\n\n return Y_hat\n\n def score(self, X, Y, latent_variables = None): \n\n \"\"\"\n Return the coefficient of determination R^2 of the prediction.\n\n R² is defined as 1 - Variance(Error) / Variance(Y) with Error = Y - predictions(X)\n\n Parameters\n ----------\n X : array_like\n Matrix with all the X to be used\n Y : array_like\n Matrix with all the Y ground truth values\n latent_variables : [int], optional\n number of latent variables to be used. 
\n\n Returns\n -------\n score : float \n returns calculated r².\n \"\"\"\n\n if latent_variables == None : latent_variables = self.latent_variables\n if isinstance(Y, DataFrame) or isinstance(Y, Series): \n Y_values = array(Y.to_numpy(), ndmin = 2).T\n else: \n Y_values = Y\n\n Y_hat = self.predict(X, latent_variables = latent_variables)\n F = Y_values - Y_hat\n score = 1 - nanvar(F) / nanvar(Y_values)\n\n return score\n \n def _MLSMBPLS_1LV(self, X, third_level_pairs, second_level_pairs, Y, missing_values):\n \n num_blocks3l = len(third_level_pairs)\n conv = 1\n loop = 0 \n p_3rd = zeros((1, third_level_pairs[-1][-1] - third_level_pairs[0][0]))\n wb_2nd = zeros((1, second_level_pairs[-1][-1] - second_level_pairs[0][0] + num_blocks3l))\n p_2nd = zeros((1, second_level_pairs[-1][-1] - second_level_pairs[0][0] + num_blocks3l))\n\n u = array(Y[:, 0], ndmin = 2).T\n\n while conv > self.tol and loop < self.loop_limit:\n \n wb_3rd, wb_2nd[0, 0:num_blocks3l], t3_final, superT = self._3rdlevel_part(X, third_level_pairs, u, 0)\n\n start = num_blocks3l\n T1l = superT.copy()\n for i, pair in enumerate(second_level_pairs): #consequence of the second level stuff\n wbcorr, T_scores = self._correlated_part(superT, X[:, pair[0]:pair[1]], u, 0)\n #correlated part\n end = start + pair[1] - pair[0]\n wb_2nd[0, start:end] = wbcorr\n start = end\n T1l = concatenate([T1l, T_scores], axis = 1)\n \n #superlevel stuff \n # calculating the superlevel weights and scores\n wt, tT = self._superlevel_part(T1l, u)\n\n # calculating the y side loadings and scores\n q, u_new = self._y_part(tT, Y, 0)\n\n conv = norm(u - u_new)\n u = u_new\n loop +=1\n\n \n for i, pair in enumerate(third_level_pairs): #third level\n p_3rd[0, pair[0]:pair[1]] = self._p_loadings(X[:, pair[0]:pair[1]], t3_final[:,i], 0)\n \n p_2nd[0, 0:num_blocks3l] = self._p_loadings(t3_final, T1l[:,0], 0)\n\n start = num_blocks3l\n for i, pair in enumerate(second_level_pairs):\n end = start + pair[1] - pair[0]\n p_2nd[0, start:end] = self._p_loadings(X[:, pair[0]:pair[1]], T1l[:,i+1], 0)\n start = end\n\n\n result_dict = {'w3l':wb_3rd, #weights 3rd level\n 'w2l':wb_2nd, #weights 2rd level\n 'w1l':wt.T, #weights 1rd level\n 'p3l':p_3rd, #p loadings 3rd level\n 'p2l':p_2nd, #p loadings 2rd level\n 'p1l':self._p_loadings(X, tT, 0), #p loadings 1rd level\n 't3l':t3_final, #scores 3rd level\n 't2l':superT, #scores 2nd level\n 't1l':tT, #scores 1st level\n 'u':u, #y training scores\n 'c':q.T} #c loadings\n\n return result_dict\n\n def _SMBPLS_1LV(self, X, block_coord_pairs, Y, missing_values, block):\n \n conv = 1\n loops = 0\n\n y_scores1 = nan_to_num(array(Y[ :,0 ], ndmin = 2).T) #handling possible NaNs that come from faulty datasets - Step 1\n superlevel_scores1 = nan_to_num(y_scores1) #initializing superlevel scores vector\n\n while (conv > self.tol and loops < self.loop_limit) :\n\n first_block_done = False\n for missing, (start, end) in zip(missing_values, block_coord_pairs) :\n\n # calculates the block weights and scores for the uncorrelated part \n if not first_block_done: \n block_weights, T_scores = self._uncorrelated_part(X[:, start:end], y_scores1, missing)\n first_block_done = True\n continue\n\n # calculating the block weights and scores for the blocks following the correlated parts of the following blocks\n corr_block_weights, corr_T_scores = self._correlated_part(array(T_scores[:, 0], ndmin = 2).T, X[:, start:end], y_scores1, missing)\n block_weights = append(block_weights, corr_block_weights, axis = 1)\n T_scores = append(T_scores, corr_T_scores, 
axis = 1)\n \n # calculating the superlevel weights and scores\n superlevel_weights, superlevel_scores = self._superlevel_part(T_scores, y_scores1)\n\n # calculating the y side loadings and scores\n c_loadings, y_scores = self._y_part(superlevel_scores, Y, missing_values[-1])\n\n conv = norm(superlevel_scores1 - superlevel_scores) / norm(superlevel_scores1)\n superlevel_scores1 = superlevel_scores\n y_scores1 = y_scores\n\n # Calculating the p_loadings that connect from the raw X values to the superlevel scores\n start, end = block_coord_pairs[0][0], block_coord_pairs[-1][-1]\n superlevel_p_loadings = self._p_loadings(X[:, start:end], superlevel_scores, True in missing_values[:-1]).T\n x_weights = self._p_loadings(X[:, start:end], y_scores, True in missing_values).T\n \n x_weights = x_weights / norm(x_weights)\n y_scores_test = X[:, start:end] @ x_weights\n\n for missing, scores, (start, end) in zip(missing_values, T_scores.T, block_coord_pairs) :\n \n if start == block_coord_pairs[0][0] : \n block_p_loadings = self._p_loadings(X[:, start:end], array(scores, ndmin = 2).T, missing)\n if start > 0:\n block_p_loadings = append(zeros((1, start)), block_p_loadings, axis = 1 )\n superlevel_p_loadings = append(zeros((start, 1)), superlevel_p_loadings, axis = 0 )\n x_weights = append(zeros((start, 1)), x_weights, axis = 0 )\n superlevel_weights = append(zeros((block, 1)), superlevel_weights, axis = 0 )\n block_weights = append(zeros((1, start)), block_weights, axis = 1 )\n\n else : \n block_p_loadings = append(block_p_loadings, self._p_loadings(X[:, start:end], array(scores, ndmin = 2).T, missing), axis = 1)\n\n result_dict = {'w2l':block_weights,\n 'w1l':x_weights.T,\n 'p2l':block_p_loadings,\n 't2l':T_scores,\n 'w1l':superlevel_weights.T,\n 'p1l':superlevel_p_loadings.T,\n 't1l':superlevel_scores,\n 'u':y_scores,\n 'c':c_loadings.T}\n\n return result_dict\n \n def _3rdlevel_part(self, X, third_level_pairs, y_scores, missing_value):\n \n wb_3rd = zeros((1, third_level_pairs[-1][-1] - third_level_pairs[0][0]))\n for i, pair in enumerate(third_level_pairs): #third level\n \n wb = array(X[:, pair[0]:pair[1]].T @ y_scores / (y_scores.T @ y_scores), ndmin = 2).T\n wb /= norm(wb)\n wb_3rd[0, pair[0]:pair[1]] = wb\n t = X[:, pair[0]:pair[1]] @ wb.T\n if i < 1: \n t3_final = t\n else:\n t3_final = concatenate([t3_final, t], axis = 1)\n\n #consequence of the third level\n wb12 = array(t3_final.T @ y_scores / (y_scores.T @ y_scores), ndmin = 2).T\n wb12 /= norm(wb12)\n superT = t3_final @ wb12.T\n\n\n return wb_3rd, wb12, t3_final, superT\n\n def _uncorrelated_part(self, X, y_scores, missing_value):\n\n if missing_value:\n block_weights = array(nansum((X * y_scores), axis = 0), ndmin = 2) \n block_weights = block_weights / nansum((array(~isnan(sum(X, axis = 1)), ndmin = 2).T * y_scores) ** 2, axis = 0)\n block_weights = block_weights / norm(block_weights)\n T_scores = array(nansum(X * block_weights, axis = 1) / nansum(((~isnan(X) * block_weights) ** 2), axis = 1), ndmin = 2).T\n \n else:\n block_weights = X.T @ y_scores / (y_scores.T @ y_scores) # calculating Xb block weights (as step 2.1 in L-G's 2018 paper)\n block_weights = (block_weights / norm(block_weights)).T\n T_scores = X @ block_weights.T\n \n return block_weights, T_scores\n \n def _correlated_part(self, scores, X, y_scores, missing_value):\n \n X_corr_coeffs = (scores / (scores.T @ scores)) @ scores.T\n \n if missing_value : \n X_corr = X_corr_coeffs @ nan_to_num(X) # Attention on this part\n place(X_corr, isnan(X), nan) # Keeping the NaN value 
as an NaN\n block_weights = array(nansum((X_corr * y_scores), axis = 0), ndmin = 2) \n block_weights = block_weights / nansum((array(~isnan(sum(X, axis = 1)), ndmin = 2).T * y_scores) ** 2, axis = 0)\n block_weights = block_weights / norm(block_weights) #step 2.6\n T_scores = array(nansum(X_corr * block_weights.T, axis = 1), ndmin = 2).T # step 2.7\n else : \n X_corr = X_corr_coeffs @ X # finishing step 2.4 for no missing data\n block_weights = X_corr.T @ y_scores / (y_scores.T @ y_scores) # step 2.5\n block_weights = (block_weights / norm(block_weights)).T #step 2.6\n T_scores = X_corr @ block_weights.T # step 2.7 \n \n return block_weights, T_scores\n \n def _superlevel_part(self, T_scores, y_scores):\n \n superlevel_weights = T_scores.T @ y_scores / (y_scores.T @ y_scores) #step 2.9\n superlevel_weights = superlevel_weights / norm(superlevel_weights) #step 2.10\n superlevel_scores = T_scores @ superlevel_weights / (superlevel_weights.T @ superlevel_weights) #step 2.11\n\n return superlevel_weights, superlevel_scores\n\n def _y_part(self, superlevel_scores, Y, missing_value_Y):\n\n if missing_value_Y :\n c_loadings = nansum((Y.T * superlevel_scores).T, axis = 0) \n c_loadings = c_loadings / nansum(((isnan(Y).T * superlevel_scores) ** 2).T, axis = 0)\n else : c_loadings = Y.T @ superlevel_scores / (superlevel_scores.T @ superlevel_scores) # step 2.12\n \n y_scores = Y @ c_loadings / (c_loadings.T @ c_loadings) # step 2.13\n\n return c_loadings, y_scores\n\n def _p_loadings(self, X, scores, missing_value):\n\n if missing_value:\n p_loadings = nansum(X * scores, axis = 0) \n p_loadings = array(p_loadings / nansum(scores ** 2), ndmin = 2) #Step 3.1\n \n else: p_loadings = array(X.T @ scores / (scores.T @ scores), ndmin = 2) #Step 3.1\n if p_loadings.shape[0]> p_loadings.shape[1] : p_loadings = p_loadings.T\n return p_loadings\n\n def _cross_validate(self, X_orig, block_divs, Y_orig, LVs, random_state):\n\n \n cv_folds = KFold(n_splits = self.cv_splits_number, shuffle = True, random_state = random_state)\n q2 = []\n\n for train_index, test_index in cv_folds.split(X_orig):\n cv_model = SMB_PLS(tol = self.tol)\n cv_model.fit(X_orig[train_index], block_divs, Y_orig[train_index], latent_variables = LVs, int_call = True)\n q2.append(cv_model.score(X_orig[test_index], Y_orig[test_index]))\n\n q2 = mean(q2)\n\n return q2\n\n def _VIPs_calc( self, X, Y ): # code for calculation of VIPs\n\n \"\"\"\n Calculates the VIP scores for all the variables for the prediction\n \"\"\"\n \n SSY = sum((Y - nanmean(Y, axis = 0)) ** 2)\n for i in range(1, self.x_weights_star.shape[1] + 1):\n pred = self.predict(X, latent_variables = i)\n res = Y - pred\n SSY.loc[i] = sum(((res - res.mean(axis = 0)) ** 2))\n \n SSYdiff = SSY.iloc[:-1] - SSY.iloc[1:]\n VIPs = (((self.x_weights ** 2) @ SSYdiff.values) * self.weights.shape[1] / (SSY[0] - SSY[-1]) ** 1 / 2)\n \n return VIPs"
]
| [
[
"numpy.concatenate",
"numpy.array",
"numpy.linalg.norm",
"numpy.nan_to_num",
"numpy.isnan",
"numpy.zeros",
"numpy.sum",
"numpy.nansum",
"numpy.mean",
"numpy.nanvar",
"numpy.nanmean",
"numpy.append",
"sklearn.model_selection.KFold",
"numpy.unique"
]
]
|
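A minimal, hypothetical smoke test for the `MLSMB_PLS` class in the `code` cell above (all data here is synthetic; `trendfitter.auxiliary.tf_aux` must be importable). Note three quirks visible in the source: `fit` deflates `X` and `Y` in place, `latent_variables` must be supplied because the `None` default is indexed directly, and `transform`/`predict` rely on `self.x_weights_star`, which this `fit` never assigns (those lines are commented out), so only the training attributes are inspected:

```python
import numpy as np

rng = np.random.default_rng(0)
X = rng.standard_normal((100, 6))
Y = X @ rng.standard_normal((6, 1)) + 0.1 * rng.standard_normal((100, 1))

# Column layout: the third level covers columns 0-1 and 2-3 (these divs
# are end indices; the first block implicitly starts at 0), while the
# second level covers columns 4-5 (these divs include the start index).
model = MLSMB_PLS(tol=1e-8)
# One latent-variable count for the third level as a whole, plus one per
# second-level block. Pass copies because fit() mutates its inputs.
model.fit(X.copy(), [2, 4], [4, 6], Y.copy(), latent_variables=[1, 1])

print(model.training_superlevel_scores.shape)  # (100, 2): one column per LV
print(model.c_loadings.shape)                  # (2, 1)
```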
atiroms/chainerrl | [
"02c15c55e7994e69468270fcddf0c08837806d64"
]
| [
"chainerrl/policies/gaussian_policy.py"
]
| [
"from __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\nfrom builtins import * # NOQA\nfrom future import standard_library\nstandard_library.install_aliases()\n\nfrom abc import abstractmethod\nfrom logging import getLogger\nlogger = getLogger(__name__)\n\nimport chainer\nfrom chainer import functions as F\nfrom chainer import links as L\nimport numpy as np\n\nfrom chainerrl import distribution\nfrom chainerrl.functions.bound_by_tanh import bound_by_tanh\nfrom chainerrl.initializers import LeCunNormal\nfrom chainerrl import links\nfrom chainerrl.policy import Policy\n\n\nclass GaussianPolicy(Policy):\n \"\"\"Abstract Gaussian policy.\"\"\"\n\n @abstractmethod\n def compute_mean_and_var(self, x):\n \"\"\"Compute mean and variance.\n\n Returns:\n tuple of two ~chainer.Variable: mean and variance\n \"\"\"\n raise NotImplementedError()\n\n def __call__(self, x):\n mean, var = self.compute_mean_and_var(x)\n return distribution.GaussianDistribution(mean=mean, var=var)\n\n\nclass FCGaussianPolicy(chainer.ChainList, GaussianPolicy):\n \"\"\"Gaussian policy that consists of fully-connected layers.\n\n This model has two output layers: the mean layer and the variance layer.\n The mean of the Gaussian is computed as follows:\n Let y as the output of the mean layer.\n If bound_mean=False:\n mean = y (if bound_mean=False)\n If bound_mean=True:\n mean = min_action + tanh(y) * (max_action - min_action) / 2\n The variance of the Gaussian is computed as follows:\n Let y as the output of the variance layer.\n variance = softplus(y) + min_var\n\n Args:\n n_input_channels (int): Number of input channels.\n action_size (int): Number of dimensions of the action space.\n n_hidden_layers (int): Number of hidden layers.\n n_hidden_channels (int): Number of hidden channels.\n min_action (ndarray): Minimum action. Used only when bound_mean=True.\n max_action (ndarray): Maximum action. Used only when bound_mean=True.\n var_type (str): Type of parameterization of variance. 
It must be\n 'spherical' or 'diagonal'.\n nonlinearity (callable): Nonlinearity placed between layers.\n mean_wscale (float): Scale of weight initialization of the mean layer.\n var_wscale (float): Scale of weight initialization of the variance\n layer.\n var_bias (float): The initial value of the bias parameter for the\n variance layer.\n min_var (float): Minimum value of the variance.\n \"\"\"\n\n def __init__(self, n_input_channels, action_size,\n n_hidden_layers=0, n_hidden_channels=None,\n min_action=None, max_action=None, bound_mean=False,\n var_type='spherical', nonlinearity=F.relu,\n mean_wscale=1, var_wscale=1, var_bias=0,\n min_var=0):\n\n self.n_input_channels = n_input_channels\n self.action_size = action_size\n self.n_hidden_layers = n_hidden_layers\n self.n_hidden_channels = n_hidden_channels\n self.min_action = min_action\n self.max_action = max_action\n self.bound_mean = bound_mean\n self.nonlinearity = nonlinearity\n self.min_var = min_var\n var_size = {'spherical': 1, 'diagonal': action_size}[var_type]\n\n self.hidden_layers = []\n if n_hidden_layers > 0:\n self.hidden_layers.append(\n L.Linear(n_input_channels, n_hidden_channels))\n for i in range(n_hidden_layers - 1):\n self.hidden_layers.append(\n L.Linear(n_hidden_channels, n_hidden_channels))\n self.mean_layer = L.Linear(n_hidden_channels, action_size,\n initialW=LeCunNormal(mean_wscale))\n self.var_layer = L.Linear(n_hidden_channels, var_size,\n initialW=LeCunNormal(var_wscale),\n initial_bias=var_bias)\n else:\n self.mean_layer = L.Linear(n_input_channels, action_size,\n initialW=LeCunNormal(mean_wscale))\n self.var_layer = L.Linear(n_input_channels, var_size,\n initialW=LeCunNormal(var_wscale),\n initial_bias=var_bias)\n\n super().__init__(\n self.mean_layer, self.var_layer, *self.hidden_layers)\n\n def compute_mean_and_var(self, x):\n h = x\n for layer in self.hidden_layers:\n h = self.nonlinearity(layer(h))\n mean = self.mean_layer(h)\n if self.bound_mean:\n mean = bound_by_tanh(mean, self.min_action, self.max_action)\n var = F.broadcast_to(F.softplus(self.var_layer(h)), mean.shape) + \\\n self.min_var\n return mean, var\n\n def __call__(self, x):\n mean, var = self.compute_mean_and_var(x)\n return distribution.GaussianDistribution(mean, var=var)\n\n\nclass FCGaussianPolicyWithStateIndependentCovariance(\n chainer.Chain, GaussianPolicy):\n \"\"\"Gaussian policy that consists of FC layers with parametrized covariance.\n\n This model has one output layers: the mean layer.\n The mean of the Gaussian is computed in the same way as FCGaussianPolicy.\n\n Args:\n n_input_channels (int): Number of input channels.\n action_size (int): Number of dimensions of the action space.\n n_hidden_layers (int): Number of hidden layers.\n n_hidden_channels (int): Number of hidden channels.\n min_action (ndarray): Minimum action. Used only when bound_mean=True.\n max_action (ndarray): Maximum action. Used only when bound_mean=True.\n var_type (str): Type of parameterization of variance. It must be\n 'spherical' or 'diagonal'.\n nonlinearity (callable): Nonlinearity placed between layers.\n mean_wscale (float): Scale of weight initialization of the mean layer.\n var_func (callable): Callable that computes the variance from the var\n parameter. 
It should always return positive values.\n var_param_init (float): Initial value the var parameter.\n \"\"\"\n\n def __init__(self, n_input_channels, action_size,\n n_hidden_layers=0, n_hidden_channels=None,\n min_action=None, max_action=None, bound_mean=False,\n var_type='spherical',\n nonlinearity=F.relu,\n mean_wscale=1,\n var_func=F.softplus,\n var_param_init=0,\n ):\n\n self.n_input_channels = n_input_channels\n self.action_size = action_size\n self.n_hidden_layers = n_hidden_layers\n self.n_hidden_channels = n_hidden_channels\n self.min_action = min_action\n self.max_action = max_action\n self.bound_mean = bound_mean\n self.nonlinearity = nonlinearity\n self.var_func = var_func\n var_size = {'spherical': 1, 'diagonal': action_size}[var_type]\n\n layers = []\n layers.append(L.Linear(n_input_channels, n_hidden_channels))\n for _ in range(n_hidden_layers - 1):\n layers.append(self.nonlinearity)\n layers.append(L.Linear(n_hidden_channels, n_hidden_channels))\n layers.append(self.nonlinearity)\n # The last layer is used to compute the mean\n layers.append(\n L.Linear(n_hidden_channels, action_size,\n initialW=LeCunNormal(mean_wscale)))\n\n if self.bound_mean:\n layers.append(lambda x: bound_by_tanh(\n x, self.min_action, self.max_action))\n\n super().__init__()\n with self.init_scope():\n self.hidden_layers = links.Sequence(*layers)\n self.var_param = chainer.Parameter(\n initializer=var_param_init, shape=(var_size,))\n\n def __call__(self, x):\n mean = self.hidden_layers(x)\n var = F.broadcast_to(self.var_func(self.var_param), mean.shape)\n return distribution.GaussianDistribution(mean, var)\n\n\nclass FCGaussianPolicyWithFixedCovariance(links.Sequence, GaussianPolicy):\n \"\"\"Gaussian policy that consists of FC layers with fixed covariance.\n\n This model has one output layers: the mean layer.\n The mean of the Gaussian is computed in the same way as FCGaussianPolicy.\n The variance of the Gaussian must be specified as an argument.\n\n Args:\n n_input_channels (int): Number of input channels.\n action_size (int): Number of dimensions of the action space.\n var (float or ndarray): Variance of the Gaussian distribution.\n n_hidden_layers (int): Number of hidden layers.\n n_hidden_channels (int): Number of hidden channels.\n min_action (ndarray): Minimum action. Used only when bound_mean=True.\n max_action (ndarray): Maximum action. 
Used only when bound_mean=True.\n nonlinearity (callable): Nonlinearity placed between layers.\n mean_wscale (float): Scale of weight initialization of the mean layer.\n \"\"\"\n\n def __init__(self, n_input_channels, action_size, var,\n n_hidden_layers=0, n_hidden_channels=None,\n min_action=None, max_action=None, bound_mean=False,\n nonlinearity=F.relu, mean_wscale=1):\n\n self.n_input_channels = n_input_channels\n self.action_size = action_size\n self.n_hidden_layers = n_hidden_layers\n self.n_hidden_channels = n_hidden_channels\n self.min_action = min_action\n self.max_action = max_action\n self.bound_mean = bound_mean\n self.nonlinearity = nonlinearity\n if np.isscalar(var):\n self.var = np.full(action_size, var, dtype=np.float32)\n else:\n self.var = var\n layers = []\n if n_hidden_layers > 0:\n # Input to hidden\n layers.append(L.Linear(n_input_channels, n_hidden_channels))\n layers.append(self.nonlinearity)\n for _ in range(n_hidden_layers - 1):\n # Hidden to hidden\n layers.append(L.Linear(n_hidden_channels, n_hidden_channels))\n layers.append(self.nonlinearity)\n # The last layer is used to compute the mean\n layers.append(\n L.Linear(n_hidden_channels, action_size,\n initialW=LeCunNormal(mean_wscale)))\n else:\n # There's only one layer for computing the mean\n layers.append(\n L.Linear(n_input_channels, action_size,\n initialW=LeCunNormal(mean_wscale)))\n\n if self.bound_mean:\n layers.append(lambda x: bound_by_tanh(\n x, self.min_action, self.max_action))\n\n def get_var_array(shape):\n self.var = self.xp.asarray(self.var)\n return self.xp.broadcast_to(self.var, shape)\n\n layers.append(lambda x: distribution.GaussianDistribution(\n x, get_var_array(x.shape)))\n super().__init__(*layers)\n\n\nclass LinearGaussianPolicyWithDiagonalCovariance(\n chainer.ChainList, GaussianPolicy):\n \"\"\"Linear Gaussian policy whose covariance matrix is diagonal.\"\"\"\n\n def __init__(self, n_input_channels, action_size):\n\n self.n_input_channels = n_input_channels\n self.action_size = action_size\n\n self.mean_layer = L.Linear(n_input_channels, action_size)\n self.var_layer = L.Linear(n_input_channels, action_size)\n\n super().__init__(self.mean_layer, self.var_layer)\n\n def compute_mean_and_var(self, x):\n # mean = self.mean_layer(x)\n mean = F.tanh(self.mean_layer(x)) * 2.0\n var = F.softplus(self.var_layer(x))\n return mean, var\n\n\nclass LinearGaussianPolicyWithSphericalCovariance(\n chainer.ChainList, GaussianPolicy):\n \"\"\"Linear Gaussian policy whose covariance matrix is spherical.\"\"\"\n\n def __init__(self, n_input_channels, action_size):\n\n self.n_input_channels = n_input_channels\n self.action_size = action_size\n\n self.mean_layer = L.Linear(n_input_channels, action_size)\n self.var_layer = L.Linear(n_input_channels, 1)\n\n super().__init__(self.mean_layer, self.var_layer)\n\n def compute_mean_and_var(self, x):\n # mean = self.mean_layer(x)\n mean = F.tanh(self.mean_layer(x)) * 2.0\n var = F.softplus(F.broadcast_to(self.var_layer(x), mean.data.shape))\n return mean, var\n"
]
| [
[
"numpy.full",
"numpy.isscalar"
]
]
|
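A minimal usage sketch for `FCGaussianPolicy` from the row above, assuming `chainer` and `chainerrl` are installed; the observation and action sizes are arbitrary choices for illustration:

```python
import numpy as np

# Two-hidden-layer policy over a 4-dim observation with a 2-dim action
# and one learned variance per action dimension.
policy = FCGaussianPolicy(n_input_channels=4, action_size=2,
                          n_hidden_layers=2, n_hidden_channels=64,
                          var_type='diagonal')

obs = np.zeros((1, 4), dtype=np.float32)
dist = policy(obs)       # chainerrl.distribution.GaussianDistribution
action = dist.sample()   # draws a (1, 2) action as a chainer Variable
print(action.shape)
```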
electricitymap/electricitymap-contrib | [
"6572b12d1cef72c734b80273598e156ebe3c22ea"
]
| [
"parsers/DO.py"
]
| [
"#!/usr/bin/env python3\n\nimport logging\nfrom collections import defaultdict\nfrom math import isnan\nfrom operator import itemgetter\n\nimport arrow\nimport numpy as np\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\n\n# This parser gets hourly electricity generation data from oc.org.do for the Dominican Republic.\n# The data is in MWh but since it is updated hourly we can view it as MW.\n# Solar generation now has some data available but multiple projects are planned/under construction.\n\nurl = \"https://apps.oc.org.do/reportesgraficos/reportepostdespacho.aspx\"\n\ntotal_mapping = {\n \"Total T\\xe9rmico\": \"Thermal\",\n \"Total E\\xf3lico\": \"Wind\",\n \"Total Hidroel\\xe9ctrica\": \"Hydro\",\n \"Total Solar\": \"Solar\",\n \"Total Generado\": \"Generated\",\n}\n\n# Power plant types\n# http://www.sie.gob.do/images/Estadisticas/MEM/GeneracionDiariaEnero2017/\n# Reporte_diario_de_generacion_31_enero_2017_merged2.pdf\n\nthermal_plants = {\n \"AES ANDRES\": \"gas\",\n \"BARAHONA CARBON\": \"coal\",\n \"BERSAL\": \"oil\",\n \"CEPP 1\": \"oil\",\n \"CEPP 2\": \"oil\",\n \"CESPM 1 FO\": \"oil\",\n \"CESPM 1 GN\": \"gas\",\n \"CESPM 2 FO\": \"oil\",\n \"CESPM 2 GN\": \"gas\",\n \"CESPM 3 FO\": \"oil\",\n \"CESPM 3 GN\": \"gas\",\n \"ESTRELLA DEL MAR 2 CFO\": \"oil\",\n \"ESTRELLA DEL MAR 2 CGN\": \"gas\",\n \"ESTRELLA DEL MAR 2 SFO\": \"oil\",\n \"ESTRELLA DEL MAR 2 SGN\": \"gas\",\n \"ESTRELLA DEL MAR 3\": \"gas\",\n \"GENERACI\\xD3N DE EMERGENCIA AES ANDR\\xC9S\": \"gas\",\n \"HAINA TG\": \"oil\",\n \"INCA KM22\": \"oil\",\n \"ITABO 1\": \"coal\",\n \"ITABO 2\": \"coal\",\n \"LA VEGA\": \"oil\",\n \"LOS MINA 5\": \"gas\",\n \"LOS MINA 6\": \"gas\",\n \"LOS MINA 7\": \"gas\",\n \"LOS OR\\xcdGENES POWER PLANT FUEL OIL\": \"oil\",\n \"LOS OR\\xcdGENES POWER PLANT GAS NATURAL\": \"gas\",\n \"METALDOM\": \"oil\",\n \"MONTE RIO\": \"oil\",\n \"PALAMARA\": \"oil\",\n \"PALENQUE\": \"oil\",\n \"PARQUE ENERGETICO LOS MINA CC PARCIAL\": \"gas\",\n \"PARQUE ENERGETICO LOS MINA CC TOTAL\": \"gas\",\n \"PIMENTEL 1\": \"oil\",\n \"PIMENTEL 2\": \"oil\",\n \"PIMENTEL 3\": \"oil\",\n \"PUNTA CATALINA 1\": \"coal\",\n \"PUNTA CATALINA 2\": \"coal\",\n \"QUISQUEYA 1B SAN PEDRO GN\": \"gas\",\n \"QUISQUEYA 1 FO\": \"oil\",\n \"QUISQUEYA 1 GN\": \"gas\",\n \"QUISQUEYA 2 FO\": \"oil\",\n \"QUISQUEYA 2 GN\": \"gas\",\n \"QUISQUEYA 1 SAN PEDRO FO\": \"oil\",\n \"QUISQUEYA 1 SAN PEDRO GN\": \"gas\",\n \"RIO SAN JUAN\": \"oil\",\n \"SAN FELIPE\": \"oil\",\n \"SAN FELIPE CC\": \"gas\",\n \"SAN FELIPE VAP\": \"oil\",\n \"SAN LORENZO 1\": \"gas\",\n \"SAN PEDRO BIO-ENERGY\": \"biomass\",\n \"SAN PEDRO VAPOR\": \"oil\",\n \"SULTANA DEL ESTE\": \"oil\",\n}\n\n\ndef get_data(session=None) -> list:\n \"\"\"\n Makes a request to source url.\n Finds main table and creates a list of all table elements in string format.\n \"\"\"\n\n data = []\n s = session or requests.Session()\n data_req = s.get(url)\n soup = BeautifulSoup(data_req.content, \"lxml\")\n\n tbs = soup.find(\"table\", id=\"PostdespachoUnidadesTermicasGrid_DXMainTable\")\n rows = tbs.find_all(\"td\")\n\n for row in rows:\n num = row.getText().strip()\n data.append(str(num))\n\n return data\n\n\ndef floater(item):\n \"\"\"\n Attempts to convert any item given to a float.\n Returns item if it fails.\n \"\"\"\n\n try:\n return float(item)\n except ValueError:\n return item\n\n\ndef chunker(big_lst) -> dict:\n \"\"\"\n Breaks a big list into a list of lists.\n Removes any list with no data then turns remaining\n lists into key: value pairs 
with first element from the list being the key.\n \"\"\"\n\n chunks = [big_lst[x : x + 27] for x in range(0, len(big_lst), 27)]\n\n # Remove the list if it contains no data.\n for chunk in chunks:\n if any(chunk):\n continue\n else:\n chunks.remove(chunk)\n\n chunked_list = {words[0]: words[1:] for words in chunks}\n\n return chunked_list\n\n\ndef data_formatter(data) -> dict:\n \"\"\"\n Takes data and finds relevant sections.\n Formats and breaks data into usable parts.\n \"\"\"\n\n find_thermal_index = data.index(\"GRUPO: T\\xe9rmica\")\n find_totals_index = data.index(\"Total T\\xe9rmico\")\n find_totals_end = data.index(\"Total Programado\")\n\n ufthermal = data[find_thermal_index + 3 : find_totals_index - 59]\n total_data = data[find_totals_index:find_totals_end]\n\n # Remove all company names.\n for val in ufthermal:\n if \":\" in val:\n i = ufthermal.index(val)\n del ufthermal[i : i + 3]\n\n formatted_thermal = chunker([floater(item) for item in ufthermal])\n mapped_totals = [total_mapping.get(x, x) for x in total_data]\n formatted_totals = chunker([floater(item) for item in mapped_totals])\n\n return {\"totals\": formatted_totals, \"thermal\": formatted_thermal}\n\n\ndef data_parser(formatted_data):\n \"\"\"\n Converts formatted data into a pandas dataframe.\n Removes any empty rows.\n Returns a DataFrame.\n \"\"\"\n\n hours = list(range(1, 24)) + [0] + [25, 26]\n dft = pd.DataFrame(formatted_data, index=hours)\n\n dft = dft.drop(dft.index[[-1, -2]])\n dft = dft.replace(\"\", np.nan)\n dft = dft.dropna(how=\"all\")\n\n return dft\n\n\ndef thermal_production(df, logger) -> dict:\n \"\"\"\n Takes DataFrame and finds thermal generation for each hour.\n Removes any non generating plants then maps plants to type.\n \"\"\"\n\n therms = []\n unmapped = set()\n for hour in df.index.values:\n dt = hour\n currentt = df.loc[[hour]]\n\n # Create current plant output.\n tp = {}\n for item in list(df):\n v = currentt.iloc[0][item]\n tp[item] = v\n\n current_plants = {k: tp[k] for k in tp if not isnan(tp[k])}\n\n for plant in current_plants.keys():\n if plant not in thermal_plants.keys():\n unmapped.add(plant)\n\n mapped_plants = [\n (thermal_plants.get(plant, \"unknown\"), val)\n for plant, val in current_plants.items()\n ]\n\n thermalDict = defaultdict(lambda: 0.0)\n\n # Sum values for duplicate keys.\n for key, val in mapped_plants:\n thermalDict[key] += val\n\n thermalDict[\"datetime\"] = dt\n thermalDict = dict(thermalDict)\n therms.append(thermalDict)\n\n for plant in unmapped:\n logger.warning(\n \"{} is missing from the DO plant mapping!\".format(plant),\n extra={\"key\": \"DO\"},\n )\n\n return therms\n\n\ndef total_production(df) -> dict:\n \"\"\"Takes DataFrame and finds generation totals for each hour.\"\"\"\n\n vals = []\n # The Dominican Republic does not observe daylight savings time.\n for hour in df.index.values:\n dt = hour\n current = df.loc[[hour]]\n hydro = current.iloc[0][\"Hydro\"]\n wind = current.iloc[0][\"Wind\"]\n solar = current.iloc[0][\"Solar\"]\n if wind > -10:\n wind = max(wind, 0)\n\n # Wind and hydro totals do not always update exactly on the new hour.\n # In this case we set them to None because they are unknown rather than zero.\n if isnan(wind):\n wind = None\n if isnan(hydro):\n hydro = None\n\n prod = {\"wind\": wind, \"hydro\": hydro, \"solar\": solar, \"datetime\": dt}\n vals.append(prod)\n\n return vals\n\n\ndef merge_production(thermal, total) -> defaultdict:\n \"\"\"\n Takes thermal generation and total generation and merges them using 'datetime' key.\n 
\"\"\"\n\n d = defaultdict(dict)\n for each in (thermal, total):\n for elem in each:\n d[elem[\"datetime\"]].update(elem)\n\n final = sorted(d.values(), key=itemgetter(\"datetime\"))\n\n def get_datetime(hour):\n at = arrow.now(\"America/Dominica\").floor(\"day\")\n dt = (at.shift(hours=int(hour) - 1)).datetime\n return dt\n\n for item in final:\n i = item[\"datetime\"]\n j = get_datetime(i)\n item[\"datetime\"] = j\n\n return final\n\n\ndef fetch_production(\n zone_key=\"DO\",\n session=None,\n target_datetime=None,\n logger=logging.getLogger(__name__),\n) -> dict:\n \"\"\"Requests the last known production mix (in MW) of a given country.\"\"\"\n if target_datetime:\n raise NotImplementedError(\"This parser is not yet able to parse past dates\")\n\n dat = data_formatter(get_data(session=None))\n tot = data_parser(dat[\"totals\"])\n th = data_parser(dat[\"thermal\"])\n thermal = thermal_production(th, logger)\n total = total_production(tot)\n merge = merge_production(thermal, total)\n\n production_mix_by_hour = []\n for hour in merge:\n production_mix = {\n \"zoneKey\": zone_key,\n \"datetime\": hour[\"datetime\"],\n \"production\": {\n \"biomass\": hour.get(\"biomass\", 0.0),\n \"coal\": hour.get(\"coal\", 0.0),\n \"gas\": hour.get(\"gas\", 0.0),\n \"hydro\": hour.get(\"hydro\", 0.0),\n \"nuclear\": 0.0,\n \"oil\": hour.get(\"oil\", 0.0),\n \"solar\": hour.get(\"solar\", 0.0),\n \"wind\": hour.get(\"wind\", 0.0),\n \"geothermal\": 0.0,\n \"unknown\": hour.get(\"unknown\", 0.0),\n },\n \"storage\": {\n \"hydro\": None,\n },\n \"source\": \"oc.org.do\",\n }\n production_mix_by_hour.append(production_mix)\n\n return production_mix_by_hour\n\n\nif __name__ == \"__main__\":\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n print(\"fetch_production() ->\")\n print(fetch_production())\n"
]
| [
[
"pandas.DataFrame"
]
]
|
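The pure helpers in `parsers/DO.py` above can be exercised offline without hitting oc.org.do; a small hypothetical check of `floater` and `chunker` (in the scraped table each row is 27 cells, a label plus 26 hourly columns, and all-blank rows are padding):

```python
raw = (['PALAMARA'] + ['50.5'] * 26  # a generating plant's row
       + [''] * 27)                  # an all-blank padding row

table = chunker([floater(cell) for cell in raw])
print(table['PALAMARA'][:3])  # [50.5, 50.5, 50.5]
print(len(table))             # 1 -- the blank padding row was dropped
```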
Laurae2/pygbm | [
"69e96fb76e62024df96f2b71ad7f0b5b669e9017"
]
| [
"pygbm/histogram.py"
]
| [
"import numpy as np\nfrom numba import njit\n\nHISTOGRAM_DTYPE = np.dtype([\n ('sum_gradients', np.float32),\n ('sum_hessians', np.float32),\n ('count', np.uint32),\n])\n\n\n@njit\ndef _build_histogram_naive(n_bins, sample_indices, binned_feature,\n ordered_gradients, ordered_hessians):\n histogram = np.zeros(n_bins, dtype=HISTOGRAM_DTYPE)\n for i, sample_idx in enumerate(sample_indices):\n bin_idx = binned_feature[sample_idx]\n histogram[bin_idx]['sum_gradients'] += ordered_gradients[i]\n histogram[bin_idx]['sum_hessians'] += ordered_hessians[i]\n histogram[bin_idx]['count'] += 1\n return histogram\n\n\n@njit\ndef _subtract_histograms(n_bins, hist_a, hist_b):\n \"\"\"Return hist_a - hist_b\"\"\"\n\n histogram = np.zeros(n_bins, dtype=HISTOGRAM_DTYPE)\n\n sg = 'sum_gradients'\n sh = 'sum_hessians'\n c = 'count'\n\n for i in range(n_bins):\n histogram[i][sg] = hist_a[i][sg] - hist_b[i][sg]\n histogram[i][sh] = hist_a[i][sh] - hist_b[i][sh]\n histogram[i][c] = hist_a[i][c] - hist_b[i][c]\n\n return histogram\n\n\n@njit\ndef _build_histogram(n_bins, sample_indices, binned_feature, ordered_gradients,\n ordered_hessians):\n histogram = np.zeros(n_bins, dtype=HISTOGRAM_DTYPE)\n n_node_samples = sample_indices.shape[0]\n unrolled_upper = (n_node_samples // 4) * 4\n\n for i in range(0, unrolled_upper, 4):\n bin_0 = binned_feature[sample_indices[i]]\n bin_1 = binned_feature[sample_indices[i + 1]]\n bin_2 = binned_feature[sample_indices[i + 2]]\n bin_3 = binned_feature[sample_indices[i + 3]]\n\n histogram[bin_0]['sum_gradients'] += ordered_gradients[i]\n histogram[bin_1]['sum_gradients'] += ordered_gradients[i + 1]\n histogram[bin_2]['sum_gradients'] += ordered_gradients[i + 2]\n histogram[bin_3]['sum_gradients'] += ordered_gradients[i + 3]\n\n histogram[bin_0]['sum_hessians'] += ordered_hessians[i]\n histogram[bin_1]['sum_hessians'] += ordered_hessians[i + 1]\n histogram[bin_2]['sum_hessians'] += ordered_hessians[i + 2]\n histogram[bin_3]['sum_hessians'] += ordered_hessians[i + 3]\n\n histogram[bin_0]['count'] += 1\n histogram[bin_1]['count'] += 1\n histogram[bin_2]['count'] += 1\n histogram[bin_3]['count'] += 1\n\n for i in range(unrolled_upper, n_node_samples):\n bin_idx = binned_feature[sample_indices[i]]\n histogram[bin_idx]['sum_gradients'] += ordered_gradients[i]\n histogram[bin_idx]['sum_hessians'] += ordered_hessians[i]\n histogram[bin_idx]['count'] += 1\n\n return histogram\n\n\n@njit\ndef _build_histogram_no_hessian(n_bins, sample_indices, binned_feature,\n ordered_gradients):\n histogram = np.zeros(n_bins, dtype=HISTOGRAM_DTYPE)\n n_node_samples = sample_indices.shape[0]\n unrolled_upper = (n_node_samples // 4) * 4\n\n for i in range(0, unrolled_upper, 4):\n bin_0 = binned_feature[sample_indices[i]]\n bin_1 = binned_feature[sample_indices[i + 1]]\n bin_2 = binned_feature[sample_indices[i + 2]]\n bin_3 = binned_feature[sample_indices[i + 3]]\n\n histogram[bin_0]['sum_gradients'] += ordered_gradients[i]\n histogram[bin_1]['sum_gradients'] += ordered_gradients[i + 1]\n histogram[bin_2]['sum_gradients'] += ordered_gradients[i + 2]\n histogram[bin_3]['sum_gradients'] += ordered_gradients[i + 3]\n\n histogram[bin_0]['count'] += 1\n histogram[bin_1]['count'] += 1\n histogram[bin_2]['count'] += 1\n histogram[bin_3]['count'] += 1\n\n for i in range(unrolled_upper, n_node_samples):\n bin_idx = binned_feature[sample_indices[i]]\n histogram[bin_idx]['sum_gradients'] += ordered_gradients[i]\n histogram[bin_idx]['count'] += 1\n\n return histogram\n\n\n@njit\ndef 
_build_histogram_root_no_hessian(n_bins, binned_feature, all_gradients):\n \"\"\"Special case for the root node\n\n The root node has to find the a split among all the samples from the\n training set. binned_feature and all_gradients already have a consistent\n ordering.\n \"\"\"\n histogram = np.zeros(n_bins, dtype=HISTOGRAM_DTYPE)\n n_node_samples = binned_feature.shape[0]\n unrolled_upper = (n_node_samples // 4) * 4\n\n for i in range(0, unrolled_upper, 4):\n bin_0 = binned_feature[i]\n bin_1 = binned_feature[i + 1]\n bin_2 = binned_feature[i + 2]\n bin_3 = binned_feature[i + 3]\n\n histogram[bin_0]['sum_gradients'] += all_gradients[i]\n histogram[bin_1]['sum_gradients'] += all_gradients[i + 1]\n histogram[bin_2]['sum_gradients'] += all_gradients[i + 2]\n histogram[bin_3]['sum_gradients'] += all_gradients[i + 3]\n\n histogram[bin_0]['count'] += 1\n histogram[bin_1]['count'] += 1\n histogram[bin_2]['count'] += 1\n histogram[bin_3]['count'] += 1\n\n for i in range(unrolled_upper, n_node_samples):\n bin_idx = binned_feature[i]\n histogram[bin_idx]['sum_gradients'] += all_gradients[i]\n histogram[bin_idx]['count'] += 1\n\n return histogram\n\n\n@njit\ndef _build_histogram_root(n_bins, binned_feature, all_gradients,\n all_hessians):\n \"\"\"Special case for the root node\n\n The root node has to find the a split among all the samples from the\n training set. binned_feature and all_gradients already have a consistent\n ordering.\n \"\"\"\n histogram = np.zeros(n_bins, dtype=HISTOGRAM_DTYPE)\n n_node_samples = binned_feature.shape[0]\n unrolled_upper = (n_node_samples // 4) * 4\n\n for i in range(0, unrolled_upper, 4):\n bin_0 = binned_feature[i]\n bin_1 = binned_feature[i + 1]\n bin_2 = binned_feature[i + 2]\n bin_3 = binned_feature[i + 3]\n\n histogram[bin_0]['sum_gradients'] += all_gradients[i]\n histogram[bin_1]['sum_gradients'] += all_gradients[i + 1]\n histogram[bin_2]['sum_gradients'] += all_gradients[i + 2]\n histogram[bin_3]['sum_gradients'] += all_gradients[i + 3]\n\n histogram[bin_0]['sum_hessians'] += all_hessians[i]\n histogram[bin_1]['sum_hessians'] += all_hessians[i + 1]\n histogram[bin_2]['sum_hessians'] += all_hessians[i + 2]\n histogram[bin_3]['sum_hessians'] += all_hessians[i + 3]\n\n histogram[bin_0]['count'] += 1\n histogram[bin_1]['count'] += 1\n histogram[bin_2]['count'] += 1\n histogram[bin_3]['count'] += 1\n\n for i in range(unrolled_upper, n_node_samples):\n bin_idx = binned_feature[i]\n histogram[bin_idx]['sum_gradients'] += all_gradients[i]\n histogram[bin_idx]['sum_hessians'] += all_hessians[i]\n histogram[bin_idx]['count'] += 1\n\n return histogram\n"
]
| [
[
"numpy.dtype",
"numpy.zeros"
]
]
|
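A sanity-check sketch for the unrolled histogram kernel in `pygbm/histogram.py` above: on random data it should agree with the naive reference implementation (`numba` is required, and the first call JIT-compiles both functions):

```python
import numpy as np

rng = np.random.RandomState(0)
n_bins, n_samples = 8, 1000
sample_indices = np.arange(n_samples, dtype=np.uint32)
binned_feature = rng.randint(0, n_bins, size=n_samples).astype(np.uint8)
gradients = rng.randn(n_samples).astype(np.float32)
hessians = rng.rand(n_samples).astype(np.float32)

fast = _build_histogram(n_bins, sample_indices, binned_feature,
                        gradients, hessians)
naive = _build_histogram_naive(n_bins, sample_indices, binned_feature,
                               gradients, hessians)
assert np.array_equal(fast['count'], naive['count'])
np.testing.assert_allclose(fast['sum_gradients'], naive['sum_gradients'],
                           rtol=1e-5)
```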
huylenguyen806/vnasrg | [
"caee2e79d0eb9d1a997f5df8e2ba3ee7f0a1bcae"
]
| [
"tensorflow_asr/losses/rnnt_loss.py"
]
| [
"# Copyright 2020 Huy Le Nguyen (@usimarit) and M. Yusuf Sarıgöz (@monatis)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# RNNT loss implementation in pure TensorFlow is borrowed from [iamjanvijay's repo](https://github.com/iamjanvijay/rnnt)\n\nimport tensorflow as tf\nfrom tensorflow.python.ops.gen_array_ops import matrix_diag_part_v2\n\nfrom tensorflow_asr.utils import env_util\n\nlogger = tf.get_logger()\n\nLOG_0 = float(\"-inf\")\n\ntry:\n from warprnnt_tensorflow import rnnt_loss as warp_rnnt_loss\n\n use_warprnnt = True\n logger.info(\"Use RNNT loss in WarpRnnt\")\nexcept ImportError:\n logger.info(\"Use RNNT loss in TensorFlow\")\n use_warprnnt = False\n\n\nclass RnntLoss(tf.keras.losses.Loss):\n def __init__(\n self,\n blank=0,\n global_batch_size=None,\n name=None,\n ):\n super(RnntLoss, self).__init__(reduction=tf.keras.losses.Reduction.NONE, name=name)\n self.blank = blank\n self.global_batch_size = global_batch_size\n\n def call(self, y_true, y_pred):\n loss = rnnt_loss(\n logits=y_pred[\"logits\"],\n logit_length=y_pred[\"logits_length\"],\n labels=y_true[\"labels\"],\n label_length=y_true[\"labels_length\"],\n blank=self.blank,\n name=self.name,\n )\n return tf.nn.compute_average_loss(loss, global_batch_size=self.global_batch_size)\n\n\[email protected]\ndef rnnt_loss(\n logits,\n labels,\n label_length,\n logit_length,\n blank=0,\n name=None,\n):\n if use_warprnnt:\n return rnnt_loss_warprnnt(\n logits=logits, labels=labels, label_length=label_length, logit_length=logit_length, blank=blank\n )\n else:\n return rnnt_loss_tf(\n logits=logits,\n labels=labels,\n label_length=label_length,\n logit_length=logit_length,\n name=name,\n )\n\n\ndef rnnt_loss_warprnnt(\n logits,\n labels,\n label_length,\n logit_length,\n blank=0,\n):\n if not env_util.has_devices([\"GPU\", \"TPU\"]):\n logits = tf.nn.log_softmax(logits)\n loss = warp_rnnt_loss(\n acts=tf.cast(logits, tf.float32),\n label_lengths=tf.cast(label_length, tf.int32),\n labels=tf.cast(labels, tf.int32),\n input_lengths=tf.cast(logit_length, tf.int32),\n blank_label=blank,\n )\n return loss\n\n\ndef nan_to_zero(\n input_tensor,\n):\n return tf.where(tf.math.is_nan(input_tensor), tf.zeros_like(input_tensor), input_tensor)\n\n\ndef reduce_logsumexp(\n input_tensor,\n axis,\n):\n maximum = tf.reduce_max(input_tensor, axis=axis)\n input_tensor = nan_to_zero(input_tensor - maximum)\n return tf.math.log(tf.reduce_sum(tf.exp(input_tensor), axis=axis)) + maximum\n\n\ndef extract_diagonals(\n log_probs,\n):\n time_steps = tf.shape(log_probs)[1] # T\n output_steps = tf.shape(log_probs)[2] # U + 1\n reverse_log_probs = tf.reverse(log_probs, axis=[-1])\n paddings = [[0, 0], [0, 0], [time_steps - 1, 0]]\n padded_reverse_log_probs = tf.pad(reverse_log_probs, paddings, \"CONSTANT\", constant_values=LOG_0)\n diagonals = matrix_diag_part_v2(\n padded_reverse_log_probs,\n k=(0, time_steps + output_steps - 2),\n padding_value=LOG_0,\n )\n\n return tf.transpose(diagonals, perm=[1, 0, 2])\n\n\ndef transition_probs(\n one_hot_labels,\n 
log_probs,\n):\n \"\"\"\n :return: blank_probs with shape batch_size x input_max_len x target_max_len\n truth_probs with shape batch_size x input_max_len x (target_max_len-1)\n \"\"\"\n blank_probs = log_probs[:, :, :, 0]\n truth_probs = tf.reduce_sum(tf.multiply(log_probs[:, :, :-1, :], one_hot_labels), axis=-1)\n\n return blank_probs, truth_probs\n\n\ndef forward_dp(\n bp_diags,\n tp_diags,\n batch_size,\n input_max_len,\n target_max_len,\n):\n \"\"\"\n :return: forward variable alpha with shape batch_size x input_max_len x target_max_len\n \"\"\"\n\n def next_state(x, trans_probs):\n blank_probs = trans_probs[0]\n truth_probs = trans_probs[1]\n\n x_b = tf.concat([LOG_0 * tf.ones(shape=[batch_size, 1]), x[:, :-1] + blank_probs], axis=1)\n x_t = x + truth_probs\n\n x = tf.math.reduce_logsumexp(tf.stack([x_b, x_t], axis=0), axis=0)\n return x\n\n initial_alpha = tf.concat(\n [\n tf.zeros(shape=[batch_size, 1]),\n tf.ones(shape=[batch_size, input_max_len - 1]) * LOG_0,\n ],\n axis=1,\n )\n\n fwd = tf.scan(next_state, (bp_diags[:-1, :, :-1], tp_diags), initializer=initial_alpha)\n\n alpha = tf.transpose(tf.concat([tf.expand_dims(initial_alpha, axis=0), fwd], axis=0), perm=[1, 2, 0])\n alpha = matrix_diag_part_v2(alpha, k=(0, target_max_len - 1), padding_value=LOG_0)\n alpha = tf.transpose(tf.reverse(alpha, axis=[1]), perm=[0, 2, 1])\n\n return alpha\n\n\ndef backward_dp(\n bp_diags,\n tp_diags,\n batch_size,\n input_max_len,\n target_max_len,\n label_length,\n logit_length,\n blank_sl,\n):\n \"\"\"\n :return: backward variable beta with shape batch_size x input_max_len x target_max_len\n \"\"\"\n\n def next_state(x, mask_and_trans_probs):\n mask_s, blank_probs_s, truth_probs = mask_and_trans_probs\n\n beta_b = tf.concat([x[:, 1:] + blank_probs_s, LOG_0 * tf.ones(shape=[batch_size, 1])], axis=1)\n beta_t = tf.concat([x[:, :-1] + truth_probs, LOG_0 * tf.ones(shape=[batch_size, 1])], axis=1)\n\n beta_next = reduce_logsumexp(tf.stack([beta_b, beta_t], axis=0), axis=0)\n masked_beta_next = nan_to_zero(beta_next * tf.expand_dims(mask_s, axis=1)) + nan_to_zero(\n x * tf.expand_dims((1.0 - mask_s), axis=1)\n )\n return tf.reshape(masked_beta_next, shape=tf.shape(x))\n\n # Initial beta for batches.\n initial_beta_mask = tf.one_hot(logit_length - 1, depth=input_max_len + 1)\n initial_beta = tf.expand_dims(blank_sl, axis=1) * initial_beta_mask + nan_to_zero(LOG_0 * (1.0 - initial_beta_mask))\n\n # Mask for scan iterations.\n mask = tf.sequence_mask(\n logit_length + label_length - 1,\n input_max_len + target_max_len - 2,\n dtype=tf.dtypes.float32,\n )\n mask = tf.transpose(mask, perm=[1, 0])\n\n bwd = tf.scan(\n next_state,\n (mask, bp_diags[:-1, :, :], tp_diags),\n initializer=initial_beta,\n reverse=True,\n )\n\n beta = tf.transpose(tf.concat([bwd, tf.expand_dims(initial_beta, axis=0)], axis=0), perm=[1, 2, 0])[:, :-1, :]\n beta = matrix_diag_part_v2(beta, k=(0, target_max_len - 1), padding_value=LOG_0)\n beta = tf.transpose(tf.reverse(beta, axis=[1]), perm=[0, 2, 1])\n\n return beta\n\n\ndef compute_rnnt_loss_and_grad_helper(logits, labels, label_length, logit_length):\n batch_size = tf.shape(logits)[0]\n input_max_len = tf.shape(logits)[1]\n target_max_len = tf.shape(logits)[2]\n vocab_size = tf.shape(logits)[3]\n\n one_hot_labels = tf.one_hot(\n tf.tile(tf.expand_dims(labels, axis=1), multiples=[1, input_max_len, 1]),\n depth=vocab_size,\n )\n\n log_probs = tf.nn.log_softmax(logits)\n blank_probs, truth_probs = transition_probs(one_hot_labels, log_probs)\n bp_diags = 
extract_diagonals(blank_probs)\n tp_diags = extract_diagonals(truth_probs)\n\n label_mask = tf.expand_dims(\n tf.sequence_mask(label_length + 1, maxlen=target_max_len, dtype=tf.float32),\n axis=1,\n )\n small_label_mask = tf.expand_dims(tf.sequence_mask(label_length, maxlen=target_max_len, dtype=tf.float32), axis=1)\n input_mask = tf.expand_dims(tf.sequence_mask(logit_length, maxlen=input_max_len, dtype=tf.float32), axis=2)\n small_input_mask = tf.expand_dims(\n tf.sequence_mask(logit_length - 1, maxlen=input_max_len, dtype=tf.float32),\n axis=2,\n )\n mask = label_mask * input_mask\n grad_blank_mask = (label_mask * small_input_mask)[:, :-1, :]\n grad_truth_mask = (small_label_mask * input_mask)[:, :, :-1]\n\n alpha = forward_dp(bp_diags, tp_diags, batch_size, input_max_len, target_max_len) * mask\n\n indices = tf.stack([logit_length - 1, label_length], axis=1)\n blank_sl = tf.gather_nd(blank_probs, indices, batch_dims=1)\n\n beta = (\n backward_dp(\n bp_diags,\n tp_diags,\n batch_size,\n input_max_len,\n target_max_len,\n label_length,\n logit_length,\n blank_sl,\n )\n * mask\n )\n beta = tf.where(tf.math.is_nan(beta), tf.zeros_like(beta), beta)\n final_state_probs = beta[:, 0, 0]\n\n # Compute gradients of loss w.r.t. blank log-probabilities.\n grads_blank = (\n -tf.exp(\n (\n alpha[:, :-1, :]\n + beta[:, 1:, :]\n - tf.reshape(final_state_probs, shape=[batch_size, 1, 1])\n + blank_probs[:, :-1, :]\n )\n * grad_blank_mask\n )\n * grad_blank_mask\n )\n grads_blank = tf.concat([grads_blank, tf.zeros(shape=(batch_size, 1, target_max_len))], axis=1)\n last_grads_blank = -1 * tf.scatter_nd(\n tf.concat(\n [\n tf.reshape(tf.range(batch_size, dtype=tf.int64), shape=[batch_size, 1]),\n tf.cast(indices, dtype=tf.int64),\n ],\n axis=1,\n ),\n tf.ones(batch_size, dtype=tf.float32),\n [batch_size, input_max_len, target_max_len],\n )\n grads_blank = grads_blank + last_grads_blank\n\n # Compute gradients of loss w.r.t. truth log-probabilities.\n grads_truth = (\n -tf.exp(\n (alpha[:, :, :-1] + beta[:, :, 1:] - tf.reshape(final_state_probs, shape=[batch_size, 1, 1]) + truth_probs)\n * grad_truth_mask\n )\n * grad_truth_mask\n )\n\n # Compute gradients of loss w.r.t. 
activations.\n a = tf.tile(\n tf.reshape(\n tf.range(target_max_len - 1, dtype=tf.int64),\n shape=(1, 1, target_max_len - 1, 1),\n ),\n multiples=[batch_size, 1, 1, 1],\n )\n b = tf.cast(\n tf.reshape(labels - 1, shape=(batch_size, 1, target_max_len - 1, 1)),\n dtype=tf.int64,\n )\n if not env_util.has_devices([\"GPU\", \"TPU\"]):\n b = tf.where(tf.equal(b, -1), tf.zeros_like(b), b) # for cpu testing (index -1 on cpu will raise errors)\n c = tf.concat([a, b], axis=3)\n d = tf.tile(c, multiples=(1, input_max_len, 1, 1))\n e = tf.tile(\n tf.reshape(tf.range(input_max_len, dtype=tf.int64), shape=(1, input_max_len, 1, 1)),\n multiples=(batch_size, 1, target_max_len - 1, 1),\n )\n f = tf.concat([e, d], axis=3)\n g = tf.tile(\n tf.reshape(tf.range(batch_size, dtype=tf.int64), shape=(batch_size, 1, 1, 1)),\n multiples=[1, input_max_len, target_max_len - 1, 1],\n )\n scatter_idx = tf.concat([g, f], axis=3)\n # TODO - improve the part of code for scatter_idx computation.\n probs = tf.exp(log_probs)\n grads_truth_scatter = tf.scatter_nd(\n scatter_idx,\n grads_truth,\n [batch_size, input_max_len, target_max_len, vocab_size - 1],\n )\n grads = tf.concat(\n [\n tf.reshape(grads_blank, shape=(batch_size, input_max_len, target_max_len, -1)),\n grads_truth_scatter,\n ],\n axis=3,\n )\n grads_logits = grads - probs * (tf.reduce_sum(grads, axis=3, keepdims=True))\n\n loss = -final_state_probs\n return loss, grads_logits\n\n\ndef rnnt_loss_tf(\n logits,\n labels,\n label_length,\n logit_length,\n name=None,\n):\n name = \"rnnt_loss\" if name is None else name\n with tf.name_scope(name):\n logits = tf.convert_to_tensor(logits, name=\"logits\")\n labels = tf.convert_to_tensor(labels, name=\"labels\")\n label_length = tf.convert_to_tensor(label_length, name=\"label_length\")\n logit_length = tf.convert_to_tensor(logit_length, name=\"logit_length\")\n\n args = [logits, labels, label_length, logit_length]\n\n @tf.custom_gradient\n def compute_rnnt_loss_and_grad(logits_t, labels_t, label_length_t, logit_length_t):\n \"\"\"Compute RNN-T loss and gradients.\"\"\"\n logits_t.set_shape(logits.shape)\n labels_t.set_shape(labels.shape)\n label_length_t.set_shape(label_length.shape)\n logit_length_t.set_shape(logit_length.shape)\n kwargs = dict(\n logits=logits_t,\n labels=labels_t,\n label_length=label_length_t,\n logit_length=logit_length_t,\n )\n result = compute_rnnt_loss_and_grad_helper(**kwargs)\n\n def grad(grad_loss):\n grads = [tf.reshape(grad_loss, [-1, 1, 1, 1]) * result[1]]\n grads += [None] * (len(args) - len(grads))\n return grads\n\n return result[0], grad\n\n return compute_rnnt_loss_and_grad(*args)\n"
]
| [
[
"tensorflow.exp",
"tensorflow.ones",
"tensorflow.reshape",
"tensorflow.scatter_nd",
"tensorflow.reverse",
"tensorflow.zeros_like",
"tensorflow.python.ops.gen_array_ops.matrix_diag_part_v2",
"tensorflow.stack",
"tensorflow.tile",
"tensorflow.one_hot",
"tensorflow.cast",
"tensorflow.shape",
"tensorflow.concat",
"tensorflow.transpose",
"tensorflow.math.is_nan",
"tensorflow.pad",
"tensorflow.scan",
"tensorflow.nn.log_softmax",
"tensorflow.get_logger",
"tensorflow.zeros",
"tensorflow.range",
"tensorflow.expand_dims",
"tensorflow.gather_nd",
"tensorflow.name_scope",
"tensorflow.reduce_sum",
"tensorflow.nn.compute_average_loss",
"tensorflow.sequence_mask",
"tensorflow.multiply",
"tensorflow.convert_to_tensor",
"tensorflow.equal",
"tensorflow.reduce_max"
]
]
|
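The entry above wraps a diagonal-wise forward/backward dynamic program in `tf.custom_gradient` so the hand-derived RNN-T gradients are handed back to the tape instead of autodiff's. A minimal, self-contained sketch of that wrapping pattern (a toy function, not the loss itself; all names here are illustrative):

    import tensorflow as tf

    @tf.custom_gradient
    def squared(x):
        y = tf.square(x)

        def grad(upstream):
            # hand-derived Jacobian-vector product: dy/dx = 2x
            return upstream * 2.0 * x

        return y, grad

    x = tf.constant([1.0, 2.0, 3.0])
    with tf.GradientTape() as tape:
        tape.watch(x)
        y = squared(x)
    print(tape.gradient(y, x))  # [2. 4. 6.]

As in `compute_rnnt_loss_and_grad` above, the decorated function returns both the forward value and a closure; the closure receives the upstream gradient (reshaped to broadcast, in the RNN-T case) and multiplies it into the precomputed per-logit gradients.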
imagineagents/qmt | [
"5e8a7001cc020979636e492448abcfd894396038"
]
| [
"qmt/geometry/freecad/objectConstruction.py"
]
| [
"# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"Functions that perform composite executions.\"\"\"\n\nimport numpy as np\nfrom six import iteritems, text_type\n\nimport logging\n# ~ logging.getLogger().setLevel(logging.DEBUG) # toggle debug logging for this file\n\nimport FreeCAD\nimport Draft\n\n# TODO: use namespace in code\nfrom qmt.geometry.freecad.auxiliary import *\nfrom qmt.geometry.freecad.fileIO import exportCAD\nfrom qmt.geometry.freecad.geomUtils import (\n extrude, copy_move, genUnion, # make_solid,\n getBB, makeBB, makeHexFace,\n extrudeBetween, draftOffset, intersect,\n checkOverlap, subtract,\n crossSection\n )\nfrom qmt.geometry.freecad.sketchUtils import (\n findSegments, splitSketch, extendSketch,\n findEdgeCycles\n )\n\nfrom qmt.data.geometry import Geo3DData, store_serial\n\n\nDBG_OUT = (logging.getLogger().level <= logging.DEBUG)\n\n\ndef set_params(doc, paramDict):\n # TODO: support passthrough params\n ''' Update the parameters in the modelParams spreadsheet to reflect the\n current value in the dict.\n '''\n if not paramDict:\n return\n try:\n spreadSheet = doc.modelParams\n spreadSheet.clearAll() # clear existing spreadsheet\n except:\n # Cave: unconditional removeObject on spreadSheet breaks param dependencies.\n doc.removeObject('modelParams') # it was not a good spreadsheet\n spreadSheet = doc.addObject('Spreadsheet::Sheet', 'modelParams')\n spreadSheet.set('A1', 'paramName')\n spreadSheet.set('B1', 'paramValue')\n spreadSheet.setColumnWidth('A', 200)\n spreadSheet.setStyle('A1:B1', 'bold', 'add')\n for i, key in enumerate(paramDict):\n paramType = paramDict[key][1]\n if paramType == 'freeCAD':\n idx = str(i + 2)\n spreadSheet.set('A' + idx, key)\n spreadSheet.set('B' + idx, str(paramDict[key][0]))\n spreadSheet.setAlias('B' + idx, str(key))\n elif paramType == 'python':\n pass\n else:\n raise ValueError('Unknown geometric parameter type.')\n\n doc.recompute()\n\n\nclass DummyInfo:\n def __init__(self):\n self.trash = []\n self.litho_setup_done = False\n\n\ndef build(opts):\n '''Build the 3D geometry in FreeCAD.\n\n :param dict opts: Options dict in the QMT Geometry3D.__init__ input format.\n :return geo: Geo3DData object with the built objects.\n '''\n doc = FreeCAD.ActiveDocument\n geo = Geo3DData()\n\n # Schedule for deletion all objects not explicitly selected by the user\n input_parts_names = []\n for part in opts['input_parts']:\n if part.fc_name is None:\n obj_list = doc.getObjectsByLabel(part.label)\n assert len(obj_list) == 1\n fc_name = obj_list[0].Name\n part.fc_name = fc_name\n else:\n fc_name = part.fc_name\n input_parts_names += [fc_name]\n\n blacklist = []\n for obj in doc.Objects:\n if (obj.Name not in input_parts_names) and (obj.TypeId != 'Spreadsheet::Sheet'):\n blacklist.append(obj)\n\n # Update the model parameters\n if 'params' in opts:\n # Extend params dictionary to original parts schema\n fcdict = {key: (value, 'freeCAD') for (key, value) in opts['params'].items()}\n set_params(doc, fcdict)\n\n if 'built_part_names' not in opts:\n opts['built_part_names'] = {}\n if 'serial_stp_parts' not in opts:\n opts['serial_stp_parts'] = {}\n\n # Build the parts\n info_holder = DummyInfo() # temporary workaround to support old litho code\n built_parts = []\n for input_part in opts['input_parts']:\n\n if input_part.directive == 'extrude':\n part = build_extrude(input_part)\n elif input_part.directive == 'SAG':\n part = build_sag(input_part)\n elif input_part.directive == 'wire':\n part = 
build_wire(input_part)\n elif input_part.directive == 'wire_shell':\n part = build_wire_shell(input_part)\n elif input_part.directive == 'lithography':\n part = build_lithography(input_part, opts, info_holder)\n elif input_part.directive == '3d_shape':\n part = build_pass(input_part)\n else:\n raise ValueError('Directive ' + input_part.directive +\n ' is not a recognized directive type.')\n\n assert part is not None\n doc.recompute()\n built_parts.append(part)\n opts['built_part_names'][input_part.label] = part.Name # needed for litho steps\n\n # Cleanup\n if not DBG_OUT:\n collect_garbage(info_holder)\n for obj in blacklist:\n delete(obj)\n doc.recompute()\n\n # Subtraction (removes the need for subtractlists)\n for i, (input_part, part) in enumerate(zip(opts['input_parts'], built_parts)):\n if input_part.domain_type == 'virtual':\n continue\n for other_input_part, other_part in zip(opts['input_parts'][0:i], built_parts[0:i]):\n if other_input_part.domain_type == 'virtual':\n continue\n if checkOverlap([part, other_part]):\n cut = subtract(part, copy_move(other_part), consumeInputs=True\n if not DBG_OUT else False)\n simple_copy = doc.addObject('Part::Feature', \"simple_copy\")\n simple_copy.Shape = cut.Shape # no solid, just its shape (can be disjoint)\n delete(cut)\n part = simple_copy\n built_parts[i] = simple_copy\n\n # Update names and store the built parts\n built_parts_dict = {} # dict for cross sections\n for input_part, built_part in zip(opts['input_parts'], built_parts):\n built_part.Label = input_part.label # here it's collision free\n input_part.serial_stp = store_serial([built_part], exportCAD, 'stp')\n input_part.built_fc_name = built_part.Name\n geo.add_part(input_part.label, input_part)\n built_parts_dict[input_part.label] = built_part # dict for cross sections\n\n # Build cross sections:\n for xsec_name in opts['xsec_dict']:\n axis = opts['xsec_dict'][xsec_name]['axis']\n distance = opts['xsec_dict'][xsec_name]['distance']\n polygons = buildCrossSection(xsec_name, axis, distance, built_parts_dict)\n geo.add_xsec(xsec_name,polygons,axis=axis,distance=distance)\n\n # Store the FreeCAD document\n geo.set_data('fcdoc', doc)\n\n return geo\n\n\ndef build_pass(part):\n '''Pass a part unchanged.'''\n assert part.directive == '3d_shape'\n existing_part = FreeCAD.ActiveDocument.getObject(part.fc_name)\n assert existing_part is not None\n return existing_part\n\n\ndef build_extrude(part):\n '''Build an extrude part.'''\n assert part.directive == 'extrude'\n z0 = part.z0\n deltaz = part.thickness\n doc = FreeCAD.ActiveDocument\n sketch = doc.getObject(part.fc_name)\n splitSketches = splitSketch(sketch)\n extParts = []\n for sketch in splitSketches:\n extParts.append(extrudeBetween(sketch, z0, z0 + deltaz, name=part.label))\n delete(sketch)\n doc.recompute()\n return genUnion(extParts, consumeInputs=True\n if not DBG_OUT else False)\n\n\ndef build_sag(part, offset=0.):\n '''Build a SAG part.'''\n assert part.directive == 'SAG'\n zBot = part.z0\n zMid = part.z_middle\n zTop = part.thickness + zBot\n tIn = part.t_in\n tOut = part.t_out\n doc = FreeCAD.ActiveDocument\n sketch = doc.getObject(part.fc_name)\n sag = makeSAG(sketch, zBot, zMid, zTop, tIn, tOut, offset=offset)\n sag.Label = part.label\n doc.recompute()\n return sag\n\n\ndef build_wire(part, offset=0.):\n '''Build a wire part.'''\n assert part.directive == 'wire'\n doc = FreeCAD.ActiveDocument\n zBottom = part.z0\n width = part.thickness\n sketch = doc.getObject(part.fc_name)\n wire = buildWire(sketch, zBottom, width, 
offset=offset)\n wire.Label = part.label\n return wire\n\n\ndef build_wire_shell(part, offset=0.):\n '''Build a wire shell part.'''\n assert part.directive == 'wire_shell'\n doc = FreeCAD.ActiveDocument\n zBottom = part.target_wire.z0\n radius = part.target_wire.thickness\n wireSketch = doc.getObject(part.target_wire.fc_name)\n shell_verts = part.shell_verts\n thickness = part.thickness\n\n if part.depo_mode == 'depo':\n depoZone = doc.getObject(part.fc_name)\n etchZone = None\n elif part.depo_mode == 'etch':\n depoZone = None\n etchZone = doc.getObject(part.fc_name)\n else:\n raise ValueError('Unknown depo_mode ' + part.depo_mode)\n\n shell = buildAlShell(\n wireSketch,\n zBottom,\n radius,\n shell_verts,\n thickness,\n depoZone=depoZone,\n etchZone=etchZone,\n offset=offset)\n shell.Label = part.label\n return shell\n\n\ndef build_lithography(part, opts, info_holder):\n \"\"\"Build a lithography part.\"\"\"\n assert part.directive == 'lithography'\n if not info_holder.litho_setup_done:\n initialize_lithography(info_holder, opts, fillShells=True)\n info_holder.litho_setup_done = True\n\n if DBG_OUT:\n FreeCAD.ActiveDocument.saveAs('tmp_after_init.fcstd')\n layerNum = part.layer_num\n returnObjs = []\n for objID in info_holder.lithoDict['layers'][layerNum]['objIDs']:\n if part.fc_name == info_holder.lithoDict['layers'][layerNum]['objIDs'][objID]['partName']:\n returnObjs.append(gen_G(info_holder, opts, layerNum, objID))\n\n logging.debug([o.Name for o in returnObjs])\n return genUnion(returnObjs, consumeInputs=True\n if not DBG_OUT else False)\n\n\n\n################################################################################\n\n\ndef buildWire(sketch, zBottom, width, faceOverride=None, offset=0.0):\n \"\"\"Given a line segment, build a nanowire of given cross-sectional width\n with a bottom location at zBottom. Offset produces an offset with a specified\n offset.\n \"\"\"\n doc = FreeCAD.ActiveDocument\n if faceOverride is None:\n face = makeHexFace(sketch, zBottom - offset, width + 2 * offset)\n else:\n face = faceOverride\n sketchForSweep = extendSketch(sketch, offset)\n mySweepTemp = doc.addObject('Part::Sweep', sketch.Name + '_wire')\n mySweepTemp.Sections = [face]\n mySweepTemp.Spine = sketchForSweep\n mySweepTemp.Solid = True\n doc.recompute()\n mySweep = copy_move(mySweepTemp)\n deepRemove(mySweepTemp)\n return mySweep\n\n\ndef buildAlShell(sketch, zBottom, width, verts, thickness,\n depoZone=None, etchZone=None, offset=0.0):\n \"\"\"Builds a shell on a nanowire parameterized by sketch, zBottom, and width.\n\n Here, verts describes the vertices that are covered, and thickness describes\n the thickness of the shell. depoZone, if given, is extruded and intersected\n with the shell (for an etch). Note that offset here *is not* a real offset -\n for simplicity we keep this a thin shell that lies cleanly on top of the\n bigger wire offset. 
There's no need to include the bottom portion since that's\n already taken up by the wire.\n \"\"\"\n lineSegments = findSegments(sketch)[0]\n x0, y0, z0 = lineSegments[0]\n x1, y1, z1 = lineSegments[1]\n dx = x1 - x0\n dy = y1 - y0\n rAxis = np.array([-dy, dx, 0])\n # axis perpendicular to the wire in the xy plane\n rAxis /= np.sqrt(np.sum(rAxis ** 2))\n zAxis = np.array([0, 0, 1.])\n doc = FreeCAD.ActiveDocument\n shellList = []\n for vert in verts:\n # Make the original wire (including an offset if applicable)\n originalWire = buildWire(sketch, zBottom, width, offset=offset)\n # Now make the shifted wire:\n angle = vert * np.pi / 3.\n dirVec = rAxis * np.cos(angle) + zAxis * np.sin(angle)\n shiftVec = (thickness) * dirVec\n transVec = FreeCAD.Vector(tuple(shiftVec))\n face = makeHexFace(sketch, zBottom - offset, width +\n 2 * offset) # make the bigger face\n shiftedFace = Draft.move(face, transVec, copy=False)\n extendedSketch = extendSketch(sketch, offset)\n # The shell offset is handled manually since we are using faceOverride to\n # input a shifted starting face:\n shiftedWire = buildWire(extendedSketch, zBottom,\n width, faceOverride=shiftedFace)\n delete(extendedSketch)\n shellCut = doc.addObject(\n \"Part::Cut\", sketch.Name + \"_cut_\" + str(vert))\n shellCut.Base = shiftedWire\n shellCut.Tool = originalWire\n doc.recompute()\n shell = Draft.move(shellCut, FreeCAD.Vector(0., 0., 0.), copy=True)\n doc.recompute()\n delete(shellCut)\n delete(originalWire)\n delete(shiftedWire)\n shellList.append(shell)\n if len(shellList) > 1:\n coatingUnion = doc.addObject(\n \"Part::MultiFuse\", sketch.Name + \"_coating\")\n coatingUnion.Shapes = shellList\n doc.recompute()\n coatingUnionClone = copy_move(coatingUnion)\n doc.removeObject(coatingUnion.Name)\n for shell in shellList:\n doc.removeObject(shell.Name)\n elif len(shellList) == 1:\n coatingUnionClone = shellList[0]\n else:\n raise NameError(\n 'Trying to build an empty Al shell. If no shell is desired, omit the AlVerts key from '\n 'the json.')\n if (depoZone is None) and (etchZone is None):\n return coatingUnionClone\n\n elif depoZone is not None:\n coatingBB = getBB(coatingUnionClone)\n zMin = coatingBB[4]\n zMax = coatingBB[5]\n depoVol = extrudeBetween(depoZone, zMin, zMax)\n etchedCoatingUnionClone = intersect(\n [depoVol, coatingUnionClone], consumeInputs=True\n if not DBG_OUT else False)\n return etchedCoatingUnionClone\n else: # etchZone instead\n coatingBB = getBB(coatingUnionClone)\n zMin = coatingBB[4]\n zMax = coatingBB[5]\n etchVol = extrudeBetween(etchZone, zMin, zMax)\n etchedCoatingUnionClone = subtract(\n coatingUnionClone, etchVol, consumeInputs=True\n if not DBG_OUT else False)\n return etchedCoatingUnionClone\n\n\ndef makeSAG(sketch, zBot, zMid, zTop, tIn, tOut, offset=0.):\n doc = FreeCAD.ActiveDocument\n # First, compute the geometric quantities we will need:\n a = zTop - zMid # height of the top part\n b = tOut + tIn # width of one of the trianglular pieces of the top\n alpha = np.abs(np.arctan(a / np.float(b))) # lower angle of the top part\n c = a + 2 * offset # height of the top part including the offset\n # horizontal width of the trianglular part of the top after offset\n d = c / np.tan(alpha)\n # horizontal shift in the triangular part of the top after an offset\n f = offset / np.sin(alpha)\n\n sketchList = splitSketch(sketch)\n returnParts = []\n for tempSketch in sketchList:\n # TODO: right now, if we try to taper the top of the SAG wire to a point, this\n # breaks, since the offset of topSketch is empty. 
We should detect and handle this.\n # For now, just make sure that the wire has a small flat top.\n botSketch = draftOffset(tempSketch, offset) # the base of the wire\n midSketch = draftOffset(tempSketch, f + d - tIn) # the base of the cap\n topSketch = draftOffset(tempSketch, -tIn + f) # the top of the cap\n delete(tempSketch) # remove the copied sketch part\n # Make the bottom wire:\n rectPartTemp = extrude(botSketch, zMid - zBot)\n rectPart = copy_move(rectPartTemp, moveVec=(0., 0., zBot - offset))\n delete(rectPartTemp)\n # make the cap of the wire:\n topSketchTemp = copy_move(topSketch, moveVec=(\n 0., 0., zTop - zMid + 2 * offset))\n capPartTemp = doc.addObject('Part::Loft', sketch.Name + '_cap')\n capPartTemp.Sections = [midSketch, topSketchTemp]\n capPartTemp.Solid = True\n doc.recompute()\n capPart = copy_move(capPartTemp, moveVec=(0., 0., zMid - offset))\n delete(capPartTemp)\n delete(topSketchTemp)\n delete(topSketch)\n delete(midSketch)\n delete(botSketch)\n returnParts += [capPart, rectPart]\n returnPart = genUnion(returnParts, consumeInputs=True\n if not DBG_OUT else False)\n return returnPart\n\n\ndef initialize_lithography(info, opts, fillShells=True):\n doc = FreeCAD.ActiveDocument\n info.fillShells = fillShells\n # The lithography step requires some infrastructure to track things\n # throughout.\n info.lithoDict = {} # dictionary containing objects for the lithography step\n info.lithoDict['layers'] = {}\n # Dictionary for containing the substrate. () indicates un-offset objects,\n # and subsequent tuples are offset by t_i for each index in the tuple.\n info.lithoDict['substrate'] = {(): []}\n\n # To start, we need to collect up all the lithography directives, and\n # organize them by layerNum and objectIDs within layers.\n baseSubstrateParts = []\n for part in opts['input_parts']:\n # If this part is a litho step\n if part.directive == 'lithography':\n layerNum = part.layer_num # layerNum of this part\n # Add the layerNum to the layer dictionary:\n if layerNum not in info.lithoDict['layers']:\n info.lithoDict['layers'][layerNum] = {'objIDs': {}}\n layerDict = info.lithoDict['layers'][layerNum]\n # Generate the base and thickness of the layer:\n layerBase = float(part.z0)\n layerThickness = float(part.thickness)\n # All parts within a given layer number are required to have\n # identical thickness and base, so check that:\n if 'base' in layerDict:\n assert layerBase == layerDict['base']\n else:\n layerDict['base'] = layerBase\n if 'thickness' in layerDict:\n assert layerThickness == layerDict['thickness']\n else:\n layerDict['thickness'] = layerThickness\n # A given part references a base sketch. 
However, we need to split\n # the sketch here into possibly disjoint sub-sketches to work\n # with them:\n sketch = doc.getObject(part.fc_name)\n splitSketches = splitSketch(sketch)\n for mySplitSketch in splitSketches:\n objID = len(layerDict['objIDs'])\n objDict = {}\n objDict['partName'] = part.fc_name\n objDict['sketch'] = mySplitSketch\n info.trash.append(mySplitSketch)\n info.lithoDict['layers'][layerNum]['objIDs'][objID] = objDict\n # Add the base substrate to the appropriate dictionary\n baseSubstrateParts += part.litho_base\n\n # Get rid of any duplicates:\n baseSubstrateParts = list(set(baseSubstrateParts))\n\n # Now convert the part names for the substrate into 3D freeCAD objects, which\n # should have already been rendered.\n for baseSubstrate in baseSubstrateParts:\n try:\n built_part_name = opts['built_part_names'][baseSubstrate.label]\n except:\n raise KeyError(\"No substrate built for '\" + str(baseSubstrate.label) + \"'\")\n info.lithoDict['substrate'][()] += [doc.getObject(built_part_name)]\n # ~ import sys\n # ~ sys.stderr.write(\">>> litdic \" + str(info.lithoDict) + \"\\n\")\n\n # Now that we have ordered the primitives, we need to compute a few\n # aux quantities that we will need. First, we compute the total bounding\n # box of the lithography procedure:\n thicknesses = []\n bases = []\n for layerNum in info.lithoDict['layers'].keys():\n thicknesses.append(info.lithoDict['layers'][layerNum]['thickness'])\n bases.append(info.lithoDict['layers'][layerNum]['base'])\n bottom = min(bases)\n totalThickness = sum(thicknesses)\n assert len(info.lithoDict['substrate'][\n ()]) > 0 # Otherwise, we don't have a reference for the lateral BB\n substrateUnion = genUnion(info.lithoDict['substrate'][()],\n consumeInputs=False) # total substrate\n BB = list(getBB(substrateUnion)) # bounding box\n BB[4] = min([bottom, BB[4]])\n BB[5] = max([BB[5] + totalThickness, bottom + totalThickness])\n BB = tuple(BB)\n constructionZone = makeBB(BB) # box that encompases the whole domain.\n info.lithoDict['boundingBox'] = [BB, constructionZone]\n delete(substrateUnion) # not needed for next steps\n delete(constructionZone) # not needed for next steps ... WHY?\n\n # Next, we add two prisms for each sketch. The first, which we denote \"B\",\n # is bounded by the base from the bottom and the layer thickness on the top.\n # These serve as \"stencils\" that would be the deposited shape if no other.\n # objects got in the way. The second set of prisms, denoted \"C\", covers the\n # base of the layer to the top of the entire domain box. 
This is used for\n # forming the volumes occupied when substrate objects are offset and\n # checking for overlaps.\n for layerNum in info.lithoDict['layers'].keys():\n base = info.lithoDict['layers'][layerNum]['base']\n thickness = info.lithoDict['layers'][layerNum]['thickness']\n for objID in info.lithoDict['layers'][layerNum]['objIDs']:\n sketch = info.lithoDict['layers'][layerNum]['objIDs'][objID]['sketch']\n B = extrudeBetween(sketch, base, base + thickness)\n C = extrudeBetween(sketch, base, BB[5])\n info.lithoDict['layers'][layerNum]['objIDs'][objID]['B'] = B\n info.lithoDict['layers'][layerNum]['objIDs'][objID]['C'] = C\n info.trash.append(B)\n info.trash.append(C)\n # In addition, add a hook for the HDict, which will contain the \"H\"\n # constructions for this object, but offset to thicknesses of various\n # layers, according to the keys.\n info.lithoDict['layers'][layerNum]['objIDs'][objID]['HDict'] = {}\n\n\ndef gen_offset(opts, obj, offsetVal):\n \"\"\"Generates an offset non-destructively.\"\"\"\n doc = FreeCAD.ActiveDocument\n # First, we need to check if the object needs special treatment:\n treatment = 'standard'\n try:\n partname = next(label for (label, built_name) in\n opts['built_part_names'].iteritems() if built_name == obj.Name)\n input_part = next(input_part for input_part in\n opts['input_parts'] if input_part.label == partname)\n treatment = input_part.directive\n except:\n pass\n\n if treatment == 'extrude' or treatment == 'lithography':\n treatment = 'standard'\n\n if treatment == 'standard':\n # Apparently the offset function is buggy for very small offsets...\n if offsetVal < 1e-5:\n offsetDupe = copy_move(obj)\n else:\n offset = doc.addObject(\"Part::Offset\")\n offset.Source = obj\n offset.Value = offsetVal\n offset.Mode = 0\n offset.Join = 2\n doc.recompute()\n offsetDupe = copy_move(offset)\n doc.recompute()\n delete(offset)\n elif treatment == 'wire':\n offsetDupe = build_wire(input_part, offset=offsetVal)\n elif treatment == 'wire_shell':\n offsetDupe = build_wire_shell(input_part, offset=offsetVal)\n elif treatment == 'SAG':\n offsetDupe = build_sag(input_part, offset=offsetVal)\n doc.recompute()\n\n try:\n logging.debug(\"%s (%s) -> %s (%s) [from %s]\", obj.Name, obj.Label,\n offsetDupe.Name, offsetDupe.Label, input_part.label)\n except:\n logging.debug(\"%s (%s) -> %s (%s)\", obj.Name, obj.Label,\n offsetDupe.Name, offsetDupe.Label)\n\n return offsetDupe\n\n\ndef screened_H_union_list(info, opts, obj, m, j, offsetTuple, checkOffsetTuple):\n \"\"\"Form the screened union list of obj with the layer m, objID j H object that has\n been offset according to offsetTuple. The screened union list is defined by checking\n first whether the object intersects with the components of the checkOffset version\n of the H object. Then, for each component that would intersect, we return the a list\n of the offsetTuple version of the object.\n \"\"\"\n logging.debug('>>> %s (%s)', obj.Name, obj.Label)\n # First, we need to check to see if we need to compute either of the\n # underlying H obj lists:\n HDict = info.lithoDict['layers'][m]['objIDs'][j]['HDict']\n # HDict stores a collection of H object component lists for each (layerNum,objID)\n # pair. 
The index of this dictionary is a tuple: () indicates no\n # offset, while other indices indicate an offset by summing the thicknesses\n # from corresponding layers.\n if checkOffsetTuple not in HDict: # If we haven't computed this yet\n HDict[checkOffsetTuple] = H_offset(info, opts, m, j,\n tList=list(checkOffsetTuple)) # list of H parts\n info.trash += HDict[checkOffsetTuple]\n if offsetTuple not in HDict: # If we haven't computed this yet\n HDict[offsetTuple] = H_offset(info, opts, m, j, tList=list(offsetTuple)) # list of H parts\n info.trash += HDict[offsetTuple]\n HObjCheckList = HDict[checkOffsetTuple]\n HObjList = HDict[offsetTuple]\n\n returnList = []\n for i, HObjPart in enumerate(HObjCheckList):\n if checkOverlap(\n [obj, HObjPart]): # if we need to include an overlap\n returnList.append(HObjList[i])\n\n # fix for multilayer intersections: make sure we really check all overlaps\n for i, HObjPart in enumerate(HObjList):\n if checkOverlap(\n [obj, HObjPart]): # if we need to include an overlap\n returnList.append(HObjList[i])\n\n logging.debug('<<< %s', [o.Name + ' (' + o.Label + ')' for o in returnList])\n return returnList\n\n\ndef screened_A_UnionList(info, opts, obj, t, ti, offsetTuple, checkOffsetTuple):\n \"\"\"Form the screened union list of obj with the substrate A that has\n been offset according to offsetTuple.\n \"\"\"\n logging.debug('>>> %s (%s)', obj.Name, obj.Label)\n # First, we need to see if we have built the objects before:\n if checkOffsetTuple not in info.lithoDict['substrate']:\n info.lithoDict['substrate'][checkOffsetTuple] = []\n for A in info.lithoDict['substrate'][()]:\n AObj = gen_offset(opts, A, t)\n info.trash.append(AObj)\n info.lithoDict['substrate'][checkOffsetTuple].append(AObj)\n if offsetTuple not in info.lithoDict['substrate']:\n info.lithoDict['substrate'][offsetTuple] = []\n for A in info.lithoDict['substrate'][()]:\n AObj = gen_offset(opts, A, t + ti)\n info.trash.append(AObj)\n info.lithoDict['substrate'][offsetTuple].append(AObj)\n\n returnList = []\n for i, ACheck in enumerate(\n info.lithoDict['substrate'][checkOffsetTuple]):\n if checkOverlap([obj, ACheck]):\n returnList.append(info.lithoDict['substrate'][offsetTuple][i])\n\n logging.debug('<<< %s', [o.Name + ' (' + o.Label + ')' for o in returnList])\n return returnList\n\n\ndef H_offset(info, opts, layerNum, objID, tList=[]):\n \"\"\"For a given layerNum=n and ObjID=i, compute the deposited object.\n\n ```latex\n H_{n,i}(t) = C_{n,i}(t) \\cap [ B_{n,i}(t) \\cup (\\cup_{m<n;j} H_{m,j}(t_i+t)) \\cup (\\cup_k A_k(t_i + t))],\n ```\n where A_k is from the base substrate list. This is computed recursively. The list of integers\n tList determines the offset t; t = the sum of all layer thicknesses ti that appear\n in tList. 
For example, tList = [1,2,3] -> t = t1+t2+t3.\n\n Note: this object is returned as a list of objects that need to be unioned together\n in order to form the full H.\n \"\"\"\n\n logging.debug('>>> partname %s',\n info.lithoDict['layers'][layerNum]['objIDs'][objID]['partName'])\n\n # This is a tuple that encodes the check offset t:\n checkOffsetTuple = tuple(sorted(tList))\n # This is a tuple that encodes the total offset t_i+t:\n offsetTuple = tuple(sorted(tList + [layerNum]))\n # First, check if we have to do anything:\n layers = info.lithoDict['layers']\n if checkOffsetTuple in layers[layerNum]['objIDs'][\n objID]['HDict']:\n return layers[layerNum]['objIDs'][objID][\n 'HDict'][checkOffsetTuple]\n # First, compute t:\n t = 0.0\n for tIndex in tList:\n t += layers[tIndex]['thickness']\n # thickness of this layer\n ti = layers[layerNum]['thickness']\n # Set the aux. thickness t:\n B = layers[layerNum]['objIDs'][objID]['B'] # B prism for this layer & objID\n C = layers[layerNum]['objIDs'][objID]['C'] # C prism for this layer & ObjID\n B_t = gen_offset(opts, B, t) # offset the B prism\n C_t = gen_offset(opts, C, t) # offset the C prism\n info.trash.append(B_t)\n info.trash.append(C_t)\n\n # Build up the substrate due to previously deposited gates\n HOffsetList = []\n for m in layers.keys():\n if m < layerNum: # then this is a lower layer\n for j in layers[m]['objIDs'].keys():\n HOffsetList += screened_H_union_list(\n info, opts, C_t, m, j, offsetTuple, checkOffsetTuple)\n # Next, build up the original substrate list:\n AOffsetList = []\n AOffsetList += screened_A_UnionList(info, opts, C_t, t, ti,\n offsetTuple, checkOffsetTuple)\n unionList = HOffsetList + AOffsetList\n returnList = [B_t]\n\n for obj in unionList:\n intObj = intersect([C_t, obj])\n info.trash.append(intObj)\n returnList.append(intObj)\n logging.debug('%s (%s) -> %s (%s)', obj.Name, obj.Label, intObj.Name, intObj.Label)\n\n layers[layerNum]['objIDs'][objID]['HDict'][checkOffsetTuple] = returnList\n\n logging.debug('<<< %s', [o.Name + ' (' + o.Label + ')' for o in returnList])\n return returnList\n\n\ndef gen_U(info, layerNum, objID):\n \"\"\"For a given layerNum and objID, compute the quantity:\n ```latex\n U_{n,i}(t) = (\\cup_{m<n;j} G_{m,j}) \\cup (\\cup_{k} A_k),\n ```\n where the inner union terms are not included if their intersection\n with B_i is empty.\n \"\"\"\n layers = info.lithoDict['layers']\n B = layers[layerNum]['objIDs'][objID][\n 'B'] # B prism for this layer & objID\n GList = []\n for m in layers.keys():\n if m < layerNum: # then this is a lower layer\n for j in layers[m].keys():\n if 'G' not in layers[layerNum][\n 'objIDs'][objID]:\n gen_G(info, m, j)\n G = layers[layerNum]['objIDs'][objID]['G']\n if checkOverlap([B, G]):\n GList.append(G)\n AList = []\n for A in info.lithoDict['substrate'][()]:\n if checkOverlap([B, A]):\n AList.append(A)\n unionList = GList + AList\n unionObj = genUnion(unionList, consumeInputs=False)\n return unionObj\n\n\ndef gen_G(info, opts, layerNum, objID):\n \"\"\"Generate the gate deposition for a given layerNum and objID.\"\"\"\n\n layerobj = info.lithoDict['layers'][layerNum]['objIDs'][objID]\n logging.debug('>>> layer %d obj %d (part:%s B:%s C:%s sketch:%s)', layerNum, objID,\n layerobj['partName'], layerobj['B'].Name, layerobj['C'].Name,\n layerobj['sketch'].Name)\n\n if 'G' not in layerobj:\n if () not in layerobj['HDict']:\n layerobj['HDict'][()] = H_offset(info, opts, layerNum, objID)\n\n if DBG_OUT:\n FreeCAD.ActiveDocument.saveAs('tmp_after_H_offset.fcstd')\n # TODO: 
reuse new function\n # This block fixes multifuses for wireshells with too big offsets,\n # by forcing all participating object shells into a new solid.\n # It still needs to be coerced into handling disjoint \"solids\".\n # ~ solid_hlist = []\n # ~ import Part\n # ~ for obj in layerobj['HDict'][()]:\n # ~ obj.Shape.Solids\n # ~ try:\n\n # ~ __s__ = obj.Shape.Faces\n # ~ __s__ = Part.Solid(Part.Shell(__s__))\n # ~ __o__ = FreeCAD.ActiveDocument.addObject(\"Part::Feature\", obj.Name + \"_solid\")\n # ~ __o__.Label = obj.Label + \"_solid\"\n # ~ __o__.Shape = __s__\n\n # ~ except Part.OCCError:\n #Draft.downgrade(obj,delete=True) # doesn't work without GUI\n # ~ for solid in obj.Shape.Solids:\n # ~ for shell in solid.Shells:\n # ~ pass\n\n # ~ solid_hlist.append(__o__)\n # ~ info.trash.append(obj)\n # ~ info.trash.append(__o__)\n # ~ info.trash.append(__s__)\n\n # ~ layerobj['HDict'][()] = solid_hlist\n # ~ logging.debug('new HDict: %s', [o.Name + ' (' + o.Label + ')' for o in layerobj['HDict'][()]])\n\n H = genUnion(layerobj['HDict'][()],\n consumeInputs=False)\n info.trash.append(H)\n if info.fillShells:\n G = copy_move(H)\n else:\n U = gen_U(info, layerNum, objID)\n G = subtract(H, U)\n delete(U)\n layerobj['G'] = G\n\n G = layerobj['G']\n partName = layerobj['partName']\n G.Label = partName\n logging.debug('<<< G from H: %s (%s)', G.Name, G.Label)\n return G\n\n\ndef collect_garbage(info):\n \"\"\"Delete all the objects in trash.\"\"\"\n for obj in info.trash:\n try:\n delete(obj)\n except BaseException:\n pass\n\n\ndef buildCrossSection(sliceName, axis, distance, built_parts_dict):\n \"\"\"Render the 2D objects required for cross-sections.\"\"\"\n polygons = {}\n for part_name in built_parts_dict:\n built_part = built_parts_dict[part_name]\n # loop over FreeCAD shapes corresponding to part\n # slice the 3D part\n fcName = part_name + '_section_' + sliceName\n section = crossSection(built_part, axis=axis, d=distance, name=fcName)\n # separate disjoint pieces\n segments, cycles = findEdgeCycles(section)\n for i, cycle in enumerate(cycles):\n points = [tuple(segments[idx, 0]) for idx in cycle]\n patchName = fcName\n patchName = '{}_{}'.format(part_name, i)\n # this mapping is necessary since numpy floats have a pickle error:\n polygons[patchName] = [map(float,point) for point in points]\n return polygons\n"
]
| [
[
"numpy.array",
"numpy.sin",
"numpy.float",
"numpy.sum",
"numpy.tan",
"numpy.cos"
]
]
|
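`buildAlShell` in the entry above places hexagonal shell facets by rotating a unit vector around the wire axis in 60-degree steps. That geometry, extracted as a standalone NumPy sketch (the function name and arguments are hypothetical, not part of qmt):

    import numpy as np

    def hex_shell_direction(p0, p1, vert):
        # in-plane axis perpendicular to the wire segment p0 -> p1
        dx, dy = p1[0] - p0[0], p1[1] - p0[1]
        r_axis = np.array([-dy, dx, 0.0])
        r_axis /= np.sqrt(np.sum(r_axis ** 2))
        z_axis = np.array([0.0, 0.0, 1.0])
        # facet number vert of the hexagon sits at vert * 60 degrees around the wire
        angle = vert * np.pi / 3.0
        return r_axis * np.cos(angle) + z_axis * np.sin(angle)

    print(hex_shell_direction((0.0, 0.0), (1.0, 0.0), 1))  # [0.  0.5  0.866...]

Note that the file itself is Python 2-era: `gen_offset` calls `.iteritems()` on a plain dict and `makeSAG` uses `np.float`, both of which fail under Python 3 with current NumPy.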
dotCirill/captcha_sh | [
"0b38464b114c35d4588f9672f7fc04686632bba5"
]
| [
"captcha_analysis/img2text/method_CNN.py"
]
| [
"import tensorflow as tf\nimport numpy as np\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\nmodel = keras.models.load_model('./NN/normal_model.h5')\n# same with model(!)\n# characters = ['f', 'q', '0', '9', 'e', 'm', 'd', 'a', '4', 'z', '8', 'n', 's', '5', 'g', 'k', 'x', 'u', '7', 'l', 'b', 'i', 'c', 'o', 'v', '.', 'j', 'h', '@', 'p', 'r', '2', 'y', 'w', '1', '6', 't', '3']\ncharacters = [' ', '.', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '@', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n\nchar_to_num = layers.experimental.preprocessing.StringLookup(\n vocabulary=list(characters), num_oov_indices=0, mask_token=None\n)\n\nnum_to_char = layers.experimental.preprocessing.StringLookup(\n vocabulary=char_to_num.get_vocabulary(), mask_token=None, invert=True\n)\n\nmax_length = 3\n\ndef decode_batch_predictions(pred):\n input_len = np.ones(pred.shape[0]) * pred.shape[1]\n # Use greedy search. For complex tasks, you can use beam search\n results = keras.backend.ctc_decode(pred, input_length=input_len, greedy=True)[0][0][\n :, :max_length\n ]\n # Iterate over the results and get back the text\n output_text = []\n for res in results:\n res = tf.strings.reduce_join(num_to_char(res)).numpy().decode(\"utf-8\")\n output_text.append(res)\n return output_text\n\ndef translate(img_path):\n img = tf.io.read_file(img_path)\n img = tf.io.decode_png(img, channels=1)\n img = tf.image.convert_image_dtype(img, tf.float32)\n img = tf.image.resize(img, [100, 100])\n img = tf.transpose(img, perm=[1, 0, 2])\n result = model.predict(np.array([img]))\n return decode_batch_predictions(result)[0].replace('[UNK]', '').replace(' ', '')\n\n"
]
| [
[
"numpy.array",
"tensorflow.image.convert_image_dtype",
"tensorflow.io.read_file",
"numpy.ones",
"tensorflow.transpose",
"tensorflow.keras.models.load_model",
"tensorflow.image.resize",
"tensorflow.io.decode_png",
"tensorflow.keras.backend.ctc_decode"
]
]
|
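`decode_batch_predictions` in the entry above relies on Keras's greedy CTC decoder. A self-contained call on random softmax outputs (the shapes are illustrative; the greedy decoder treats the last vocabulary index as the CTC blank):

    import numpy as np
    import tensorflow as tf
    from tensorflow import keras

    # toy batch: 2 samples, 8 timesteps, vocabulary of 5
    pred = tf.nn.softmax(tf.random.uniform((2, 8, 5)), axis=-1)
    input_len = np.ones(pred.shape[0]) * pred.shape[1]
    decoded = keras.backend.ctc_decode(pred, input_length=input_len, greedy=True)[0][0]
    print(decoded)  # per-sample label indices, padded with -1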
probcomp/cgpm2 | [
"280ab5bf3dd0d7c61196deaff7cb590692fc412a"
]
| [
"tests/test_cc_ensemble_dependencies.py"
]
| [
"# -*- coding: utf-8 -*-\n\n# Copyright (c) 2018 MIT Probabilistic Computing Project.\n# Released under Apache 2.0; refer to LICENSE.txt.\n\nimport itertools\n\nimport numpy as np\nimport pytest\n\nfrom cgpm.utils.general import get_prng\n\nfrom cgpm2.crosscat_ensemble import CrossCatEnsemble\nfrom cgpm2.transition_crosscat import GibbsCrossCat\nfrom cgpm2.transition_crosscat import validate_crosscat_dependencies\n\n\[email protected](strict=True,\n reason='Outputs must be zero based for dependence constraints.')\ndef test_dependencies_zero_based():\n prng = get_prng(2)\n CrossCatEnsemble(outputs=(1,2), inputs=(), Ci=[(1,2)],\n distributions=[('normal', None)]*2, chains=5, rng=prng)\n\[email protected](strict=True,\n reason='CPP backend for view inference with dependence constraints.')\ndef test_dependencies_no_cpp():\n prng = get_prng(2)\n ensemble = CrossCatEnsemble(outputs=(0,1), inputs=[], Ci=[(0,1)],\n distributions=[('normal', None)]*2, chains=5, rng=prng)\n ensemble.observe(0, {0:0, 1:1})\n synthesizer = GibbsCrossCat(ensemble.cgpms[0], Ci=ensemble.Ci)\n synthesizer.transition_view_assignments()\n\ndef incorporate_data(ensemble, T):\n rowids = range(np.shape(T)[0])\n observations = [dict(zip(ensemble.outputs, row)) for row in T]\n ensemble.observe_bulk(rowids, observations)\n return ensemble\n\nCi_list = [\n list(itertools.combinations(range(10), 2)), # All independent.\n [(2,8), (0,3)] # Custom independences.\n]\[email protected]('Ci', Ci_list)\ndef test_custom_independence(Ci):\n prng = get_prng(1)\n D = prng.normal(size=(10,1))\n T = np.repeat(D, 10, axis=1)\n ensemble = CrossCatEnsemble(outputs=range(10), inputs=[],\n distributions=[('normal', None)]*10, chains=5, Ci=Ci, rng=prng)\n incorporate_data(ensemble, T)\n for crosscat in ensemble.cgpms:\n validate_crosscat_dependencies(crosscat, (), Ci)\n ensemble.transition(ensemble.make_default_inference_program(N=10))\n for crosscat in ensemble.cgpms:\n validate_crosscat_dependencies(crosscat, (), Ci)\n\nCIs = [[], [(2,8), (0,3)]]\[email protected]('Ci', CIs)\ndef test_simple_dependence_constraint(Ci):\n prng = get_prng(1)\n D = prng.normal(size=(10,1))\n T = np.repeat(D, 10, axis=1)\n Cd = [(2,0), (8,3)]\n ensemble = CrossCatEnsemble(outputs=range(10), inputs=[],\n distributions=[('normal', None)]*10, chains=5, Ci=Ci, Cd=Cd, rng=prng)\n incorporate_data(ensemble, T)\n for crosscat in ensemble.cgpms:\n validate_crosscat_dependencies(crosscat, (), Ci)\n ensemble.transition(ensemble.make_default_inference_program(N=10))\n for crosscat in ensemble.cgpms:\n validate_crosscat_dependencies(crosscat, Cd, Ci)\n\ndef get_independence_inference_data(prng):\n column_view_1 = prng.normal(loc=0, size=(50,1))\n column_view_2 = np.concatenate((\n prng.normal(loc=10, size=(25,1)),\n prng.normal(loc=20, size=(25,1)),\n ))\n data_view_1 = np.repeat(column_view_1, 4, axis=1)\n data_view_2 = np.repeat(column_view_2, 4, axis=1)\n return np.column_stack((data_view_1, data_view_2))\n\ndef test_independence_inference_break():\n # Get lovecat to disassemble a view into two views.\n prng = get_prng(584)\n data = get_independence_inference_data(prng)\n # HACK: Use Cd to initialize CrossCat state to one view.\n Cd = ((0, 1, 2, 3, 4, 5, 6, 7),)\n ensemble = CrossCatEnsemble(outputs=range(8), inputs=[],\n distributions=[('normal', None)]*8, chains=1, Cd=Cd, rng=prng)\n ensemble.Cd = ()\n incorporate_data(ensemble, data)\n ensemble.transition(ensemble.make_default_inference_program(N=100))\n crosscat = ensemble.cgpms[0]\n Zv = {c: i for i, cgpm in 
enumerate(crosscat.cgpms) for c in cgpm.outputs}\n for output in [0, 1, 2, 3]:\n assert Zv[output] == Zv[0]\n for output in [4, 5, 6, 7]:\n assert Zv[output] == Zv[4]\n assert len(crosscat.cgpms) == 2\n\ndef test_independence_inference_merge():\n # Get lovecat to merge dependent columns into one view.\n prng = get_prng(582)\n data = get_independence_inference_data(prng)\n # Hack: Use Cd/Ci to initialize CrossCat as\n # {0:0, 1:0, 2:1, 3:1, 4:2, 5:2, 6:3, 7:3}\n Cd = ((0,1), (2,3), (4,5), (6,7))\n Ci = ((0,2), (0,4), (0, 6), (2,4), (2,6), (4,6))\n ensemble = CrossCatEnsemble(outputs=range(8), inputs=[],\n distributions=[('normal', None)]*8, chains=1, Cd=Cd, Ci=Ci, rng=prng)\n ensemble.Ci = ()\n incorporate_data(ensemble, data)\n ensemble.transition(ensemble.make_default_inference_program(N=100))\n crosscat = ensemble.cgpms[0]\n Zv = {c: i for i, cgpm in enumerate(crosscat.cgpms) for c in cgpm.outputs}\n for output in [0, 1, 2, 3,]:\n assert Zv[output] == Zv[0]\n for output in [4, 5, 6, 7]:\n assert Zv[output] == Zv[4]\n assert len(crosscat.cgpms) == 2\n"
]
| [
[
"numpy.repeat",
"numpy.column_stack",
"numpy.shape"
]
]
|
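The break/merge tests above recover the column-to-view assignment `Zv` by flattening each view's outputs with a dict comprehension. The same check with stub views (`FakeView` is hypothetical, standing in for the CrossCat view cgpms):

    class FakeView:
        def __init__(self, outputs):
            self.outputs = outputs

    # two views, as the tests expect after inference converges
    cgpms = [FakeView([0, 1, 2, 3]), FakeView([4, 5, 6, 7])]
    Zv = {c: i for i, cgpm in enumerate(cgpms) for c in cgpm.outputs}
    assert all(Zv[o] == Zv[0] for o in [0, 1, 2, 3])
    assert all(Zv[o] == Zv[4] for o in [4, 5, 6, 7])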
Frikster/Mesoscale-Brain-Explorer | [
"269d8f18162e2b9dca4619561e73a6beb8ba810c"
]
| [
"src/plugins/trim.py"
]
| [
"#!/usr/bin/env python3\n\nimport functools\nimport os\n\nimport numpy as np\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\n\nfrom .util import file_io\nfrom .util import project_functions as pfs\nfrom .util.plugin import PluginDefault\nfrom .util.plugin import WidgetDefault\n\n\nclass Widget(QWidget, WidgetDefault):\n class Labels(WidgetDefault.Labels):\n start_cut_off_label = 'Trim from start'\n end_cut_off_label = 'Trim from end'\n\n class Defaults(WidgetDefault.Defaults):\n start_cut_off_default = 0\n end_cut_off_default = 0\n manip = \"trim\"\n\n def __init__(self, project, plugin_position, parent=None):\n super(Widget, self).__init__(parent)\n if not project or not isinstance(plugin_position, int):\n return\n self.left_cut_off = QSpinBox()\n self.right_cut_off = QSpinBox()\n self.main_button = QPushButton('Trim frames')\n WidgetDefault.__init__(self, project, plugin_position)\n\n def setup_ui(self):\n super().setup_ui()\n max_cut_off = 50000\n self.vbox.addWidget(QLabel(self.Labels.start_cut_off_label))\n self.left_cut_off.setMinimum(0)\n self.left_cut_off.setMaximum(max_cut_off)\n self.left_cut_off.setValue(self.Defaults.start_cut_off_default)\n self.vbox.addWidget(self.left_cut_off)\n self.vbox.addWidget(QLabel(self.Labels.end_cut_off_label))\n self.right_cut_off.setMinimum(0)\n self.right_cut_off.setMaximum(max_cut_off)\n self.right_cut_off.setValue(self.Defaults.end_cut_off_default)\n self.vbox.addWidget(self.right_cut_off)\n self.vbox.addWidget(self.main_button)\n\n def setup_signals(self):\n super().setup_signals()\n self.main_button.clicked.connect(self.execute_primary_function)\n\n def setup_params(self, reset=False):\n super().setup_params(reset)\n if len(self.params) == 1 or reset:\n self.update_plugin_params(self.Labels.start_cut_off_label, self.Defaults.start_cut_off_default)\n self.update_plugin_params(self.Labels.end_cut_off_label, self.Defaults.end_cut_off_default)\n self.left_cut_off.setValue(self.params[self.Labels.start_cut_off_label])\n self.right_cut_off.setValue(self.params[self.Labels.end_cut_off_label])\n\n def setup_param_signals(self):\n super().setup_param_signals()\n self.left_cut_off.valueChanged[int].connect(functools.partial(self.update_plugin_params,\n self.Labels.start_cut_off_label))\n self.right_cut_off.valueChanged[int].connect(functools.partial(self.update_plugin_params,\n self.Labels.end_cut_off_label))\n\n def execute_primary_function(self, input_paths=None):\n if not input_paths:\n if not self.selected_videos:\n return\n else:\n selected_videos = self.selected_videos\n else:\n selected_videos = input_paths\n\n progress_global = QProgressDialog('Trimming Image Stack...', 'Abort', 0, 100, self)\n progress_global.setAutoClose(True)\n progress_global.setMinimumDuration(0)\n\n def global_callback(x):\n progress_global.setValue(x * 100)\n QApplication.processEvents()\n\n output_paths = []\n total = len(selected_videos)\n for global_i, video_path in enumerate(selected_videos):\n global_callback(global_i / total)\n frames_mmap = np.load(video_path, mmap_mode='c')\n cut_off_start = self.left_cut_off.value()\n cut_off_end = self.right_cut_off.value()\n\n progress = QProgressDialog('Trimming Image Stack ' + video_path, 'Abort', 0, 100, self)\n progress.setAutoClose(True)\n progress.setMinimumDuration(0)\n\n def callback(x):\n progress.setValue(x * 100)\n QApplication.processEvents()\n\n num_frames = len(frames_mmap)-cut_off_end-cut_off_start\n name_before, ext = os.path.splitext(os.path.basename(video_path))\n name_after = 
file_io.get_name_after_no_overwrite(name_before, self.Defaults.manip, self.project)\n path = str(os.path.join(self.project.path, name_after) + '.npy')\n file_io.save_file(path, np.empty((num_frames, len(frames_mmap[0]), len(frames_mmap[1])),\n np.load(video_path, mmap_mode='r').dtype))\n frames = np.load(path, mmap_mode='r+')\n for i, frame in enumerate(frames_mmap[cut_off_start:len(frames_mmap)-cut_off_end]):\n callback(i / float(len(frames_mmap)))\n frames[i] = frame[:, :]\n callback(1)\n # frames = np.array(frames_mmap[cut_off_start:len(frames_mmap)-cut_off_end])\n path = pfs.save_project(video_path, self.project, None, self.Defaults.manip, 'video')\n output_paths = output_paths + [path]\n pfs.refresh_list(self.project, self.video_list,\n self.params[self.Labels.video_list_indices_label],\n self.Defaults.list_display_type,\n self.params[self.Labels.last_manips_to_display_label])\n if self.Defaults.manip not in [self.toolbutton.itemText(i) for\n i in range(self.toolbutton.model().rowCount())]:\n self.toolbutton.addItem(self.Defaults.manip)\n item = self.toolbutton.model().item(self.toolbutton.model().rowCount()-1, 0)\n item.setCheckState(Qt.Checked)\n global_callback(1)\n return output_paths\n\n def setup_whats_this(self):\n super().setup_whats_this()\n self.left_cut_off.setWhatsThis(\"Number of frames to remove from the start of each image stack selected. Useful \"\n \"if artifacts (e.g. movement) occur near the start of most image stacks\")\n self.right_cut_off.setWhatsThis(\"Number of frames to remove from the end of each image stack selected. Useful \"\n \"if artifacts (e.g. movement) occur near the end of most image stacks\")\n\n\nclass MyPlugin(PluginDefault):\n def __init__(self, project, plugin_position):\n self.name = 'Trimming'\n self.widget = Widget(project, plugin_position)\n super().__init__(self.widget, self.widget.Labels, self.name)\n\n def check_ready_for_automation(self, expected_input_number):\n lc = self.widget.left_cut_off.value()\n rc = self.widget.right_cut_off.value()\n return lc > 0 or rc > 0\n\n def automation_error_message(self):\n return \"Trim plugin cannot have both trim paramaters set to 0.\"\n"
]
| [
[
"numpy.load"
]
]
|
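The trim loop in the entry above allocates the output with `np.empty`/`save_file` and then reopens it as a writable memmap. A compact single-allocation alternative using `np.lib.format.open_memmap` (a sketch assuming `.npy` stacks shaped `(frames, height, width)`; the function name is hypothetical):

    import numpy as np

    def trim_stack(in_path, out_path, cut_start, cut_end):
        src = np.load(in_path, mmap_mode='r')
        n = len(src) - cut_start - cut_end
        # writable memmap with the source's per-frame shape and dtype
        out = np.lib.format.open_memmap(out_path, mode='w+', dtype=src.dtype,
                                        shape=(n,) + src.shape[1:])
        for i, frame in enumerate(src[cut_start:len(src) - cut_end]):
            out[i] = frame
        out.flush()
        return out_path

Taking the per-frame shape from `src.shape[1:]` sidesteps having to spell out height and width separately, and neither the source nor the destination stack is ever loaded into RAM in full.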
clawpack/geoclaw_1d | [
"2272459a81f253720feaa3561094764433e7115a"
]
| [
"examples/okada_fault/setrun.py"
]
| [
"\"\"\"\nModule to set up run time parameters for geoclaw 1d_nonuniform code\n\nThe values set in the function setrun are then written out to data files\nthat will be read in by the Fortran code.\n\n\"\"\"\n\nimport os, sys\nimport numpy as np\nfrom mapc2p import make_mapc2p\n\n\n# Read in nonuniform computational grid, which should have\n# been created using makegrid.py:\n\nrundir = os.getcwd()\nmapc2p, ngrid = make_mapc2p(rundir)\ngrid_data_file = os.path.join(rundir, 'grid.data')\nprint('Found %i grid edges in %s' % (ngrid, grid_data_file))\nmx = ngrid - 1\n \n#dxc = 1./mx\n#xc = np.linspace(dxc/2., 1-dxc/2., mx) # computational cell centers\nxc = np.linspace(0,1,ngrid) # computational cell edges\nxp = mapc2p(xc) # corresponding physical cell edges\nprint('Setting mx = %i, cell edges from %g to %g' % (mx,xp[0],xp[-1]))\n\n\n#------------------------------\ndef setrun(claw_pkg='geoclaw'):\n#------------------------------\n\n \"\"\"\n Define the parameters used for running Clawpack.\n\n INPUT:\n claw_pkg expected to be \"geoclaw\" for this setrun.\n\n OUTPUT:\n rundata - object of class ClawRunData\n\n \"\"\"\n\n from clawpack.clawutil import data\n\n\n assert claw_pkg.lower() == 'geoclaw', \"Expected claw_pkg = 'geoclaw'\"\n\n num_dim = 1\n rundata = data.ClawRunData(claw_pkg, num_dim)\n\n #------------------------------------------------------------------\n # Problem-specific parameters to be written to setprob.data:\n #------------------------------------------------------------------\n # Sample setup to write one line to setprob.data ...\n probdata = rundata.new_UserData(name='probdata',fname='setprob.data')\n\n\n #------------------------------------------------------------------\n # Standard Clawpack parameters to be written to claw.data:\n #------------------------------------------------------------------\n\n clawdata = rundata.clawdata # initialized when rundata instantiated\n\n\n # ---------------\n # Spatial domain:\n # ---------------\n\n # Number of space dimensions:\n clawdata.num_dim = num_dim\n\n # Lower and upper edge of computational domain:\n # For nonuniform grid, 0 <= xc <= 1 and the file grid.data should\n # define the mapping to the physical domain\n\n clawdata.lower[0] = 0. # xlower\n clawdata.upper[0] = 1. 
# xupper\n\n # Number of grid cells:\n clawdata.num_cells[0] = 1000 # mx\n\n\n # ---------------\n # Size of system:\n # ---------------\n\n # Number of equations in the system:\n clawdata.num_eqn = 2\n\n # Number of auxiliary variables in the aux array (initialized in setaux)\n clawdata.num_aux = 2\n\n # Index of aux array corresponding to capacity function, if there is one:\n clawdata.capa_index = 2\n\n\n # -------------\n # Initial time:\n # -------------\n\n clawdata.t0 = 0.\n\n\n # Restart from checkpoint file of a previous run?\n # Note: If restarting, you must also change the Makefile to set:\n # RESTART = True\n # If restarting, t0 above should be from original run, and the\n # restart_file 'fort.qNNNN' specified below should be in\n # the OUTDIR indicated in Makefile.\n\n clawdata.restart = False # True to restart from prior results\n clawdata.restart_file = 'fort.q0006' # File to use for restart data\n\n\n # -------------\n # Output times:\n #--------------\n\n # Specify at what times the results should be written to fort.q files.\n # Note that the time integration stops after the final output time.\n\n clawdata.output_style = 1\n\n if clawdata.output_style==1:\n # Output ntimes frames at equally spaced times up to tfinal:\n # Can specify num_output_times = 0 for no output\n clawdata.num_output_times = 55\n clawdata.tfinal = 550.\n clawdata.output_t0 = False # output at initial (or restart) time?\n\n elif clawdata.output_style == 2:\n # Specify a list or numpy array of output times:\n # Include t0 if you want output at the initial time.\n clawdata.output_times = [0.2,1.6,2.9,4.3,5.6,6.9,8.3,9.6,10.9]\n\n elif clawdata.output_style == 3:\n # Output every step_interval timesteps over total_steps timesteps:\n clawdata.output_step_interval = 1\n clawdata.total_steps = 20\n clawdata.output_t0 = True # output at initial (or restart) time?\n\n\n clawdata.output_format = 'ascii' # 'ascii', 'binary', 'netcdf'\n\n clawdata.output_q_components = 'all' # could be list such as [True,True]\n clawdata.output_aux_components = 'all' # could be list\n clawdata.output_aux_onlyonce = True # output aux arrays only at t0\n\n\n # ---------------------------------------------------\n # Verbosity of messages to screen during integration:\n # ---------------------------------------------------\n\n # The current t, dt, and cfl will be printed every time step\n # at AMR levels <= verbosity. Set verbosity = 0 for no printing.\n # (E.g. 
verbosity == 2 means print only on levels 1 and 2.)\n clawdata.verbosity = 0\n\n\n\n # --------------\n # Time stepping:\n # --------------\n\n # if dt_variable==True: variable time steps used based on cfl_desired,\n # if dt_variable==False: fixed time steps dt = dt_initial always used.\n clawdata.dt_variable = True\n\n # Initial time step for variable dt.\n # (If dt_variable==0 then dt=dt_initial for all steps)\n clawdata.dt_initial = 1.\n\n # Max time step to be allowed if variable dt used:\n clawdata.dt_max = 1.e9\n\n # Desired Courant number if variable dt used\n clawdata.cfl_desired = 0.15\n # max Courant number to allow without retaking step with a smaller dt:\n clawdata.cfl_max = 0.2\n\n # Maximum number of time steps to allow between output times:\n clawdata.steps_max = 50000\n\n\n # ------------------\n # Method to be used:\n # ------------------\n\n # Order of accuracy: 1 => Godunov, 2 => Lax-Wendroff plus limiters\n clawdata.order = 2\n\n\n # Number of waves in the Riemann solution:\n clawdata.num_waves = 2\n\n # List of limiters to use for each wave family:\n # Required: len(limiter) == num_waves\n # Some options:\n # 0 or 'none' ==> no limiter (Lax-Wendroff)\n # 1 or 'minmod' ==> minmod\n # 2 or 'superbee' ==> superbee\n # 3 or 'vanleer' ==> van Leer\n # 4 or 'mc' ==> MC limiter\n clawdata.limiter = [4,4]\n\n clawdata.use_fwaves = True # True ==> use f-wave version of algorithms\n\n # Source terms splitting:\n # src_split == 0 or 'none' ==> no source term (src routine never called)\n # src_split == 1 or 'godunov' ==> Godunov (1st order) splitting used,\n # src_split == 2 or 'strang' ==> Strang (2nd order) splitting used, not recommended.\n clawdata.source_split = 'godunov'\n\n\n # --------------------\n # Boundary conditions:\n # --------------------\n\n # Number of ghost cells (usually 2)\n clawdata.num_ghost = 2\n\n # Choice of BCs at xlower and xupper:\n # 0 or 'user' => user specified (must modify bcNamr.f to use this option)\n # 1 or 'extrap' => extrapolation (non-reflecting outflow)\n # 2 or 'periodic' => periodic (must specify this at both boundaries)\n # 3 or 'wall' => solid wall for systems where q(2) is normal velocity\n\n clawdata.bc_lower[0] = 'extrap' # at xlower\n #clawdata.bc_lower[0] = 'wall' # at xlower\n clawdata.bc_upper[0] = 'extrap' # at xupper\n\n # Specify type of each aux variable in amrdata.auxtype.\n # This must be a list of length maux, each element of which is one of:\n # 'center', 'capacity', 'xleft' (see documentation).\n # Isn't used for this non-amr version, but still expected in data.\n\n amrdata = rundata.amrdata\n amrdata.aux_type = ['center','capacity']\n\n geo_data = rundata.geo_data\n\n geo_data.dry_tolerance = 1.e-3\n\n # Friction source terms:\n # src_split > 0 required\n # currently only Manning friction with a single n=friction_coefficient\n # is supported in 1d.\n\n geo_data.friction_forcing = True\n geo_data.manning_coefficient =.025\n\n\n\n # ---------------\n # Gauges:\n # ---------------\n rundata.gaugedata.gauges = []\n # for gauges append lines of the form [gaugeno, x, t1, t2]\n\n # for gauges append [gauge id, xc, t1, t2])\n # note that xc is the computational grid point, 0 <= xc <= 1,\n # so if you want to specify physical points xp, these need to be mapped\n # to corresponding xc as follows:\n\n if 1:\n xp_gauges = [-100e3, -20e3] # km\n for k,xp_g in enumerate(xp_gauges):\n gaugeno = k+1 \n # compute computational point xc_g that maps to xp_g:\n ii = np.where(xp < xp_g)[0][-1]\n xp_frac = (xp_g - xp[ii])/(xp[ii+1] - 
xp[ii])\n xc_g = (ii + xp_frac)/float(mx)\n print('gaugeno = %i: physical location xp_g = %g maps to xc_g = %.12f' \\\n % (gaugeno,xp_g, xc_g))\n rundata.gaugedata.gauges.append([gaugeno, xc_g, 0, 1e9])\n\n\n\n return rundata\n\n\nif __name__ == '__main__':\n # Set up run-time parameters and write all data files.\n import sys\n rundata = setrun(*sys.argv[1:])\n rundata.write()\n\n"
]
| [
[
"numpy.where",
"numpy.linspace"
]
]
|
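The gauge block in `setrun` above maps a physical gauge location `xp_g` back to a computational coordinate by locating the bracketing grid edges and interpolating linearly. The same arithmetic on a hypothetical linear `mapc2p`:

    import numpy as np

    mx = 100
    xc = np.linspace(0.0, 1.0, mx + 1)   # computational cell edges
    xp = -200e3 + 200e3 * xc             # hypothetical linear mapping to [-200 km, 0]

    xp_g = -100e3                        # physical gauge location
    ii = np.where(xp < xp_g)[0][-1]      # last edge strictly left of the gauge
    xp_frac = (xp_g - xp[ii]) / (xp[ii + 1] - xp[ii])
    xc_g = (ii + xp_frac) / float(mx)
    print(xc_g)                          # 0.5 for this linear mapping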
LionKiss/tensorflow_template_application-master | [
"d7a553001f24521bc85689eeef2761179748682e"
]
| [
"distributed/dense_classifier.py"
]
| [
"#!/usr/bin/env python\n\nimport tensorflow as tf\nimport math\nimport os\nimport numpy as np\n\n# Define parameters\nflags = tf.app.flags\nFLAGS = flags.FLAGS\nflags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')\nflags.DEFINE_integer('epoch_number', None, 'Number of epochs to run trainer.')\nflags.DEFINE_integer(\"batch_size\", 1024,\n \"indicates batch size in a single gpu, default is 1024\")\nflags.DEFINE_integer(\"thread_number\", 1, \"Number of thread to read data\")\nflags.DEFINE_integer(\"min_after_dequeue\", 100,\n \"indicates min_after_dequeue of shuffle queue\")\nflags.DEFINE_string(\"output_dir\", \"./tensorboard/\",\n \"indicates training output\")\nflags.DEFINE_string(\"model\", \"deep\",\n \"Model to train, option model: deep, linear\")\nflags.DEFINE_string(\"optimizer\", \"sgd\", \"optimizer to import\")\nflags.DEFINE_integer('hidden1', 10, 'Number of units in hidden layer 1.')\nflags.DEFINE_integer('hidden2', 20, 'Number of units in hidden layer 2.')\nflags.DEFINE_integer('steps_to_validate', 10,\n 'Steps to validate and print loss')\nflags.DEFINE_string(\"mode\", \"train\",\n \"Option mode: train, train_from_scratch, inference\")\n# For distributed\ntf.app.flags.DEFINE_string(\"ps_hosts\", \"\",\n \"Comma-separated list of hostname:port pairs\")\ntf.app.flags.DEFINE_string(\"worker_hosts\", \"\",\n \"Comma-separated list of hostname:port pairs\")\ntf.app.flags.DEFINE_string(\"job_name\", \"\", \"One of 'ps', 'worker'\")\ntf.app.flags.DEFINE_integer(\"task_index\", 0, \"Index of task within the job\")\n\n# Hyperparameters\nlearning_rate = FLAGS.learning_rate\nepoch_number = FLAGS.epoch_number\nthread_number = FLAGS.thread_number\nbatch_size = FLAGS.batch_size\nmin_after_dequeue = FLAGS.min_after_dequeue\ncapacity = thread_number * batch_size + min_after_dequeue\nFEATURE_SIZE = 9\n\n\n# Read serialized examples from filename queue\ndef read_and_decode(filename_queue):\n reader = tf.TFRecordReader()\n _, serialized_example = reader.read(filename_queue)\n features = tf.parse_single_example(\n serialized_example,\n features={\n \"label\": tf.FixedLenFeature([], tf.float32),\n \"features\": tf.FixedLenFeature([FEATURE_SIZE], tf.float32),\n })\n\n label = features[\"label\"]\n features = features[\"features\"]\n\n return label, features\n\n\ndef main(_):\n ps_hosts = FLAGS.ps_hosts.split(\",\")\n worker_hosts = FLAGS.worker_hosts.split(\",\")\n cluster = tf.train.ClusterSpec({\"ps\": ps_hosts, \"worker\": worker_hosts})\n server = tf.train.Server(cluster,\n job_name=FLAGS.job_name,\n task_index=FLAGS.task_index)\n\n if FLAGS.job_name == \"ps\":\n server.join()\n elif FLAGS.job_name == \"worker\":\n\n with tf.device(tf.train.replica_device_setter(\n worker_device=\"/job:worker/task:%d\" % FLAGS.task_index,\n cluster=cluster)):\n\n # Read TFRecords files\n filename_queue = tf.train.string_input_producer(\n tf.train.match_filenames_once(\"../data/cancer/cancer_train.csv.tfrecords\"),\n num_epochs=epoch_number)\n label, features = read_and_decode(filename_queue)\n batch_labels, batch_features = tf.train.shuffle_batch(\n [label, features],\n batch_size=batch_size,\n num_threads=thread_number,\n capacity=capacity,\n min_after_dequeue=min_after_dequeue)\n\n validate_filename_queue = tf.train.string_input_producer(\n tf.train.match_filenames_once(\n \"../data/cancer/cancer_test.csv.tfrecords\"),\n num_epochs=epoch_number)\n validate_label, validate_features = read_and_decode(\n validate_filename_queue)\n validate_batch_labels, validate_batch_features = 
tf.train.shuffle_batch(\n [validate_label, validate_features],\n batch_size=batch_size,\n num_threads=thread_number,\n capacity=capacity,\n min_after_dequeue=min_after_dequeue)\n\n # Define the model\n input_units = FEATURE_SIZE\n hidden1_units = FLAGS.hidden1\n hidden2_units = FLAGS.hidden2\n output_units = 2\n\n # Hidden 1\n weights1 = tf.Variable(\n tf.truncated_normal([input_units, hidden1_units]),\n dtype=tf.float32,\n name='weights')\n biases1 = tf.Variable(\n tf.truncated_normal([hidden1_units]),\n name='biases',\n dtype=tf.float32)\n hidden1 = tf.nn.relu(tf.matmul(batch_features, weights1) + biases1)\n\n # Hidden 2\n weights2 = tf.Variable(\n tf.truncated_normal([hidden1_units, hidden2_units]),\n dtype=tf.float32,\n name='weights')\n biases2 = tf.Variable(\n tf.truncated_normal([hidden2_units]),\n name='biases',\n dtype=tf.float32)\n hidden2 = tf.nn.relu(tf.matmul(hidden1, weights2) + biases2)\n\n # Linear\n weights3 = tf.Variable(\n tf.truncated_normal([hidden2_units, output_units]),\n dtype=tf.float32,\n name='weights')\n biases3 = tf.Variable(\n tf.truncated_normal([output_units]),\n name='biases',\n dtype=tf.float32)\n logits = tf.matmul(hidden2, weights3) + biases3\n\n batch_labels = tf.to_int64(batch_labels)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=logits, labels=batch_labels)\n loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')\n if FLAGS.optimizer == \"sgd\":\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n else:\n optimizer = tf.train.MomentumOptimizer(learning_rate)\n global_step = tf.Variable(0, name='global_step', trainable=False)\n train_op = optimizer.minimize(loss, global_step=global_step)\n\n # Compute accuracy\n accuracy_hidden1 = tf.nn.relu(tf.matmul(validate_batch_features,\n weights1) + biases1)\n accuracy_hidden2 = tf.nn.relu(tf.matmul(accuracy_hidden1, weights2)\n + biases2)\n accuracy_logits = tf.matmul(accuracy_hidden2, weights3) + biases3\n validate_softmax = tf.nn.softmax(accuracy_logits)\n\n validate_batch_labels = tf.to_int64(validate_batch_labels)\n correct_prediction = tf.equal(\n tf.argmax(validate_softmax, 1), validate_batch_labels)\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n # Compute auc\n validate_batch_labels = tf.cast(validate_batch_labels, tf.int32)\n num_labels = 2\n sparse_labels = tf.reshape(validate_batch_labels, [-1, 1])\n derived_size = tf.shape(validate_batch_labels)[0]\n indices = tf.reshape(tf.range(0, derived_size, 1), [-1, 1])\n concated = tf.concat(axis=1, values=[indices, sparse_labels])\n outshape = tf.stack([derived_size, num_labels])\n new_validate_batch_labels = tf.sparse_to_dense(concated, outshape,\n 1.0, 0.0)\n _, auc_op = tf.contrib.metrics.streaming_auc(\n validate_softmax, new_validate_batch_labels)\n\n # Define inference op\n inference_features = tf.placeholder(\"float\", [None, 9])\n inference_hidden1 = tf.nn.relu(tf.matmul(inference_features,\n weights1) + biases1)\n inference_hidden2 = tf.nn.relu(tf.matmul(inference_hidden1,\n weights2) + biases2)\n inference_logits = tf.matmul(inference_hidden2, weights3) + biases3\n inference_softmax = tf.nn.softmax(inference_logits)\n inference_op = tf.argmax(inference_softmax, 1)\n\n saver = tf.train.Saver()\n steps_to_validate = FLAGS.steps_to_validate\n init_op = tf.global_variables_initializer()\n\n tf.summary.scalar('loss', loss)\n tf.summary.scalar('accuracy', accuracy)\n tf.summary.scalar('auc', auc_op)\n\n summary_op = tf.summary.merge_all()\n\n sv = 
tf.train.Supervisor(is_chief=(FLAGS.task_index == 0),\n logdir=\"./checkpoint/\",\n init_op=init_op,\n summary_op=summary_op,\n saver=saver,\n global_step=global_step,\n save_model_secs=60)\n\n with sv.managed_session(server.target) as sess:\n step = 0\n while not sv.should_stop() and step < 50:\n\n # Get coordinator and run queues to read data\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(coord=coord, sess=sess)\n\n try:\n while not coord.should_stop():\n # Run train op\n _, loss_value, step = sess.run([train_op, loss,\n global_step])\n\n if step % steps_to_validate == 0:\n accuracy_value, auc_value, summary_value = sess.run(\n [accuracy, auc_op, summary_op])\n print(\n \"Step: {}, loss: {}, accuracy: {}, auc: {}\".format(\n step, loss_value, accuracy_value,\n auc_value))\n\n except tf.errors.OutOfRangeError:\n print(\"Done training after reading all data\")\n finally:\n coord.request_stop()\n\n # Wait for threads to exit\n coord.join(threads)\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n"
]
| [
[
"tensorflow.train.start_queue_runners",
"tensorflow.contrib.metrics.streaming_auc",
"tensorflow.matmul",
"tensorflow.reshape",
"tensorflow.stack",
"tensorflow.nn.softmax",
"tensorflow.global_variables_initializer",
"tensorflow.cast",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.shape",
"tensorflow.concat",
"tensorflow.FixedLenFeature",
"tensorflow.argmax",
"tensorflow.Variable",
"tensorflow.train.Saver",
"tensorflow.train.Supervisor",
"tensorflow.app.run",
"tensorflow.range",
"tensorflow.train.ClusterSpec",
"tensorflow.train.Coordinator",
"tensorflow.summary.scalar",
"tensorflow.to_int64",
"tensorflow.truncated_normal",
"tensorflow.train.shuffle_batch",
"tensorflow.placeholder",
"tensorflow.summary.merge_all",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.train.Server",
"tensorflow.sparse_to_dense",
"tensorflow.train.replica_device_setter",
"tensorflow.app.flags.DEFINE_integer",
"tensorflow.app.flags.DEFINE_string",
"tensorflow.train.MomentumOptimizer",
"tensorflow.train.match_filenames_once",
"tensorflow.TFRecordReader",
"tensorflow.reduce_mean"
]
]
|
Enigmatisms/Maevit | [
"f0520c8ba708aa6cb178e6e7bdb21fe7b5c0ca70"
]
| [
"py/LECosineAnnealing.py"
]
| [
"#-*-coding:utf-8-*-\n\"\"\"\n Linear Exponential Cosine Annealing Smooth Warm Restart Learning Rate for lr_scheduler.LambdaLR\n @author (Enigmatisms) HQY\n @date 2021.11.30\n @copyright Enigmatisms\n\"\"\"\n\nfrom math import cos\nimport math\n\n\nclass LECosineAnnealingSmoothRestart:\n \"\"\"\n The Maximum lr is bounded by a linear function, while the mininum lr is bounded by a exponential function\n The frequency decreases over epochs, at (epochs: which is last_epoch) time, lr comes to the mininum\n - max_start (min_start): upper (lower) bound starting lr\n - max_end (min_end): upper (lower) bound ending lr\n - epochs: The number of steps lr_scheduler needs\n - folds: (upper bound of) number of peaks\n - use_linear: whether upper bound decreases linearly\n \"\"\"\n def __init__(self, max_start, max_end, min_start, min_end, epochs, folds = 15, use_linear = False) -> None:\n coeff = (min_end / min_start) ** (1.0 / epochs)\n coeff2 = (max_end / max_start) ** (1.0 / epochs)\n b = epochs / (folds * 2.5 * math.pi)\n k = math.ceil(0.625 * folds - 0.25)\n a = 1 / (((k << 1) + 1) * math.pi) - 1 / (2.5 * math.pi * folds)\n self.f = None\n if use_linear: self.f = lambda x: (max_end - max_start) / epochs * x + max_start\n else: self.f = lambda x: max_start * (coeff2 ** x)\n self.g = lambda x: min_start * (coeff ** x)\n self.c = lambda x: cos(x / (a * x + b))\n\n def lr(self, x):\n return 0.5 * (self.f(x) - self.g(x)) * self.c(x) + (self.f(x) + self.g(x)) * 0.5 \n\nif __name__ == \"__main__\":\n import numpy as np\n import matplotlib.pyplot as plt\n import torch\n from torch.optim import lr_scheduler\n\n opt = torch.optim.Adam(torch.nn.Conv2d(3, 3, 3, 1, 1).parameters(), lr = 1.0)\n def lr_sch_res(lr_func, epoch):\n sch = lr_scheduler.LambdaLR(opt, lr_func)\n result = []\n for _ in range(epoch):\n opt.step()\n sch.step()\n result.append(sch.get_last_lr()[-1])\n return np.array(result)\n\n max_ep = 30 * 782\n max_start = 5e-4\n max_end = 5e-5\n min_start = 1e-4\n min_end = 5e-7\n fold = 7\n\n xs = np.linspace(0, max_ep, max_ep + 1)\n inf_b = np.array([min_end for _ in xs])\n sup_b = np.array([max_start for _ in xs])\n lr_linear = LECosineAnnealingSmoothRestart(max_start, max_end, min_start, min_end, max_ep, fold, True)\n lr_exp = LECosineAnnealingSmoothRestart(max_start, max_end, min_start, min_end, max_ep, fold)\n ys1 = lr_sch_res(lr_linear.lr, max_ep + 1)\n ys2 = lr_sch_res(lr_exp.lr, max_ep + 1)\n plt.plot(xs, ys1, c = 'r', label = 'linear')\n plt.plot(xs, ys2, c = 'b', label = 'exp')\n plt.plot(xs, inf_b, c = 'grey', label = 'inf_b', linestyle='--')\n plt.plot(xs, sup_b, c = 'black', label = 'sup_b', linestyle='--')\n plt.grid(axis = 'both')\n plt.legend()\n plt.show()\n"
]
| [
[
"numpy.array",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"torch.nn.Conv2d",
"torch.optim.lr_scheduler.LambdaLR",
"matplotlib.pyplot.show",
"numpy.linspace"
]
]
|
nishidayoshikatsu/Gasyori100knock | [
"f7fe35bca772eda2961a0790274c4934119b3fc2"
]
| [
"Question_41_50/answers/answer_48.py"
]
| [
"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Read image\nimg = cv2.imread(\"imori.jpg\").astype(np.float32)\nH, W, C = img.shape\n\n# Otsu binary\n## Grayscale\nout = 0.2126 * img[..., 2] + 0.7152 * img[..., 1] + 0.0722 * img[..., 0]\nout = out.astype(np.uint8)\n\n## Determine threshold of Otsu's binarization\nmax_sigma = 0\nmax_t = 0\n\nfor _t in range(1, 255):\n v0 = out[np.where(out < _t)]\n m0 = np.mean(v0) if len(v0) > 0 else 0.\n w0 = len(v0) / (H * W)\n v1 = out[np.where(out >= _t)]\n m1 = np.mean(v1) if len(v1) > 0 else 0.\n w1 = len(v1) / (H * W)\n sigma = w0 * w1 * ((m0 - m1) ** 2)\n if sigma > max_sigma:\n max_sigma = sigma\n max_t = _t\n\n## Binarization\n#print(\"threshold >>\", max_t)\nth = max_t\nout[out < th] = 0\nout[out >= th] = 255\n\n\n# Morphology filter\nMF = np.array(((0, 1, 0),\n (1, 0, 1),\n (0, 1, 0)), dtype=np.int)\n\n# Morphology - erode\nErode_time = 2\n\nfor i in range(Erode_time):\n tmp = np.pad(out, (1, 1), 'edge')\n for y in range(1, H+1):\n for x in range(1, W+1):\n if np.sum(MF * tmp[y-1:y+2, x-1:x+2]) < 255*4:\n out[y-1, x-1] = 0\n\n\n# Save result\ncv2.imwrite(\"out.jpg\", out)\ncv2.imshow(\"result\", out)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n"
]
| [
[
"numpy.pad",
"numpy.array",
"numpy.sum",
"numpy.mean",
"numpy.where"
]
]
|
VHeusinkveld/neural-chessboard | [
"452054d974d00331871292992a1357a65529fb9d"
]
| [
"slid.py"
]
| [
"import utils, debug\nfrom config import *\n\nimport math\nimport cv2, numpy as np\nimport collections\nna = np.array\n\n\"\"\"\nNC_SLID_CLAHE = [[3, (2, 6), 5], # @1\n\t\t [3, (6, 2), 5], # @2\n\t\t\t\t [0, (0, 0), 0]] # EE\n\"\"\"\n\n\"\"\"\nNC_SLID_CLAHE = [[4, (2, 5), 5], # @1\n\t\t [4, (5, 2), 5], # @2\n\t\t\t\t #[1, (2, 2), 15], # @3\n\t\t\t\t [0, (0, 0), 0]] # EE\n\"\"\"\n\n\"\"\"\nNC_SLID_CLAHE = [[2, (1, 5), 5], # @1\n\t\t [2, (5, 1), 5], # @2\n\t\t\t\t #[1, (2, 2), 15], # @3\n\t\t\t\t [0, (0, 0), 0]] # EE\n\"\"\"\n\n\"\"\"\nNC_SLID_CLAHE = [[3, (2, 8), 5], # @1\n\t\t [3, (8, 2), 5], # @2\n\t\t\t\t [5, (4, 4), 5], # @3\n\t\t\t\t [0, (0, 0), 0]] # EE\n\"\"\"\n\n# 7???\n# 4???\nNC_SLID_CLAHE = [[3, (2, 6), 5], # @1\n\t\t [3, (6, 2), 5], # @2\n\t\t\t\t [5, (3, 3), 5], # @3\n\t\t\t\t [0, (0, 0), 0]] # EE\n\n################################################################################\n\ndef slid_canny(img, sigma=0.25):\n\t\"\"\"apply Canny edge detector (automatic thresh)\"\"\"\n\tv = np.median(img)\n\timg = cv2.medianBlur(img, 5)\n\timg = cv2.GaussianBlur(img, (7, 7), 2)\n\tlower = int(max(0, (1.0 - sigma) * v))\n\tupper = int(min(255, (1.0 + sigma) * v))\n\treturn cv2.Canny(img, lower, upper)\n\ndef slid_detector(img, alfa=150, beta=2):\n\t\"\"\"detect lines using Hough algorithm\"\"\"\n\t__lines, lines = [], cv2.HoughLinesP(img, rho=1, theta=np.pi/360*beta,\n\t\tthreshold=40, minLineLength=50, maxLineGap=15) # [40, 40, 10]\n\tif lines is None: return []\n\tfor line in np.reshape(lines, (-1, 4)):\n\t\t__lines += [[[int(line[0]), int(line[1])],\n\t\t\t [int(line[2]), int(line[3])]]]\n\treturn __lines\n\ndef slid_clahe(img, limit=2, grid=(3,3), iters=5):\n\t\"\"\"repair using CLAHE algorithm (adaptive histogram equalization)\"\"\"\n\timg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\tfor i in range(iters):\n\t\timg = cv2.createCLAHE(clipLimit=limit, \\\n\t\t\t\ttileGridSize=grid).apply(img)\n\tdebug.image(img).save(\"slid_clahe_@1\")\n\tif limit != 0:\n\t\tkernel = np.ones((10, 10), np.uint8)\n\t\timg = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)\n\t\tdebug.image(img).save(\"slid_clahe_@2\")\n\treturn img\n\n################################################################################\n\ndef pSLID(img, thresh=150):\n\t\"\"\"find all lines using different settings\"\"\"\n\tprint(utils.call(\"pSLID(img)\"))\n\tsegments = []; i = 0\n\tfor key, arr in enumerate(NC_SLID_CLAHE):\n\t\ttmp = slid_clahe(img, limit=arr[0], grid=arr[1], iters=arr[2])\n\t\t__segments = list(slid_detector(slid_canny(tmp), thresh))\n\t\tsegments += __segments; i += 1\n\t\tprint(\"FILTER: {} {} : {}\".format(i, arr, len(__segments)))\n\t\tdebug.image(slid_canny(tmp)).lines(__segments).save(\"pslid_F%d\" % i)\n\treturn segments\n\nall_points = []\ndef SLID(img, segments):\n\t# FIXME: zrobic 2 rodzaje haszowania (katy + pasy [blad - delta])\n\tprint(utils.call(\"SLID(img, segments)\"))\n\t\n\tglobal all_points; all_points = []\n\tpregroup, group, hashmap, raw_lines = [[], []], {}, {}, []\n\n\t__cache = {}\n\tdef __dis(a, b):\n\t\tidx = hash(\"__dis\" + str(a) + str(b))\n\t\tif idx in __cache: return __cache[idx]\n\t\t__cache[idx] = np.linalg.norm(na(a)-na(b))\n\t\treturn __cache[idx]\n\n\tX = {}\n\tdef __fi(x):\n\t\tif x not in X: X[x] = 0;\n\t\tif (X[x] == x or X[x] == 0): X[x] = x\n\t\telse: X[x] = __fi(X[x])\n\t\treturn X[x]\n\tdef __un(a, b):\n\t\tia, ib = __fi(a), __fi(b)\n\t\tX[ia] = ib; group[ib] |= group[ia]\n\t\t#group[ia] = set()\n\t\t#group[ia] = set()\n\n\t# shortest path // height\n\tnln = lambda l1, 
x, dx: \\\n\t\tnp.linalg.norm(np.cross(na(l1[1])-na(l1[0]),\n\t\t\t\t\t\t\t\tna(l1[0])-na( x)))/dx\n\n\tdef __similar(l1, l2):\n\t\tda, db = __dis(l1[0], l1[1]), __dis(l2[0], l2[1])\n\t\t# if da > db: l1, l2, da, db = l2, l1, db, da\n\n\t\td1a, d2a = nln(l1, l2[0], da), nln(l1, l2[1], da)\n\t\td1b, d2b = nln(l2, l1[0], db), nln(l2, l1[1], db)\n\t\n\t\tds = 0.25 * (d1a + d1b + d2a + d2b) + 0.00001\n\t\t#print(da, db, abs(da-db))\n\t\t#print(int(da/ds), int(db/ds), \"|\", int(abs(da-db)), int(da+db),\n\t\t#\t\tint(da+db)/(int(abs(da-db))+0.00001))\n\t\talfa = 0.0625 * (da + db) #15\n\t\t# FIXME: roznica???\n\t\t#if d1 + d2 == 0: d1 += 0.00001 # [FIXME]: divide by 0\n\t\tt1 = (da/ds > alfa and db/ds > alfa)\n\t\tif not t1: return False # [FIXME]: dist???\n\t\treturn True\n\n\tdef __generate(a, b, n):\n\t\tpoints = []; t = 1/n\n\t\tfor i in range(n):\n\t\t\tx = a[0] + (b[0]-a[0]) * (i * t)\n\t\t\ty = a[1] + (b[1]-a[1]) * (i * t)\n\t\t\tpoints += [[int(x), int(y)]]\n\t\treturn points\n\n\tdef __analyze(group):\n\t\tglobal all_points\n\t\tpoints = []\n\t\tfor idx in group:\n\t\t\tpoints += __generate(*hashmap[idx], 10)\n\t\t_, radius = cv2.minEnclosingCircle(na(points)); w = radius * (math.pi/2)\n\t\tvx, vy, cx, cy = cv2.fitLine(na(points), cv2.DIST_L2, 0, 0.01, 0.01)\n\t\t# debug.color()\n\t\tall_points += points\n\t\treturn [[int(cx-vx*w), int(cy-vy*w)], [int(cx+vx*w), int(cy+vy*w)]]\n\n\tfor l in segments:\n\t\th = hash(str(l))\n\t\tt1 = l[0][0] - l[1][0]\n\t\tt2 = l[0][1] - l[1][1]\n\t\thashmap[h] = l; group[h] = set([h]); X[h] = h\n\t\tif abs(t1) < abs(t2): pregroup[0].append(l)\n\t\telse: pregroup[1].append(l)\n\n\tdebug.image(img.shape) \\\n\t\t.lines(pregroup[0], color=debug.color()) \\\n\t\t.lines(pregroup[1], color=debug.color()) \\\n\t.save(\"slid_pre_groups\")\n\n\tfor lines in pregroup:\n\t\tfor i in range(len(lines)):\n\t\t\tl1 = lines[i]; h1 = hash(str(l1))\n\t\t\t#print(h1, __fi(h1))\n\t\t\tif (X[h1] != h1): continue\n\t\t\t#if (__fi(h1) != h1): continue\n\t\t\tfor j in range(i+1, len(lines)):\n\t\t\t\tl2 = lines[j]; h2 = hash(str(l2))\n\t\t\t\t#if (__fi(h2) != h2): continue\n\t\t\t\tif (X[h2] != h2): continue\n\t\t\t\t#if (len(group[h2])==0): continue\n\t\t\t\tif not __similar(l1, l2): continue\n\t\t\t\t__un(h1, h2) # union & find\n\t\t\t\t# break # FIXME\n\n\t__d = debug.image(img.shape)\n\tfor i in group:\n\t\t#if (__fi(i) != i): continue\n\t\tif (X[i] != i): continue\n\t\t#if len(group[i]) == 0: continue\n\t\tls = [hashmap[h] for h in group[i]]\n\t\t__d.lines(ls, color=debug.color())\n\t__d.save(\"slid_all_groups\")\n\n\tfor i in group:\n\t\t#if (__fi(i) != i): continue\n\t\tif (X[i] != i): continue\n\t\t#if len(group[i]) == 0: continue\n\t\t#if (__fi(i) != i): continue\n\t\traw_lines += [__analyze(group[i])]\n\tdebug.image(img.shape).lines(raw_lines).save(\"slid_final\")\n\n\tdebug.image(img.shape)\\\n\t\t.points(all_points, color=(0,255,0), size=2)\\\n\t.lines(raw_lines).save(\"slid_final2\")\n\n\treturn raw_lines\n\ndef slid_tendency(raw_lines, s=4): # FIXME: [1.25 -> 2]\n\tprint(utils.call(\"slid_tendency(raw_lines)\"))\n\tlines = []; scale = lambda x, y, s: \\\n\t\tint(x * (1+s)/2 + y * (1-s)/2)\n\tfor a, b in raw_lines:\n\t\t# [A] s - scale\n\t\t# Xa' = Xa (1+s)/2 + Xb (1-s)/2\n\t\t# Ya' = Ya (1+s)/2 + Yb (1-s)/2\n\t\ta[0] = scale(a[0], b[0], s)\n\t\ta[1] = scale(a[1], b[1], s)\n\t\t# [B] s - scale\n\t\t# Xb' = Xb (1+s)/2 + Xa (1-s)/2\n\t\t# Yb' = Yb (1+s)/2 + Ya (1-s)/2\n\t\tb[0] = scale(b[0], a[0], s)\n\t\tb[1] = scale(b[1], a[1], s)\n\t\tlines += [[a, b]]\n\treturn 
lines\n"
]
| [
[
"numpy.median",
"numpy.ones",
"numpy.reshape"
]
]
|
r-beer/FINE | [
"a11078baff99a543437ab1646751e30df34ad5f3"
]
| [
"FINE/subclasses/lopf.py"
]
| [
"from FINE.transmission import Transmission, TransmissionModel\nfrom FINE import utils\nimport pyomo.environ as pyomo\nimport pandas as pd\n\n\nclass LinearOptimalPowerFlow(Transmission):\n \"\"\"\n A LinearOptimalPowerFlow component shows the behavior of a Transmission component but additionally models a\n linearized power flow. The LinearOptimalPowerFlow class inherits from the Transmission class.\n \"\"\"\n def __init__(self, esM, name, commodity, reactances, losses=0, distances=None,\n hasCapacityVariable=True, capacityVariableDomain='continuous', capacityPerPlantUnit=1,\n hasIsBuiltBinaryVariable=False, bigM=None,\n operationRateMax=None, operationRateFix=None, tsaWeight=1,\n locationalEligibility=None, capacityMin=None, capacityMax=None, sharedPotentialID=None,\n capacityFix=None, isBuiltFix=None,\n investPerCapacity=0, investIfBuilt=0, opexPerOperation=0, opexPerCapacity=0,\n opexIfBuilt=0, interestRate=0.08, economicLifetime=10):\n \"\"\"\n Constructor for creating an LinearOptimalPowerFlow class instance.\n The LinearOptimalPowerFlow component specific input arguments are described below. The Transmission component\n specific input arguments are described in the Transmission class and the general component\n input arguments are described in the Component class.\n\n **Required arguments:**\n\n :param reactances: reactances for DC power flow modeling.\n :type reactances: Pandas DataFrame. The row and column indices of the DataFrame have to equal\n the in the energy system model specified locations.\n \"\"\"\n Transmission.__init__(self, esM, name, commodity, losses, distances, hasCapacityVariable,\n capacityVariableDomain, capacityPerPlantUnit, hasIsBuiltBinaryVariable, bigM,\n operationRateMax, operationRateFix, tsaWeight, locationalEligibility, capacityMin,\n capacityMax, sharedPotentialID, capacityFix, isBuiltFix, investPerCapacity,\n investIfBuilt, opexPerOperation, opexPerCapacity, opexIfBuilt, interestRate,\n economicLifetime)\n\n self.modelingClass = LOPFModel\n\n self.reactances2dim = reactances\n self.reactances = pd.Series(self._mapC).apply(lambda loc: self.reactances2dim[loc[0]][loc[1]])\n\n def addToEnergySystemModel(self, esM):\n \"\"\"\n Function for adding a LinearOptimalPowerFlow component to the given energy system model.\n\n :param esM: EnergySystemModel instance representing the energy system in which the component should be modeled.\n :type esM: EnergySystemModel class instance\n \"\"\"\n super().addToEnergySystemModel(esM)\n\n\nclass LOPFModel(TransmissionModel):\n\n \"\"\"\n A LOPFModel class instance will be instantly created if a LinearOptimalPowerFlow class instance is initialized.\n It is used for the declaration of the sets, variables and constraints which are valid for the LinearOptimalPowerFlow\n class instance. These declarations are necessary for the modeling and optimization of the energy system model.\n The LOPFModel class inherits from the TransmissionModel class. 
\"\"\"\n\n def __init__(self):\n self.abbrvName = 'lopf'\n self.dimension = '2dim'\n self.componentsDict = {}\n self.capacityVariablesOptimum, self.isBuiltVariablesOptimum = None, None\n self.operationVariablesOptimum, self.phaseAngleVariablesOptimum = None, None\n self.optSummary = None\n\n ####################################################################################################################\n # Declare sparse index sets #\n ####################################################################################################################\n\n def initPhaseAngleVarSet(self, pyM):\n \"\"\"\n Declare phase angle variable set in the pyomo object for for each node.\n\n :param pyM: pyomo ConcreteModel which stores the mathematical formulation of the model.\n :type pyM: pyomo Concrete Model\n \"\"\"\n compDict, abbrvName = self.componentsDict, self.abbrvName\n\n # Set for operation variables\n def initPhaseAngleVarSet(pyM):\n return ((loc, compName) for compName, comp in compDict.items() for loc in compDict[compName]._mapL.keys())\n setattr(pyM, 'phaseAngleVarSet_' + abbrvName, pyomo.Set(dimen=2, initialize=initPhaseAngleVarSet))\n\n def declareSets(self, esM, pyM):\n \"\"\"\n Declare sets and dictionaries: design variable sets, operation variable sets, operation mode sets and\n linked components dictionary.\n\n :param esM: EnergySystemModel instance representing the energy system in which the component should be modeled.\n :type esM: EnergySystemModel class instance\n\n :param pyM: pyomo ConcreteModel which stores the mathematical formulation of the model.\n :type pyM: pyomo Concrete Model\n \"\"\"\n\n # # Declare design variable sets\n self.declareDesignVarSet(pyM)\n self.declareContinuousDesignVarSet(pyM)\n self.declareDiscreteDesignVarSet(pyM)\n self.declareDesignDecisionVarSet(pyM)\n\n # Declare operation variable sets\n self.declareOpVarSet(esM, pyM)\n self.initPhaseAngleVarSet(pyM)\n\n # Declare operation variable set\n self.declareOperationModeSets(pyM, 'opConstrSet', 'operationRateMax', 'operationRateFix')\n\n ####################################################################################################################\n # Declare variables #\n ####################################################################################################################\n\n def declarePhaseAngleVariables(self, pyM):\n \"\"\"\n Declare phase angle variables.\n\n :param pyM: pyomo ConcreteModel which stores the mathematical formulation of the model.\n :type pyM: pyomo Concrete Model\n \"\"\"\n setattr(pyM, 'phaseAngle_' + self.abbrvName,\n pyomo.Var(getattr(pyM, 'phaseAngleVarSet_' + self.abbrvName), pyM.timeSet, domain=pyomo.Reals))\n\n def declareVariables(self, esM, pyM):\n \"\"\"\n Declare design and operation variables.\n\n :param esM: EnergySystemModel instance representing the energy system in which the component should be modeled.\n :type esM: EnergySystemModel class instance\n\n :param pyM: pyomo ConcreteModel which stores the mathematical formulation of the model.\n :type pyM: pyomo Concrete Model\n \"\"\"\n\n # Capacity variables in [commodityUnit]\n self.declareCapacityVars(pyM)\n # (Continuous) numbers of installed components [-]\n self.declareRealNumbersVars(pyM)\n # (Discrete/integer) numbers of installed components [-]\n self.declareIntNumbersVars(pyM)\n # Binary variables [-] indicating if a component is considered at a location or not [-]\n self.declareBinaryDesignDecisionVars(pyM)\n # Flow over the edges of the components [commodityUnit]\n 
self.declareOperationVars(pyM, 'op')\n # Operation of component [commodityUnit]\n self.declarePhaseAngleVariables(pyM)\n\n ####################################################################################################################\n # Declare component constraints #\n ####################################################################################################################\n\n def powerFlowDC(self, pyM):\n \"\"\"\n Ensure that the flow between two locations is equal to the difference between the phase angle variables at\n these locations divided by the reactance of the line between these locations.\n\n :param pyM: pyomo ConcreteModel which stores the mathematical formulation of the model.\n :type pyM: pyomo Concrete Model\n \"\"\"\n compDict, abbrvName = self.componentsDict, self.abbrvName\n phaseAngleVar = getattr(pyM, 'phaseAngle_' + self.abbrvName)\n opVar, opVarSet = getattr(pyM, 'op_' + abbrvName), getattr(pyM, 'operationVarSet_' + abbrvName)\n\n def powerFlowDC(pyM, loc, compName, p, t):\n node1, node2 = compDict[compName]._mapC[loc]\n return (opVar[loc, compName, p, t] - opVar[compDict[compName]._mapI[loc], compName, p, t] ==\n (phaseAngleVar[node1, compName, p, t]-phaseAngleVar[node2, compName, p, t])/\n compDict[compName].reactances[loc])\n setattr(pyM, 'ConstrpowerFlowDC_' + abbrvName, pyomo.Constraint(opVarSet, pyM.timeSet, rule=powerFlowDC))\n\n def basePhaseAngle(self, pyM):\n \"\"\"\n Declare the constraint that the reference phase angle is set to zero for all time steps.\n\n :param pyM: pyomo ConcreteModel which stores the mathematical formulation of the model.\n :type pyM: pyomo Concrete Model\n \"\"\"\n compDict, abbrvName = self.componentsDict, self.abbrvName\n phaseAngleVar = getattr(pyM, 'phaseAngle_' + self.abbrvName)\n\n def basePhaseAngle(pyM, compName, p, t):\n node0 = sorted(compDict[compName]._mapL)[0]\n return phaseAngleVar[node0, compName, p, t] == 0\n setattr(pyM, 'ConstrBasePhaseAngle_' + abbrvName,\n pyomo.Constraint(compDict.keys(), pyM.timeSet, rule=basePhaseAngle))\n\n def declareComponentConstraints(self, esM, pyM):\n \"\"\"\n Declare time independent and dependent constraints.\n\n :param esM: EnergySystemModel instance representing the energy system in which the component should be modeled.\n :type esM: EnergySystemModel class instance\n\n :param pyM: pyomo ConcreteModel which stores the mathematical formulation of the model.\n :type pyM: pyomo Concrete Model\n \"\"\"\n super().declareComponentConstraints(esM, pyM)\n\n ################################################################################################################\n # Add DC power flow constraints #\n ################################################################################################################\n\n self.powerFlowDC(pyM)\n self.basePhaseAngle(pyM)\n\n ####################################################################################################################\n # Declare component contributions to basic EnergySystemModel constraints and its objective function #\n ####################################################################################################################\n\n def getSharedPotentialContribution(self, pyM, key, loc):\n \"\"\" Get contributions to shared location potential. 
\"\"\"\n return super().getSharedPotentialContribution(pyM, key, loc)\n\n def hasOpVariablesForLocationCommodity(self, esM, loc, commod):\n \"\"\"\n Check if the commodity´s transfer between a given location and the other locations of the energy system model\n is eligible.\n\n :param esM: EnergySystemModel in which the LinearOptimalPowerFlow components have been added to.\n :type esM: esM - EnergySystemModel class instance\n\n :param loc: Name of the regarded location (locations are defined in the EnergySystemModel instance)\n :type loc: string\n\n :param commod: Name of the regarded commodity (commodities are defined in the EnergySystemModel instance)\n :param commod: string\n \"\"\"\n return super().hasOpVariablesForLocationCommodity(esM, loc, commod)\n\n def getCommodityBalanceContribution(self, pyM, commod, loc, p, t):\n \"\"\" Get contribution to a commodity balance. \"\"\"\n return super().getCommodityBalanceContribution(pyM, commod, loc, p, t)\n\n def getObjectiveFunctionContribution(self, esM, pyM):\n \"\"\"\n Get contribution to the objective function.\n\n :param esM: EnergySystemModel instance representing the energy system in which the component should be modeled.\n :type esM: EnergySystemModel class instance\n\n :param pyM: pyomo ConcreteModel which stores the mathematical formulation of the model.\n :type pyM: pyomo Concrete Model\n \"\"\"\n return super().getObjectiveFunctionContribution(esM, pyM)\n\n def setOptimalValues(self, esM, pyM):\n \"\"\"\n Set the optimal values of the components.\n\n :param esM: EnergySystemModel instance representing the energy system in which the component should be modeled.\n :type esM: EnergySystemModel class instance\n\n :param pyM: pyomo ConcreteModel which stores the mathematical formulation of the model.\n :type pyM: pyomo Concrete Model\n \"\"\"\n\n super().setOptimalValues(esM, pyM)\n\n compDict, abbrvName = self.componentsDict, self.abbrvName\n phaseAngleVar = getattr(pyM, 'phaseAngle_' + abbrvName)\n\n optVal_ = utils.formatOptimizationOutput(phaseAngleVar.get_values(), 'operationVariables', '1dim',\n esM.periodsOrder)\n self.operationVariablesOptimum = optVal_\n\n def getOptimalValues(self, name='all'):\n \"\"\"\n Return optimal values of the components.\n\n :param name: name of the variables of which the optimal values should be returned:\\n\n * 'capacityVariables',\n * 'isBuiltVariables',\n * 'operationVariablesOptimum',\n * 'phaseAngleVariablesOptimum',\n * 'all' or another input: all variables are returned.\\n\n :type name: string\n \"\"\"\n if name == 'capacityVariablesOptimum':\n return {'values': self.capacityVariablesOptimum, 'timeDependent': False, 'dimension': self.dimension}\n elif name == 'isBuiltVariablesOptimum':\n return {'values': self.isBuiltVariablesOptimum, 'timeDependent': False, 'dimension': self.dimension}\n elif name == 'operationVariablesOptimum':\n return {'values': self.operationVariablesOptimum, 'timeDependent': True, 'dimension': self.dimension}\n elif name == 'phaseAngleVariablesOptimum':\n return {'values': self.phaseAngleVariablesOptimum, 'timeDependent': True, 'dimension': self.dimension}\n else:\n return {'capacityVariablesOptimum': {'values': self.capacityVariablesOptimum, 'timeDependent': False,\n 'dimension': self.dimension},\n 'isBuiltVariablesOptimum': {'values': self.isBuiltVariablesOptimum, 'timeDependent': False,\n 'dimension': self.dimension},\n 'operationVariablesOptimum': {'values': self.operationVariablesOptimum, 'timeDependent': True,\n 'dimension': self.dimension},\n 
'phaseAngleVariablesOptimum': {'values': self.phaseAngleVariablesOptimum, 'timeDependent': True,\n 'dimension': self.dimension}}\n"
]
| [
[
"pandas.Series"
]
]
|
yijiaozhang/hypercompare | [
"13459caf53d9eebecbc2ad8196ee7a20a15c3b77"
]
| [
"code/workflow/merge_results.py"
]
| [
"\"\"\"\nThis script merges all the input tables\n\"\"\"\nimport sys\nimport pandas as pd\nimport hypercomparison.utils\n\nlogger = hypercomparison.utils.get_logger(__name__)\n\n\ninput_lists = sys.argv[1:-1]\nout_path = sys.argv[-1]\n\nlogger.info(\"Start to load files...\")\n\ndfs = []\nfor input_list in input_lists:\n temp_df = pd.read_csv(input_list)\n dfs.append(temp_df)\n\ndf = pd.concat(dfs)\n\nlogger.info(\"Start to dump result...\")\ndf.to_csv(out_path, index=None)"
]
| [
[
"pandas.read_csv",
"pandas.concat"
]
]
|
RashulChutani/ivy | [
"fd9e7c63c0f2bb7eb4c01681c90818efb2c12ca6"
]
| [
"ivy/functional/backends/tensorflow/layers.py"
]
| [
"\"\"\"Collection of TensorFlow network layers, wrapped to fit Ivy syntax and\nsignature.\"\"\"\n\n# global\nimport tensorflow as tf\n\nfrom tensorflow.python.types.core import Tensor\n\n\ndef conv1d(\n x: Tensor,\n filters: Tensor,\n strides: int,\n padding: str,\n data_format: str = \"NWC\",\n dilations: int = 1,\n) -> Tensor:\n if data_format == \"NCW\":\n x = tf.transpose(x, (0, 1, 2))\n res = tf.nn.conv1d(x, filters, strides, padding, \"NWC\", dilations)\n if data_format == \"NCW\":\n res = tf.transpose(res, (0, 1, 2))\n return res\n\n\nconv1d_transpose = lambda x, filters, strides, padding, output_shape=None, data_format=\"NWC\", dilations=1: tf.nn.conv1d_transpose(\n x, filters, output_shape, strides, padding, data_format, dilations\n)\n\n\ndef conv2d(x, filters, strides, padding, data_format=\"NHWC\", dilations=1):\n if data_format == \"NCHW\":\n x = tf.transpose(x, (0, 2, 3, 1))\n res = tf.nn.conv2d(x, filters, strides, padding, \"NHWC\", dilations)\n if data_format == \"NCHW\":\n return tf.transpose(res, (0, 3, 1, 2))\n return res\n\n\nconv2d_transpose = lambda x, filters, strides, padding, output_shape=None, data_format=\"NHWC\", dilations=1: tf.nn.conv2d_transpose(\n x, filters, output_shape, strides, padding, data_format, dilations\n)\n\n\ndef depthwise_conv2d(x, filters, strides, padding, data_format=\"NHWC\", dilations=1):\n filters = tf.expand_dims(filters, -1)\n strides = [1, strides, strides, 1]\n dilations = [dilations, dilations]\n return tf.nn.depthwise_conv2d(x, filters, strides, padding, data_format, dilations)\n\n\n# noinspection PyDefaultArgument\ndef conv3d(x, filters, strides, padding, data_format=\"NDHWC\", dilations=1):\n strides = [1] * 2 + ([strides] * 3 if isinstance(strides, int) else strides)\n dilations = [1] * 2 + ([dilations] * 3 if isinstance(dilations, int) else dilations)\n return tf.nn.conv3d(x, filters, strides, padding, data_format, dilations)\n\n\nconv3d_transpose = lambda x, filters, strides, padding, output_shape=None, data_format=\"NDHWC\", dilations=1: tf.nn.conv3d_transpose(\n x, filters, output_shape, strides, padding, data_format, dilations\n)\n"
]
| [
[
"tensorflow.nn.conv3d",
"tensorflow.nn.conv2d",
"tensorflow.expand_dims",
"tensorflow.nn.conv1d",
"tensorflow.nn.conv3d_transpose",
"tensorflow.transpose",
"tensorflow.nn.depthwise_conv2d",
"tensorflow.nn.conv2d_transpose",
"tensorflow.nn.conv1d_transpose"
]
]
|
paskett/advanceddeeplearning | [
"a11b1327b640183159f29cf08dbf6341b65cf290"
]
| [
"INN_VAE/INN_autoencoder_example/functionalities/gpu.py"
]
| [
"import torch\n\ndef get_device(dev_idx=0):\n \"\"\"\n Check if GPU is available and select GPU base on provided device index.\n\n :param dev_idx: index of the GPU device to use (if your PC has multiple GPU's). Default: 0\n :return: device variable used for further training on the GPU\n \"\"\"\n\n if torch.cuda.is_available():\n torch.cuda.set_device(dev_idx)\n device = 'cuda'\n else:\n device = 'cpu'\n\n return device\n"
]
| [
[
"torch.cuda.set_device",
"torch.cuda.is_available"
]
]
|
flover4/oci-designer-toolkit | [
"37b93648264022c4913ad8728f940b4d966d0bbd"
]
| [
"visualiser/model/bom.py"
]
| [
"\n# Copyright (c) 2020, 2021, Oracle and/or its affiliates.\n# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.\n\n\"\"\"Provide Module Description\n\"\"\"\n\nimport requests\nimport pandas as pd\n\n\npd.options.mode.chained_assignment = None # default='warn'\n\nocipartnumber_url = \"https://guk9elytviiyjhz-devadw.adb.uk-london-1.oraclecloudapps.com/ords/ociprice/okit/partnumber\"\n#ociprice_url = \"https://guk9elytviiyjhz-devadw.adb.uk-london-1.oraclecloudapps.com/ords/ociprice/okit/bom/\"\nskus = []\ndisplayname = []\nmetricdisplayname = []\nlistprice = []\nqty = []\nunit_used = []\nprice = []\n\n# calculate pricing for bom\n\n\ndef finalize_bom(df):\n df = df[df.qty != 0]\n df.price_per_month = df.listprice * df.qty * df.unit_used\n print(df)\n #install openpyxl\n #pip install openpyxl\n #change the export path in local directory in okit docker\n #export_path = \"C:\\\\Users\\\\janhan\\\\Downloads\\\\exported_bom_\" + job_id + \".xlsx\"\n #export_path = \"C:\\\\Users\\\\janhan\\\\Downloads\\\\exported_bom_test.xlsx\"\n #df.to_excel(export_path, index=False, header=True)\n return df\n\n# update bom\n\n\ndef update_bom(df, sku, qty, unit_used):\n df.loc[df['skus'] == sku, ['qty']] += qty\n df.loc[df['skus'] == sku, ['unit_used']] = unit_used\n # print(df)\n\n\n# create empty bom format\ndef create_bom():\n res = requests.get(ocipartnumber_url+\"?offset=0&limit=500\")\n partnumbers = res.json()\n\n for partnumber in partnumbers['items']:\n skus.append(partnumber.get(\"partnumber\"))\n # remove duplicate sku in front of display name\n displayname.append(partnumber.get(\"displayname\")[8::])\n metricdisplayname.append(partnumber.get(\"metricdisplayname\"))\n listprice.append(partnumber.get(\"pay_as_you_go\"))\n #listprice.append(partnumber.get(\"monthly_commit\"))\n qty.append(0)\n unit_used.append(0)\n price.append(0)\n bom_format = {\n 'skus': skus,\n 'displayname': displayname,\n 'metricdisplayname': metricdisplayname,\n 'listprice': listprice,\n 'qty': qty,\n 'unit_used': unit_used,\n 'price_per_month': price\n }\n df = pd.DataFrame(bom_format, columns=[\n 'skus', 'displayname', 'metricdisplayname', 'listprice', 'qty', 'unit_used', 'price_per_month'])\n pd.set_option('display.max_columns', None)\n pd.set_option('display.max_rows', 10)\n\n #df.loc[df['skus'] == 'B88517', ['qty']] += 2\n #df.loc[df['skus'] == 'B88517', ['unit_used']] += 10\n #print(df.loc[df['skus'] == 'B88517'])\n\n return df\n\n"
]
| [
[
"pandas.DataFrame",
"pandas.set_option"
]
]
|
trh0ly/Derivate | [
"d7aa46d95e4a0999b7d81c35eb987f9bab8f3419"
]
| [
"part 2/sn_random_numbers.py"
]
| [
"#\r\n# DX Package\r\n#\r\n# Frame -- Random Number Generation\r\n#\r\n# sn_random_numbers.py\r\n#\r\n# Python for Finance, 2nd ed.\r\n# (c) Dr. Yves J. Hilpisch\r\n#\r\nimport numpy as np\r\n\r\n\r\ndef sn_random_numbers(shape, antithetic=True, moment_matching=True,\r\n fixed_seed=False):\r\n ''' Returns an ndarray object of shape shape with (pseudo)random numbers\r\n that are standard normally distributed.\r\n Parameters\r\n ==========\r\n shape: tuple (o, n, m)\r\n generation of array with shape (o, n, m)\r\n antithetic: Boolean\r\n generation of antithetic variates\r\n moment_matching: Boolean\r\n matching of first and second moments\r\n fixed_seed: Boolean\r\n flag to fix the seed\r\n Results\r\n =======\r\n ran: (o, n, m) array of (pseudo)random numbers\r\n '''\r\n if fixed_seed:\r\n np.random.seed(1000)\r\n if antithetic:\r\n ran = np.random.standard_normal(\r\n (shape[0], shape[1], shape[2] // 2))\r\n ran = np.concatenate((ran, -ran), axis=2)\r\n else:\r\n ran = np.random.standard_normal(shape)\r\n if moment_matching:\r\n ran = ran - np.mean(ran)\r\n ran = ran / np.std(ran)\r\n if shape[0] == 1:\r\n return ran[0]\r\n else:\r\n return ran"
]
| [
[
"numpy.concatenate",
"numpy.random.standard_normal",
"numpy.random.seed",
"numpy.mean",
"numpy.std"
]
]
|
BookOps-CAT/babel | [
"47c8102bfbad8466185cd0e70501a931dd79ef29"
]
| [
"babel/data/transactions_carts.py"
]
| [
"# datastore transactions of CartsView\n\nfrom datetime import datetime, date\nimport logging\nimport os\nimport sys\n\nfrom pandas import read_sql\n# from sqlalchemy.sql.expression import between\n\n\nfrom data.datastore import (session_scope, Audn, Branch, Cart, Fund, Order,\n Lang, Library, MatType, Resource,\n ShelfCode, User, Vendor)\nfrom data.datastore_worker import (count_records, get_cart_data_view_records,\n insert,\n retrieve_record, retrieve_records,\n retrieve_cart_details_view_stmn,\n update_record, retrieve_first_record,\n retrieve_last_record_filtered)\nfrom errors import BabelError\nfrom logging_settings import format_traceback\nfrom gui.utils import get_id_from_index\nfrom ingest.sierra_exports import get_sierra_ids\nfrom marc.marc21 import make_bib\n\n\nmlogger = logging.getLogger('babel')\n\n\ndef get_carts_data(\n system_id, user='All users', status=''):\n data = []\n\n try:\n with session_scope() as session:\n recs = get_cart_data_view_records(\n session,\n system_id, user, status)\n for r in recs:\n data.append([\n r.cart_id,\n r.cart_name,\n f'{r.cart_date:%y-%m-%d %H:%M}',\n r.cart_status,\n r.cart_owner,\n r.linked])\n return data\n\n except Exception as exc:\n _, _, exc_traceback = sys.exc_info()\n tb = format_traceback(exc, exc_traceback)\n mlogger.error(\n 'Unhandled error on cart data retrieval.'\n f'Traceback: {tb}')\n raise BabelError(exc)\n\n\ndef export_orders_to_marc_file(fh, saving_status, cart_rec, progbar):\n # this has to be rewritten to make it more transparent\n # and easier to maintain\n\n try:\n progbar['value'] = 0\n\n # overwrite existing files\n if os.path.isfile(fh):\n try:\n os.remove(fh)\n except WindowsError as e:\n raise BabelError(\n f'File in use. Error: {e}')\n\n with session_scope() as session:\n rec_count = count_records(session, Order, cart_id=cart_rec.did)\n progbar['maximum'] = rec_count\n\n selector = retrieve_record(\n session, User, did=cart_rec.user_id)\n blanketPO = cart_rec.blanketPO\n # determine some global values\n if cart_rec.system_id == 1:\n oclc_code = 'BKL'\n selector_code = selector.bpl_code\n\n elif cart_rec.system_id == 2:\n oclc_code = 'NYP'\n selector_code = selector.nyp_code\n\n lib_rec = retrieve_record(\n session, Library, did=cart_rec.library_id)\n library_code = lib_rec.code\n\n ord_recs = retrieve_records(session, Order, cart_id=cart_rec.did)\n\n for order in ord_recs:\n mat_rec = retrieve_record(\n session, MatType, did=order.matType_id)\n ven_rec = retrieve_record(session, Vendor, did=order.vendor_id)\n\n if cart_rec.system_id == 1:\n order.mat_bib = mat_rec.bpl_bib_code\n order.mat_ord = mat_rec.bpl_ord_code\n order.vendor = ven_rec.bpl_code\n elif cart_rec.system_id == 2:\n order.mat_bib = mat_rec.nyp_bib_code\n order.mat_ord = mat_rec.nyp_ord_code\n order.vendor = ven_rec.nyp_code\n\n # retrieve joined values\n rec = retrieve_record(session, Audn, did=order.audn_id)\n order.audn = rec.code\n rec = retrieve_record(session, Lang, did=order.lang_id)\n order.lang = rec.code\n\n copies = 0\n locs = []\n funds = []\n for loc in order.locations:\n rec = retrieve_record(session, Branch, did=loc.branch_id)\n branch = rec.code\n try:\n rec = retrieve_record(\n session, ShelfCode, did=loc.shelfcode_id)\n shelfcode = rec.code\n shelf_with_audn = rec.includes_audn\n except AttributeError:\n shelfcode = ''\n shelf_with_audn = False\n try:\n rec = retrieve_record(session, Fund, did=loc.fund_id)\n fund = rec.code\n except AttributeError:\n fund = ''\n copies += loc.qty\n\n if shelf_with_audn:\n loc_str = 
f'{branch}{order.audn}{shelfcode}/{loc.qty}'\n else:\n if shelfcode is None:\n loc_str = f'{branch}/{loc.qty}'\n else:\n loc_str = f'{branch}{shelfcode}/{loc.qty}'\n locs.append(loc_str)\n\n fund_str = f'{fund}/{loc.qty}'\n funds.append(fund_str)\n\n order.copies = str(copies)\n order.locs = ','.join(locs)\n order.funds = ','.join(funds)\n order.order_date = datetime.strftime(date.today(), '%m-%d-%Y')\n\n make_bib(\n fh, oclc_code, library_code, blanketPO,\n selector_code, order)\n progbar['value'] += 1\n progbar.update()\n\n saving_status.set('Data saved to MARC file successfully.')\n\n except Exception as exc:\n _, _, exc_traceback = sys.exc_info()\n tb = format_traceback(exc, exc_traceback)\n mlogger.error(\n 'Unhandled error on saving to MARC.'\n f'Traceback: {tb}')\n raise BabelError(exc)\n\n\ndef get_cart_details_as_dataframe(cart_id):\n with session_scope() as session:\n stmn = retrieve_cart_details_view_stmn(cart_id)\n df = read_sql(stmn, session.bind)\n return df\n\n\ndef get_cart_data_for_order_sheet(cart_id):\n try:\n data_set = []\n with session_scope() as session:\n cart_rec = retrieve_record(session, Cart, did=cart_id)\n order_recs = retrieve_records(session, Order, cart_id=cart_id)\n for rec in order_recs:\n data = []\n data.append(rec.resource.other_no)\n data.append(rec.resource.isbn)\n data.append(rec.resource.title)\n data.append(rec.resource.author)\n total_cost = 0\n total_qty = 0\n for loc in rec.locations:\n total_cost += loc.qty * rec.resource.price_disc\n total_qty += loc.qty\n data.append(f'{rec.resource.price_disc:.2f}')\n data.append(total_qty)\n data.append(total_cost)\n data.append(rec.oid)\n data.append(cart_rec.blanketPO)\n data_set.append(data)\n session.expunge_all()\n\n return data_set\n\n except Exception as exc:\n _, _, exc_traceback = sys.exc_info()\n tb = format_traceback(exc, exc_traceback)\n mlogger.error(\n 'Unhandled error cart data retrieval.'\n f'Traceback: {tb}')\n raise BabelError(exc)\n\n\ndef create_cart_copy(cart_id, system, user, profile_idx, cart_name, status):\n \"\"\"\n Creates a copy of a cart\n args:\n cart_id: int, datastore cart did\n system: str, NYPL or BPL\n user: str, profile/user name\n profile_idx: dict, dictionary of user_id (key) and names\n cart_name: str, new cart name\n status: tkinter StringVar\n \"\"\"\n valid = True\n if not cart_id:\n valid = False\n status.set('Invalid cart id')\n elif not system:\n valid = False\n status.set('Failed. Missing system parameter.')\n elif not user:\n valid = False\n status.set('Failed. Missing profile prameter.')\n elif not cart_name:\n valid = False\n status.set('Failed. Missing new cart name.')\n\n try:\n with session_scope() as session:\n if cart_id and system and user and cart_name:\n # verify name/user not used:\n if system == 'BPL':\n system_id = 1\n elif system == 'NYPL':\n system_id = 2\n\n rec = retrieve_record(\n session, Cart,\n system_id=system_id,\n user_id=get_id_from_index(\n user, profile_idx),\n name=cart_name)\n if rec:\n valid = False\n status.set(\n 'Failed. 
A cart with the same name'\n 'already exists.\\nPlease change the name.')\n if valid:\n # create copy of the original cart\n old_orders = retrieve_records(\n session,\n Order,\n cart_id=cart_id)\n\n new_orders = []\n for order in old_orders:\n\n resource = Resource(\n title=order.resource.title,\n add_title=order.resource.add_title,\n author=order.resource.author,\n series=order.resource.series,\n publisher=order.resource.publisher,\n pub_place=order.resource.pub_place,\n summary=order.resource.summary,\n isbn=order.resource.isbn,\n upc=order.resource.upc,\n other_no=order.resource.other_no,\n price_list=order.resource.price_list,\n price_disc=order.resource.price_disc,\n desc_url=order.resource.desc_url,\n misc=order.resource.misc)\n\n new_orders.append(\n Order(\n lang_id=order.lang_id,\n audn_id=order.audn_id,\n vendor_id=order.vendor_id,\n matType_id=order.matType_id,\n poPerLine=order.poPerLine,\n note=order.note,\n comment=order.comment,\n resource=resource))\n\n insert(\n session,\n Cart,\n name=cart_name,\n user_id=get_id_from_index(\n user, profile_idx),\n system_id=system_id,\n orders=new_orders)\n\n status.set('Cart copied successfully.')\n\n except Exception as exc:\n _, _, exc_traceback = sys.exc_info()\n tb = format_traceback(exc, exc_traceback)\n mlogger.error(\n 'Unhandled error on cart copy.'\n f'Traceback: {tb}')\n raise BabelError(exc)\n\n\ndef determine_carts_linked(session, cart_ids):\n \"\"\"\n Determines if all orders in relevant cart have corresponding\n Sierra order and bib number and updates cart linked status\n args:\n cart_ids: list of cart_ids that had their orders updated with oid\n \"\"\"\n\n mlogger.debug('Updating carts linked status.')\n\n # determine time period when wlos were assigned\n for cart_id in cart_ids:\n cart_rec = retrieve_record(session, Cart, did=cart_id)\n # check if all orders have oid\n if cart_rec:\n linked = True\n for o in cart_rec.orders:\n if o.oid is None:\n mlogger.debug(\n f'Order did={o.did} missing oid.')\n linked = False\n\n if linked:\n mlogger.debug(\n f'Cart {cart_rec.name} (did={cart_rec.did}) linked.')\n update_record(session, Cart, cart_rec.did, linked=True)\n else:\n mlogger.debug(\n f'Cart {cart_rec.name} (did={cart_rec.did}) not linked.')\n else:\n mlogger.debug(\n f'Cart with did={cart_id} not linked (missing record).')\n\n\ndef add_sierra_ids_to_orders(source_fh, system_id):\n mlogger.debug(f'Linking IDs initiated system_id-{system_id}.')\n sids = get_sierra_ids(source_fh, system_id)\n try:\n unique_carts = set()\n with session_scope() as session:\n for sid in sids:\n wlo, oid, bid = sid\n ord_rec = retrieve_record(session, Order, wlo=wlo)\n if ord_rec:\n # record affected cart_id\n unique_carts.add(ord_rec.cart_id)\n # update ord rec\n update_record(\n session, Order, ord_rec.did, oid=oid, bid=bid)\n mlogger.debug(\n f'Record updated: order_id={ord_rec.did}, '\n f'wlo={wlo}, oid={oid}, bid={bid}')\n\n session.flush()\n # check which carts are linked\n determine_carts_linked(session, unique_carts)\n\n mlogger.debug('Linking completed.')\n\n except Exception as exc:\n _, _, exc_traceback = sys.exc_info()\n tb = format_traceback(exc, exc_traceback)\n mlogger.error(\n 'Unhandled error on linking IDs.'\n f'Traceback: {tb}')\n raise BabelError(exc)\n\n\ndef get_cart_id_ranges(cart_id):\n with session_scope() as session:\n first_ord = retrieve_first_record(session, Order, cart_id=cart_id)\n last_ord = retrieve_last_record_filtered(\n session, Order, cart_id=cart_id)\n\n return ((first_ord.wlo, last_ord.wlo), (first_ord.oid, 
last_ord.oid))\n"
]
| [
[
"pandas.read_sql"
]
]
|
Bodo-inc/numba | [
"4469f04e0f4916d2541e15b38d71d10268fca73f"
]
| [
"numba/tests/test_withlifting.py"
]
| [
"import copy\nimport warnings\nimport numpy as np\n\nimport numba\nfrom numba.core.transforms import find_setupwiths, with_lifting\nfrom numba.core.withcontexts import bypass_context, call_context, objmode_context\nfrom numba.core.bytecode import FunctionIdentity, ByteCode\nfrom numba.core.interpreter import Interpreter\nfrom numba.core import typing, errors, cpu\nfrom numba.core.registry import cpu_target\nfrom numba.core.compiler import compile_ir, DEFAULT_FLAGS\nfrom numba import njit, typeof, objmode, types\nfrom numba.core.extending import overload\nfrom numba.tests.support import (MemoryLeak, TestCase, captured_stdout,\n skip_unless_scipy)\nfrom numba.core.utils import PYVERSION\nfrom numba.experimental import jitclass\nimport unittest\n\n\ndef get_func_ir(func):\n func_id = FunctionIdentity.from_function(func)\n bc = ByteCode(func_id=func_id)\n interp = Interpreter(func_id)\n func_ir = interp.interpret(bc)\n return func_ir\n\n\ndef lift1():\n print(\"A\")\n with bypass_context:\n print(\"B\")\n b()\n print(\"C\")\n\n\ndef lift2():\n x = 1\n print(\"A\", x)\n x = 1\n with bypass_context:\n print(\"B\", x)\n x += 100\n b()\n x += 1\n with bypass_context:\n print(\"C\", x)\n b()\n x += 10\n x += 1\n print(\"D\", x)\n\n\ndef lift3():\n x = 1\n y = 100\n print(\"A\", x, y)\n with bypass_context:\n print(\"B\")\n b()\n x += 100\n with bypass_context:\n print(\"C\")\n y += 100000\n b()\n x += 1\n y += 1\n print(\"D\", x, y)\n\n\ndef lift4():\n x = 0\n print(\"A\", x)\n x += 10\n with bypass_context:\n print(\"B\")\n b()\n x += 1\n for i in range(10):\n with bypass_context:\n print(\"C\")\n b()\n x += i\n with bypass_context:\n print(\"D\")\n b()\n if x:\n x *= 10\n x += 1\n print(\"E\", x)\n\n\ndef lift5():\n print(\"A\")\n\n\ndef liftcall1():\n x = 1\n print(\"A\", x)\n with call_context:\n x += 1\n print(\"B\", x)\n return x\n\n\ndef liftcall2():\n x = 1\n print(\"A\", x)\n with call_context:\n x += 1\n print(\"B\", x)\n with call_context:\n x += 10\n print(\"C\", x)\n return x\n\n\ndef liftcall3():\n x = 1\n print(\"A\", x)\n with call_context:\n if x > 0:\n x += 1\n print(\"B\", x)\n with call_context:\n for i in range(10):\n x += i\n print(\"C\", x)\n return x\n\n\ndef liftcall4():\n with call_context:\n with call_context:\n pass\n\n\ndef liftcall5():\n for i in range(10):\n with call_context:\n if i == 5:\n print(\"A\")\n break\n return 10\n\n\ndef lift_undefiend():\n with undefined_global_var:\n pass\n\n\nbogus_contextmanager = object()\n\n\ndef lift_invalid():\n with bogus_contextmanager:\n pass\n\n\ngv_type = types.intp\n\n\nclass TestWithFinding(TestCase):\n def check_num_of_with(self, func, expect_count):\n the_ir = get_func_ir(func)\n ct = len(find_setupwiths(the_ir.blocks))\n self.assertEqual(ct, expect_count)\n\n def test_lift1(self):\n self.check_num_of_with(lift1, expect_count=1)\n\n def test_lift2(self):\n self.check_num_of_with(lift2, expect_count=2)\n\n def test_lift3(self):\n self.check_num_of_with(lift3, expect_count=1)\n\n def test_lift4(self):\n self.check_num_of_with(lift4, expect_count=2)\n\n def test_lift5(self):\n self.check_num_of_with(lift5, expect_count=0)\n\n\nclass BaseTestWithLifting(TestCase):\n def setUp(self):\n super(BaseTestWithLifting, self).setUp()\n self.typingctx = typing.Context()\n self.targetctx = cpu.CPUContext(self.typingctx)\n self.flags = DEFAULT_FLAGS\n\n def check_extracted_with(self, func, expect_count, expected_stdout):\n the_ir = get_func_ir(func)\n new_ir, extracted = with_lifting(\n the_ir, self.typingctx, self.targetctx, 
self.flags,\n locals={},\n )\n self.assertEqual(len(extracted), expect_count)\n cres = self.compile_ir(new_ir)\n\n with captured_stdout() as out:\n cres.entry_point()\n\n self.assertEqual(out.getvalue(), expected_stdout)\n\n def compile_ir(self, the_ir, args=(), return_type=None):\n typingctx = self.typingctx\n targetctx = self.targetctx\n flags = self.flags\n # Register the contexts in case for nested @jit or @overload calls\n with cpu_target.nested_context(typingctx, targetctx):\n return compile_ir(typingctx, targetctx, the_ir, args,\n return_type, flags, locals={})\n\n\nclass TestLiftByPass(BaseTestWithLifting):\n\n def test_lift1(self):\n self.check_extracted_with(lift1, expect_count=1,\n expected_stdout=\"A\\nC\\n\")\n\n def test_lift2(self):\n self.check_extracted_with(lift2, expect_count=2,\n expected_stdout=\"A 1\\nD 3\\n\")\n\n def test_lift3(self):\n self.check_extracted_with(lift3, expect_count=1,\n expected_stdout=\"A 1 100\\nD 2 101\\n\")\n\n def test_lift4(self):\n self.check_extracted_with(lift4, expect_count=2,\n expected_stdout=\"A 0\\nE 11\\n\")\n\n def test_lift5(self):\n self.check_extracted_with(lift5, expect_count=0,\n expected_stdout=\"A\\n\")\n\n\nclass TestLiftCall(BaseTestWithLifting):\n\n def check_same_semantic(self, func):\n \"\"\"Ensure same semantic with non-jitted code\n \"\"\"\n jitted = njit(func)\n with captured_stdout() as got:\n jitted()\n\n with captured_stdout() as expect:\n func()\n\n self.assertEqual(got.getvalue(), expect.getvalue())\n\n def test_liftcall1(self):\n self.check_extracted_with(liftcall1, expect_count=1,\n expected_stdout=\"A 1\\nB 2\\n\")\n self.check_same_semantic(liftcall1)\n\n def test_liftcall2(self):\n self.check_extracted_with(liftcall2, expect_count=2,\n expected_stdout=\"A 1\\nB 2\\nC 12\\n\")\n self.check_same_semantic(liftcall2)\n\n def test_liftcall3(self):\n self.check_extracted_with(liftcall3, expect_count=2,\n expected_stdout=\"A 1\\nB 2\\nC 47\\n\")\n self.check_same_semantic(liftcall3)\n\n def test_liftcall4(self):\n accept = (errors.TypingError, errors.NumbaRuntimeError,\n errors.NumbaValueError, errors.CompilerError)\n with self.assertRaises(accept) as raises:\n njit(liftcall4)()\n # Known error. 
We only support one context manager per function\n # for body that are lifted.\n msg = (\"compiler re-entrant to the same function signature\")\n self.assertIn(msg, str(raises.exception))\n\n # 3.7 fails to interpret the bytecode for this example\n @unittest.skipIf(PYVERSION <= (3, 8),\n \"unsupported on py3.8 and before\")\n def test_liftcall5(self):\n with self.assertRaises(errors.CompilerError) as raises:\n njit(liftcall5)()\n # Make sure we can detect a break-within-with and have a reasonable\n # error.\n msg = (\"unsupported control flow: with-context contains branches\")\n self.assertIn(msg, str(raises.exception))\n\n\ndef expected_failure_for_list_arg(fn):\n def core(self, *args, **kwargs):\n with self.assertRaises(errors.TypingError) as raises:\n fn(self, *args, **kwargs)\n self.assertIn('Does not support list type',\n str(raises.exception))\n return core\n\n\ndef expected_failure_for_function_arg(fn):\n def core(self, *args, **kwargs):\n with self.assertRaises(errors.TypingError) as raises:\n fn(self, *args, **kwargs)\n self.assertIn('Does not support function type',\n str(raises.exception))\n return core\n\n\nclass TestLiftObj(MemoryLeak, TestCase):\n\n def setUp(self):\n warnings.simplefilter(\"error\", errors.NumbaWarning)\n\n def tearDown(self):\n warnings.resetwarnings()\n\n def assert_equal_return_and_stdout(self, pyfunc, *args):\n py_args = copy.deepcopy(args)\n c_args = copy.deepcopy(args)\n cfunc = njit(pyfunc)\n\n with captured_stdout() as stream:\n expect_res = pyfunc(*py_args)\n expect_out = stream.getvalue()\n\n # avoid compiling during stdout-capturing for easier print-debugging\n cfunc.compile(tuple(map(typeof, c_args)))\n with captured_stdout() as stream:\n got_res = cfunc(*c_args)\n got_out = stream.getvalue()\n\n self.assertEqual(expect_out, got_out)\n self.assertPreciseEqual(expect_res, got_res)\n\n def test_lift_objmode_basic(self):\n def bar(ival):\n print(\"ival =\", {'ival': ival // 2})\n\n def foo(ival):\n ival += 1\n with objmode_context:\n bar(ival)\n return ival + 1\n\n def foo_nonglobal(ival):\n ival += 1\n with numba.objmode:\n bar(ival)\n return ival + 1\n\n self.assert_equal_return_and_stdout(foo, 123)\n self.assert_equal_return_and_stdout(foo_nonglobal, 123)\n\n def test_lift_objmode_array_in(self):\n def bar(arr):\n print({'arr': arr // 2})\n # arr is modified. 
the effect is visible outside.\n arr *= 2\n\n def foo(nelem):\n arr = np.arange(nelem).astype(np.int64)\n with objmode_context:\n # arr is modified inplace inside bar()\n bar(arr)\n return arr + 1\n\n nelem = 10\n self.assert_equal_return_and_stdout(foo, nelem)\n\n def test_lift_objmode_define_new_unused(self):\n def bar(y):\n print(y)\n\n def foo(x):\n with objmode_context():\n y = 2 + x # defined but unused outside\n a = np.arange(y) # defined but unused outside\n bar(a)\n return x\n\n arg = 123\n self.assert_equal_return_and_stdout(foo, arg)\n\n def test_lift_objmode_return_simple(self):\n def inverse(x):\n print(x)\n return 1 / x\n\n def foo(x):\n with objmode_context(y=\"float64\"):\n y = inverse(x)\n return x, y\n\n def foo_nonglobal(x):\n with numba.objmode(y=\"float64\"):\n y = inverse(x)\n return x, y\n\n arg = 123\n self.assert_equal_return_and_stdout(foo, arg)\n self.assert_equal_return_and_stdout(foo_nonglobal, arg)\n\n def test_lift_objmode_return_array(self):\n def inverse(x):\n print(x)\n return 1 / x\n\n def foo(x):\n with objmode_context(y=\"float64[:]\", z=\"int64\"):\n y = inverse(x)\n z = int(y[0])\n return x, y, z\n\n arg = np.arange(1, 10, dtype=np.float64)\n self.assert_equal_return_and_stdout(foo, arg)\n\n @expected_failure_for_list_arg\n def test_lift_objmode_using_list(self):\n def foo(x):\n with objmode_context(y=\"float64[:]\"):\n print(x)\n x[0] = 4\n print(x)\n y = [1, 2, 3] + x\n y = np.asarray([1 / i for i in y])\n return x, y\n\n arg = [1, 2, 3]\n self.assert_equal_return_and_stdout(foo, arg)\n\n def test_lift_objmode_var_redef(self):\n def foo(x):\n for x in range(x):\n pass\n if x:\n x += 1\n with objmode_context(x=\"intp\"):\n print(x)\n x -= 1\n print(x)\n for i in range(x):\n x += i\n print(x)\n return x\n\n arg = 123\n self.assert_equal_return_and_stdout(foo, arg)\n\n @expected_failure_for_list_arg\n def test_case01_mutate_list_ahead_of_ctx(self):\n def foo(x, z):\n x[2] = z\n\n with objmode_context():\n # should print [1, 2, 15] but prints [1, 2, 3]\n print(x)\n\n with objmode_context():\n x[2] = 2 * z\n # should print [1, 2, 30] but prints [1, 2, 15]\n print(x)\n\n return x\n\n self.assert_equal_return_and_stdout(foo, [1, 2, 3], 15)\n\n def test_case02_mutate_array_ahead_of_ctx(self):\n def foo(x, z):\n x[2] = z\n\n with objmode_context():\n # should print [1, 2, 15]\n print(x)\n\n with objmode_context():\n x[2] = 2 * z\n # should print [1, 2, 30]\n print(x)\n\n return x\n\n x = np.array([1, 2, 3])\n self.assert_equal_return_and_stdout(foo, x, 15)\n\n @expected_failure_for_list_arg\n def test_case03_create_and_mutate(self):\n def foo(x):\n with objmode_context(y='List(int64)'):\n y = [1, 2, 3]\n with objmode_context():\n y[2] = 10\n return y\n self.assert_equal_return_and_stdout(foo, 1)\n\n def test_case04_bogus_variable_type_info(self):\n\n def foo(x):\n # should specifying nonsense type info be considered valid?\n with objmode_context(k=\"float64[:]\"):\n print(x)\n return x\n\n x = np.array([1, 2, 3])\n cfoo = njit(foo)\n with self.assertRaises(errors.TypingError) as raises:\n cfoo(x)\n self.assertIn(\n \"Invalid type annotation on non-outgoing variables\",\n str(raises.exception),\n )\n\n def test_case05_bogus_type_info(self):\n def foo(x):\n # should specifying the wrong type info be considered valid?\n # z is complex.\n # Note: for now, we will coerce for scalar and raise for array\n with objmode_context(z=\"float64[:]\"):\n z = x + 1.j\n return z\n\n x = np.array([1, 2, 3])\n cfoo = njit(foo)\n with self.assertRaises(TypeError) as raises:\n 
got = cfoo(x)\n self.assertIn(\n (\"can't unbox array from PyObject into native value.\"\n \" The object maybe of a different type\"),\n str(raises.exception),\n )\n\n def test_case06_double_objmode(self):\n def foo(x):\n # would nested ctx in the same scope ever make sense? Is this\n # pattern useful?\n with objmode_context():\n #with npmmode_context(): not implemented yet\n with objmode_context():\n print(x)\n return x\n\n with self.assertRaises(errors.TypingError) as raises:\n njit(foo)(123)\n # Check that an error occurred in with-lifting in objmode\n pat = (\"During: resolving callee type: \"\n \"type\\(ObjModeLiftedWith\\(<.*>\\)\\)\")\n self.assertRegex(str(raises.exception), pat)\n\n def test_case07_mystery_key_error(self):\n # this raises a key error\n def foo(x):\n with objmode_context():\n t = {'a': x}\n u = 3\n return x, t, u\n x = np.array([1, 2, 3])\n cfoo = njit(foo)\n\n with self.assertRaises(errors.TypingError) as raises:\n cfoo(x)\n\n exstr = str(raises.exception)\n self.assertIn(\"Missing type annotation on outgoing variable(s): \"\n \"['t', 'u']\",\n exstr)\n self.assertIn(\"Example code: with objmode\"\n \"(t='<add_type_as_string_here>')\",\n exstr)\n\n def test_case08_raise_from_external(self):\n # this segfaults, expect its because the dict needs to raise as '2' is\n # not in the keys until a later loop (looking for `d['0']` works fine).\n d = dict()\n\n def foo(x):\n for i in range(len(x)):\n with objmode_context():\n k = str(i)\n v = x[i]\n d[k] = v\n print(d['2'])\n return x\n\n x = np.array([1, 2, 3])\n cfoo = njit(foo)\n with self.assertRaises(KeyError) as raises:\n cfoo(x)\n self.assertEqual(str(raises.exception), \"'2'\")\n\n def test_case09_explicit_raise(self):\n def foo(x):\n with objmode_context():\n raise ValueError()\n return x\n\n x = np.array([1, 2, 3])\n cfoo = njit(foo)\n with self.assertRaises(errors.CompilerError) as raises:\n cfoo(x)\n self.assertIn(\n ('unsupported control flow due to raise statements inside '\n 'with block'),\n str(raises.exception),\n )\n\n @expected_failure_for_list_arg\n def test_case10_mutate_across_contexts(self):\n # This shouldn't work due to using List as input.\n def foo(x):\n with objmode_context(y='List(int64)'):\n y = [1, 2, 3]\n with objmode_context():\n y[2] = 10\n return y\n\n x = np.array([1, 2, 3])\n self.assert_equal_return_and_stdout(foo, x)\n\n def test_case10_mutate_array_across_contexts(self):\n # Sub-case of case-10.\n def foo(x):\n with objmode_context(y='int64[:]'):\n y = np.asarray([1, 2, 3], dtype='int64')\n with objmode_context():\n # Note: `y` is not an output.\n y[2] = 10\n return y\n\n x = np.array([1, 2, 3])\n self.assert_equal_return_and_stdout(foo, x)\n\n def test_case11_define_function_in_context(self):\n # should this work? 
no, global name 'bar' is not defined\n def foo(x):\n with objmode_context():\n def bar(y):\n return y + 1\n return x\n\n x = np.array([1, 2, 3])\n cfoo = njit(foo)\n with self.assertRaises(NameError) as raises:\n cfoo(x)\n self.assertIn(\n \"global name 'bar' is not defined\",\n str(raises.exception),\n )\n\n def test_case12_njit_inside_a_objmode_ctx(self):\n # TODO: is this still the cases?\n # this works locally but not inside this test, probably due to the way\n # compilation is being done\n def bar(y):\n return y + 1\n\n def foo(x):\n with objmode_context(y='int64[:]'):\n y = njit(bar)(x).astype('int64')\n return x + y\n\n x = np.array([1, 2, 3])\n self.assert_equal_return_and_stdout(foo, x)\n\n def test_case13_branch_to_objmode_ctx(self):\n # Checks for warning in dataflow.py due to mishandled stack offset\n # dataflow.py:57: RuntimeWarning: inconsistent stack offset ...\n def foo(x, wobj):\n if wobj:\n with objmode_context(y='int64[:]'):\n y = (x + 1).astype('int64')\n else:\n y = x + 2\n\n return x + y\n\n x = np.array([1, 2, 3], dtype='int64')\n\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\", RuntimeWarning)\n self.assert_equal_return_and_stdout(foo, x, True)\n # Assert no warnings from dataflow.py\n for each in w:\n self.assertFalse(each.filename.endswith('dataflow.py'),\n msg='there were warnings in dataflow.py')\n\n def test_case14_return_direct_from_objmode_ctx(self):\n # fails with:\n # AssertionError: Failed in nopython mode pipeline (step: Handle with contexts)\n # ending offset is not a label\n def foo(x):\n with objmode_context(x='int64[:]'):\n return x\n x = np.array([1, 2, 3])\n cfoo = njit(foo)\n with self.assertRaises(errors.CompilerError) as raises:\n cfoo(x)\n self.assertIn(\n ('unsupported control flow due to return statements inside '\n 'with block'),\n str(raises.exception),\n )\n\n # No easy way to handle this yet.\n @unittest.expectedFailure\n def test_case15_close_over_objmode_ctx(self):\n # Fails with Unsupported constraint encountered: enter_with $phi8.1\n def foo(x):\n j = 10\n\n def bar(x):\n with objmode_context(x='int64[:]'):\n print(x)\n return x + j\n return bar(x) + 2\n x = np.array([1, 2, 3])\n self.assert_equal_return_and_stdout(foo, x)\n\n @skip_unless_scipy\n def test_case16_scipy_call_in_objmode_ctx(self):\n from scipy import sparse as sp\n\n def foo(x):\n with objmode_context(k='int64'):\n print(x)\n spx = sp.csr_matrix(x)\n # the np.int64 call is pointless, works around:\n # https://github.com/scipy/scipy/issues/10206\n # which hit the SciPy 1.3 release.\n k = np.int64(spx[0, 0])\n return k\n x = np.array([1, 2, 3])\n self.assert_equal_return_and_stdout(foo, x)\n\n def test_case17_print_own_bytecode(self):\n import dis\n\n def foo(x):\n with objmode_context():\n dis.dis(foo)\n x = np.array([1, 2, 3])\n self.assert_equal_return_and_stdout(foo, x)\n\n @expected_failure_for_function_arg\n def test_case18_njitfunc_passed_to_objmode_ctx(self):\n def foo(func, x):\n with objmode_context():\n func(x[0])\n\n x = np.array([1, 2, 3])\n fn = njit(lambda z: z + 5)\n self.assert_equal_return_and_stdout(foo, fn, x)\n\n def test_case19_recursion(self):\n def foo(x):\n with objmode_context():\n if x == 0:\n return 7\n ret = foo(x - 1)\n return ret\n x = np.array([1, 2, 3])\n cfoo = njit(foo)\n with self.assertRaises(errors.CompilerError) as raises:\n cfoo(x)\n msg = \"unsupported control flow due to return statements inside with block\"\n self.assertIn(msg, str(raises.exception))\n\n @unittest.expectedFailure\n def 
test_case20_rng_works_ok(self):\n def foo(x):\n np.random.seed(0)\n y = np.random.rand()\n with objmode_context(z=\"float64\"):\n # It's known that the random state does not sync\n z = np.random.rand()\n return x + z + y\n\n x = np.array([1, 2, 3])\n self.assert_equal_return_and_stdout(foo, x)\n\n def test_case21_rng_seed_works_ok(self):\n def foo(x):\n np.random.seed(0)\n y = np.random.rand()\n with objmode_context(z=\"float64\"):\n # Similar to test_case20_rng_works_ok but call seed\n np.random.seed(0)\n z = np.random.rand()\n return x + z + y\n\n x = np.array([1, 2, 3])\n self.assert_equal_return_and_stdout(foo, x)\n\n def test_example01(self):\n # Example from _ObjModeContextType.__doc__\n def bar(x):\n return np.asarray(list(reversed(x.tolist())))\n\n @njit\n def foo():\n x = np.arange(5)\n with objmode(y='intp[:]'): # annotate return type\n # this region is executed by object-mode.\n y = x + bar(x)\n return y\n\n self.assertPreciseEqual(foo(), foo.py_func())\n self.assertIs(objmode, objmode_context)\n\n def test_objmode_in_overload(self):\n def foo(s):\n pass\n\n @overload(foo)\n def foo_overload(s):\n def impl(s):\n with objmode(out='intp'):\n out = s + 3\n return out\n return impl\n\n @numba.njit\n def f():\n return foo(1)\n\n self.assertEqual(f(), 1 + 3)\n\n def test_objmode_gv_variable(self):\n @njit\n def global_var():\n with objmode(val=gv_type):\n val = 12.3\n return val\n\n ret = global_var()\n # the result is truncated because of the intp return-type\n self.assertIsInstance(ret, int)\n self.assertEqual(ret, 12)\n\n def test_objmode_gv_variable_error(self):\n @njit\n def global_var():\n with objmode(val=gv_type2):\n val = 123\n return val\n\n with self.assertRaisesRegex(\n errors.CompilerError,\n (\"Error handling objmode argument 'val'. \"\n \"Global 'gv_type2' is not defined\\.\")\n ):\n global_var()\n\n def test_objmode_gv_mod_attr(self):\n @njit\n def modattr1():\n with objmode(val=types.intp):\n val = 12.3\n return val\n\n @njit\n def modattr2():\n with objmode(val=numba.types.intp):\n val = 12.3\n return val\n\n for fn in (modattr1, modattr2):\n with self.subTest(fn=str(fn)):\n ret = fn()\n # the result is truncated because of the intp return-type\n self.assertIsInstance(ret, int)\n self.assertEqual(ret, 12)\n\n def test_objmode_gv_mod_attr_error(self):\n @njit\n def moderror():\n with objmode(val=types.THIS_DOES_NOT_EXIST):\n val = 12.3\n return val\n with self.assertRaisesRegex(\n errors.CompilerError,\n (\"Error handling objmode argument 'val'. \"\n \"Getattr cannot be resolved at compile-time\"),\n ):\n moderror()\n\n def test_objmode_gv_mod_attr_error_multiple(self):\n @njit\n def moderror():\n with objmode(v1=types.intp, v2=types.THIS_DOES_NOT_EXIST,\n v3=types.float32):\n v1 = 12.3\n v2 = 12.3\n v3 = 12.3\n return val\n with self.assertRaisesRegex(\n errors.CompilerError,\n (\"Error handling objmode argument 'v2'. 
\"\n \"Getattr cannot be resolved at compile-time\"),\n ):\n moderror()\n\n def test_objmode_closure_type_in_overload(self):\n def foo():\n pass\n\n @overload(foo)\n def foo_overload():\n shrubbery = types.float64[:]\n def impl():\n with objmode(out=shrubbery):\n out = np.arange(10).astype(np.float64)\n return out\n return impl\n\n @njit\n def bar():\n return foo()\n\n self.assertPreciseEqual(bar(), np.arange(10).astype(np.float64))\n\n def test_objmode_closure_type_in_overload_error(self):\n def foo():\n pass\n\n @overload(foo)\n def foo_overload():\n shrubbery = types.float64[:]\n def impl():\n with objmode(out=shrubbery):\n out = np.arange(10).astype(np.float64)\n return out\n # Remove closure var.\n # Otherwise, it will \"shrubbery\" will be a global\n del shrubbery\n return impl\n\n @njit\n def bar():\n return foo()\n\n with self.assertRaisesRegex(\n errors.TypingError,\n (\"Error handling objmode argument 'out'. \"\n \"Freevar 'shrubbery' is not defined\"),\n ):\n bar()\n\n def test_objmode_invalid_use(self):\n @njit\n def moderror():\n with objmode(bad=1 + 1):\n out = 1\n return val\n with self.assertRaisesRegex(\n errors.CompilerError,\n (\"Error handling objmode argument 'bad'. \"\n \"The value must be a compile-time constant either as \"\n \"a non-local variable or a getattr expression that \"\n \"refers to a Numba type.\"),\n ):\n moderror()\n\n def test_objmode_multi_type_args(self):\n array_ty = types.int32[:]\n @njit\n def foo():\n # t1 is a string\n # t2 is a global type\n # t3 is a non-local/freevar\n with objmode(t1=\"float64\", t2=gv_type, t3=array_ty):\n t1 = 793856.5\n t2 = t1 # to observe truncation\n t3 = np.arange(5).astype(np.int32)\n return t1, t2, t3\n\n t1, t2, t3 = foo()\n self.assertPreciseEqual(t1, 793856.5)\n self.assertPreciseEqual(t2, 793856)\n self.assertPreciseEqual(t3, np.arange(5).astype(np.int32))\n\n def test_objmode_jitclass(self):\n spec = [\n ('value', types.int32), # a simple scalar field\n ('array', types.float32[:]), # an array field\n ]\n\n @jitclass(spec)\n class Bag(object):\n def __init__(self, value):\n self.value = value\n self.array = np.zeros(value, dtype=np.float32)\n\n @property\n def size(self):\n return self.array.size\n\n def increment(self, val):\n for i in range(self.size):\n self.array[i] += val\n return self.array\n\n @staticmethod\n def add(x, y):\n return x + y\n\n n = 21\n mybag = Bag(n)\n\n def foo():\n pass\n\n @overload(foo)\n def foo_overload():\n shrubbery = mybag._numba_type_\n def impl():\n with objmode(out=shrubbery):\n out = Bag(123)\n out.increment(3)\n return out\n return impl\n\n @njit\n def bar():\n return foo()\n\n z = bar()\n self.assertIsInstance(z, Bag)\n self.assertEqual(z.add(2, 3), 2 + 3)\n exp_array = np.zeros(123, dtype=np.float32) + 3\n self.assertPreciseEqual(z.array, exp_array)\n\n\n @staticmethod\n def case_objmode_cache(x):\n with objmode(output='float64'):\n output = x / 10\n return output\n\n def test_objmode_reflected_list(self):\n ret_type = typeof([1, 2, 3, 4, 5])\n @njit\n def test2():\n with objmode(out=ret_type):\n out = [1, 2, 3, 4, 5]\n return out\n\n with self.assertRaises(errors.CompilerError) as raises:\n test2()\n self.assertRegex(\n str(raises.exception),\n (r\"Objmode context failed. \"\n r\"Argument 'out' is declared as an unsupported type: \"\n r\"reflected list\\(int(32|64)\\)<iv=None>. 
\"\n r\"Reflected types are not supported.\"),\n )\n\n def test_objmode_reflected_set(self):\n ret_type = typeof({1, 2, 3, 4, 5})\n @njit\n def test2():\n with objmode(result=ret_type):\n result = {1, 2, 3, 4, 5}\n return result\n\n with self.assertRaises(errors.CompilerError) as raises:\n test2()\n self.assertRegex(\n str(raises.exception),\n (r\"Objmode context failed. \"\n r\"Argument 'result' is declared as an unsupported type: \"\n r\"reflected set\\(int(32|64)\\). \"\n r\"Reflected types are not supported.\"),\n )\n\n def test_objmode_typed_dict(self):\n ret_type = types.DictType(types.unicode_type, types.int64)\n @njit\n def test4():\n with objmode(res=ret_type):\n res = {'A': 1, 'B': 2}\n return res\n\n with self.assertRaises(TypeError) as raises:\n test4()\n self.assertIn(\n (\"can't unbox a <class 'dict'> \"\n \"as a <class 'numba.typed.typeddict.Dict'>\"),\n str(raises.exception),\n )\n\n def test_objmode_typed_list(self):\n ret_type = types.ListType(types.int64)\n @njit\n def test4():\n with objmode(res=ret_type):\n res = [1, 2]\n return res\n\n with self.assertRaises(TypeError) as raises:\n test4()\n # Note: in python3.6, the Generic[T] on typedlist is causing it to\n # format differently.\n self.assertRegex(\n str(raises.exception),\n (r\"can't unbox a <class 'list'> \"\n r\"as a (<class ')?numba.typed.typedlist.List('>)?\"),\n )\n\n def test_objmode_use_of_view(self):\n # See issue #7158, npm functionality should only be validated if in\n # npm.\n @njit\n def foo(x):\n with numba.objmode(y=\"int64[::1]\"):\n y = x.view(\"int64\")\n return y\n\n a = np.ones(1, np.int64).view('float64')\n expected = foo.py_func(a)\n got = foo(a)\n self.assertPreciseEqual(expected, got)\n\n\ndef case_inner_pyfunc(x):\n return x / 10\n\n\ndef case_objmode_cache(x):\n with objmode(output='float64'):\n output = case_inner_pyfunc(x)\n return output\n\n\nclass TestLiftObjCaching(MemoryLeak, TestCase):\n # Warnings in this test class are converted to errors\n\n def setUp(self):\n warnings.simplefilter(\"error\", errors.NumbaWarning)\n\n def tearDown(self):\n warnings.resetwarnings()\n\n def check(self, py_func):\n first = njit(cache=True)(py_func)\n self.assertEqual(first(123), 12.3)\n\n second = njit(cache=True)(py_func)\n self.assertFalse(second._cache_hits)\n self.assertEqual(second(123), 12.3)\n self.assertTrue(second._cache_hits)\n\n def test_objmode_caching_basic(self):\n def pyfunc(x):\n with objmode(output='float64'):\n output = x / 10\n return output\n\n self.check(pyfunc)\n\n def test_objmode_caching_call_closure_bad(self):\n def other_pyfunc(x):\n return x / 10\n\n def pyfunc(x):\n with objmode(output='float64'):\n output = other_pyfunc(x)\n return output\n\n self.check(pyfunc)\n\n def test_objmode_caching_call_closure_good(self):\n self.check(case_objmode_cache)\n\n\nclass TestBogusContext(BaseTestWithLifting):\n def test_undefined_global(self):\n the_ir = get_func_ir(lift_undefiend)\n\n with self.assertRaises(errors.CompilerError) as raises:\n with_lifting(\n the_ir, self.typingctx, self.targetctx, self.flags, locals={},\n )\n self.assertIn(\n \"Undefined variable used as context manager\",\n str(raises.exception),\n )\n\n def test_invalid(self):\n the_ir = get_func_ir(lift_invalid)\n\n with self.assertRaises(errors.CompilerError) as raises:\n with_lifting(\n the_ir, self.typingctx, self.targetctx, self.flags, locals={},\n )\n self.assertIn(\n \"Unsupported context manager in use\",\n str(raises.exception),\n )\n\n def test_with_as_fails_gracefully(self):\n @njit\n def foo():\n with 
open('') as f:\n pass\n\n with self.assertRaises(errors.UnsupportedError) as raises:\n foo()\n\n excstr = str(raises.exception)\n msg = (\"The 'with (context manager) as (variable):' construct is not \"\n \"supported.\")\n self.assertIn(msg, excstr)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
]
| [
[
"numpy.array",
"numpy.random.rand",
"numpy.asarray",
"numpy.zeros",
"numpy.random.seed",
"numpy.ones",
"numpy.arange",
"numpy.int64",
"scipy.sparse.csr_matrix"
]
]
|
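The test file above exercises numba's objmode lifting from many angles; for orientation, here is a minimal, self-contained sketch of the core pattern under test, assuming only `numba` and `numpy` are installed (the helper name `py_helper` and the numbers are illustrative, not taken from the suite):

```python
import numpy as np
from numba import njit, objmode

def py_helper(x):
    # arbitrary object-mode (pure Python) work
    return float(np.log(x))

@njit
def foo(x):
    # variables leaving the with-block must carry a type annotation
    with objmode(y="float64"):
        y = py_helper(x)
    return x + y

print(foo(10.0))  # 10.0 + log(10.0)
```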
napsterstiffler/faceoff | [
"af0c92e3803e74bfd5922ac980457728427d2605"
]
| [
"experimental/ObjectPrediction/DeepPredict.py"
]
| [
"import numpy as np\n# import argparse\nimport time\nimport cv2\nfrom imutils.video import VideoStream\n\n\n# load the input image from disk\n# image = cv2.imread('images/doggo.jpeg')\nvs = VideoStream(src=0).start()\n\n# load the class labels from disk\nrows = open('synset_words.txt').read().strip().split(\"\\n\")\nclasses = [r[r.find(\" \") + 1:].split(\",\")[0] for r in rows]\n\n\n\n\nwhile True:\n\timage = vs.read()\n\tblob = cv2.dnn.blobFromImage(image, 1, (224, 224), (104, 117, 123))\n\n\n\tnet = cv2.dnn.readNetFromCaffe('bvlc_googlenet.prototxt', 'bvlc_googlenet.caffemodel')\n\n\t# set the blob as input to the network and perform a forward-pass to\n\t# obtain our output classification\n\tnet.setInput(blob)\n\tstart = time.time()\n\tpreds = net.forward()\n\tend = time.time()\n\tprint(\"[INFO] classification took {:.5} seconds\".format(end - start))\n\n\t# sort the indexes of the probabilities in descending order (higher\n\t# probabilitiy first) and grab the top-5 predictions\n\tidxs = np.argsort(preds[0])[::-1][:5]\n\n\t# loop over the top-5 predictions and display them\n\tfor (i, idx) in enumerate(idxs):\n\t\t# draw the top prediction on the input image\n\t\tif i == 0:\n\t\t\ttext = \"Prediction: {}, {:.2f}%\".format(classes[idx], preds[0][idx] * 100)\n\t\t\tcv2.putText(image, text, (5, 25), cv2.FONT_HERSHEY_SIMPLEX,\n\t\t\t\t0.7, (0, 0, 255), 2)\n\n\t# display the predicted label + associated probability to the\n\t# console\t\n\t\tprint(\"[INFO] {}. label: {}, probability: {:.5}\".format(i + 1, classes[idx], preds[0][idx]))\n\n\t# display the output image\n\tcv2.imshow(\"Image\", image)\n\tkey = cv2.waitKey(1) & 0xFF\n\n\tif key == ord(\"q\"):\n\t\tbreak\n\ncv2.destroyAllWindows()\nvs.stop()"
]
| [
[
"numpy.argsort"
]
]
|
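The only NumPy call recorded for this row is `numpy.argsort`, used for the descending top-5 selection; a toy run of that idiom (the scores are invented):

```python
import numpy as np

preds = np.array([0.05, 0.60, 0.10, 0.20, 0.05])
top5 = np.argsort(preds)[::-1][:5]  # indices ordered by descending score
print(top5)         # [1 3 2 4 0] -- ties keep ascending index order before the reversal
print(preds[top5])  # [0.6  0.2  0.1  0.05 0.05]
```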
jbhurat/GamestonkTerminal | [
"419c3691db220c467d2979b19ca308b3b800c0bd"
]
| [
"gamestonk_terminal/options/tradier_model.py"
]
| [
"\"\"\"Tradier options model\"\"\"\n__docformat__ = \"numpy\"\n\nimport requests\nimport pandas as pd\n\nfrom gamestonk_terminal import config_terminal as cfg\n\noption_columns = [\n \"symbol\",\n \"bid\",\n \"ask\",\n \"strike\",\n \"bidsize\",\n \"asksize\",\n \"volume\",\n \"open_interest\",\n \"option_type\",\n]\ngreek_columns = [\"delta\", \"gamma\", \"theta\", \"vega\", \"ask_iv\", \"bid_iv\", \"mid_iv\"]\ndf_columns = option_columns + greek_columns\n\ndefault_columns = [\n \"mid_iv\",\n \"vega\",\n \"delta\",\n \"gamma\",\n \"theta\",\n \"volume\",\n \"open_interest\",\n \"bid\",\n \"ask\",\n]\n\n# pylint: disable=no-else-return\n\n\ndef option_expirations(ticker: str):\n \"\"\"Get available expiration dates for given ticker\n\n Parameters\n ----------\n ticker: str\n Ticker to get expirations for\n\n Returns\n -------\n dates: List[str]\n List of of available expirations\n \"\"\"\n r = requests.get(\n \"https://sandbox.tradier.com/v1/markets/options/expirations\",\n params={\"symbol\": ticker, \"includeAllRoots\": \"true\", \"strikes\": \"false\"},\n headers={\n \"Authorization\": f\"Bearer {cfg.TRADIER_TOKEN}\",\n \"Accept\": \"application/json\",\n },\n )\n if r.status_code == 200:\n dates = r.json()[\"expirations\"][\"date\"]\n return dates\n else:\n print(\"Tradier request failed. Check token. \\n\")\n return []\n\n\ndef get_option_chains(symbol: str, expiry: str) -> pd.DataFrame:\n \"\"\"Display option chains [Source: Tradier]\"\n\n Parameters\n ----------\n symbol : str\n Ticker to get options for\n expiry : str\n Expiration date in the form of \"YYYY-MM-DD\"\n\n Returns\n -------\n chains: pd.DataFrame\n Dataframe with options for the given Symbol and Expiration date\n \"\"\"\n params = {\"symbol\": symbol, \"expiration\": expiry, \"greeks\": \"true\"}\n\n headers = {\n \"Authorization\": f\"Bearer {cfg.TRADIER_TOKEN}\",\n \"Accept\": \"application/json\",\n }\n\n response = requests.get(\n \"https://sandbox.tradier.com/v1/markets/options/chains\",\n params=params,\n headers=headers,\n )\n if response.status_code != 200:\n print(\"Error in request. 
Check TRADIER_TOKEN\\n\")\n return pd.DataFrame()\n\n chains = process_chains(response)\n return chains\n\n\ndef process_chains(response: requests.models.Response) -> pd.DataFrame:\n \"\"\"Function to take in the requests.get and return a DataFrame\n\n Parameters\n ----------\n response: requests.models.Response\n This is the response from tradier api.\n\n Returns\n -------\n opt_chain: pd.DataFrame\n Dataframe with all available options\n \"\"\"\n json_response = response.json()\n options = json_response[\"options\"][\"option\"]\n\n opt_chain = pd.DataFrame(columns=df_columns)\n for idx, option in enumerate(options):\n data = [option[col] for col in option_columns]\n data += [option[\"greeks\"][col] for col in greek_columns]\n opt_chain.loc[idx, :] = data\n\n return opt_chain\n\n\ndef last_price(ticker: str):\n \"\"\"Makes api request for last price\n\n Parameters\n ----------\n ticker: str\n Ticker\n\n Returns\n -------\n float:\n Last price\n \"\"\"\n r = requests.get(\n \"https://sandbox.tradier.com/v1/markets/quotes\",\n params={\"symbols\": ticker, \"includeAllRoots\": \"true\", \"strikes\": \"false\"},\n headers={\n \"Authorization\": f\"Bearer {cfg.TRADIER_TOKEN}\",\n \"Accept\": \"application/json\",\n },\n )\n if r.status_code == 200:\n return float(r.json()[\"quotes\"][\"quote\"][\"last\"])\n else:\n print(\"Error getting last price\")\n return None\n\n\ndef historical_prices(symbol: str) -> pd.DataFrame:\n \"\"\"Get historical options prices\n\n Parameters\n ----------\n symbol: str\n OCC option chain symbol\n\n Returns\n -------\n df_hist: pd.DataFrame\n Dataframe of historical options\n \"\"\"\n response = requests.get(\n \"https://sandbox.tradier.com/v1/markets/history\",\n params={\"symbol\": {symbol}, \"interval\": \"daily\"},\n headers={\n \"Authorization\": f\"Bearer {cfg.TRADIER_TOKEN}\",\n \"Accept\": \"application/json\",\n },\n )\n\n if response.status_code != 200:\n print(\"Error with request\")\n return pd.DataFrame()\n\n data = response.json()[\"history\"]\n if not data:\n print(\"No historical data available\")\n return pd.DataFrame()\n\n df_hist = pd.DataFrame(data[\"day\"]).set_index(\"date\")\n\n return df_hist\n"
]
| [
[
"pandas.DataFrame"
]
]
|
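`process_chains` above fills a `pandas.DataFrame` one row at a time through `.loc`; for comparison, a hedged sketch of the same construction done in a single call, with invented option rows (`pd.DataFrame.from_records` is an alternative, not what the file itself uses):

```python
import pandas as pd

options = [
    {"symbol": "AAPL210618C00130000", "bid": 1.2, "ask": 1.3, "strike": 130.0},
    {"symbol": "AAPL210618P00130000", "bid": 0.9, "ask": 1.0, "strike": 130.0},
]
opt_chain = pd.DataFrame.from_records(options)  # one allocation instead of per-row .loc writes
print(opt_chain)
```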
saarimrahman/UMAPs-true-loss | [
"d761cff16daadd02d7003a5fb5a5177666b5cd53"
]
| [
"umap/my_utils.py"
]
| [
"import numpy as np\nimport numba\nimport scipy.sparse\nfrom pykeops.torch import LazyTensor\nimport torch\nimport umap\nfrom sklearn.metrics import pairwise_distances\nfrom scipy.stats import pearsonr, spearmanr\n\n\n# Contains utility function, including for computing similarities and losses\n\n\ndef corr_pdist_subsample(x, y, sample_size, seed=0, metric=\"euclidean\"):\n \"\"\"\n Computes correlation between pairwise distances among the x's and among the y's\n :param x: array of positions for x\n :param y: array of positions for y\n :param sample_size: number of points to subsample from x and y for pairwise distance computation\n :param seed: random seed\n :param metric: Metric used for distances of x, must be a metric available for sklearn.metrics.pairwise_distances\n :return: tuple of Pearson and Spearman correlation coefficient\n \"\"\"\n np.random.seed(seed)\n sample_idx = np.random.randint(len(x), size=sample_size)\n x_sample = x[sample_idx]\n y_sample = y[sample_idx]\n\n x_dists = pairwise_distances(x_sample, metric=metric).flatten()\n y_dists = pairwise_distances(y_sample, metric=\"euclidean\").flatten()\n\n pear_r, _ = pearsonr(x_dists, y_dists)\n spear_r, _ = spearmanr(x_dists, y_dists)\n return pear_r, spear_r\n\n\n\n\ndef acc_kNN(x, y, k, metric=\"euclidean\"):\n \"\"\"\n Computes the accuracy of k nearest neighbors between x and y.\n :param x: array of positions for first dataset\n :param y: arraoy of positions for second dataset\n :param k: number of nearest neighbors considered\n :param metric: Metric used for distances of x, must be a metric available for sklearn.metrics.pairwise_distances\n :return: Share of x's k nearest neighbors that are also y's k nearest neighbors\n \"\"\"\n x_kNN = scipy.sparse.coo_matrix((np.ones(len(x)*k),\n (np.repeat(np.arange(x.shape[0]), k),\n kNN_graph(x, k, metric=metric).cpu().numpy().flatten())),\n shape=(len(x), len(x)))\n y_kNN = scipy.sparse.coo_matrix((np.ones(len(y)*k),\n (np.repeat(np.arange(y.shape[0]), k),\n kNN_graph(y, k).cpu().numpy().flatten())),\n shape=(len(y), len(y)))\n overlap = x_kNN.multiply(y_kNN)\n matched_kNNs = overlap.sum()\n return matched_kNNs / (len(x) * k)\n\ndef kNN_graph(x, k, metric=\"euclidean\"):\n \"\"\"\n Pykeops implementation of a k nearest neighbor graph\n :param x: array containing the dataset\n :param k: number of neartest neighbors\n :param metric: Metric used for distances of x, must be \"euclidean\" or \"cosine\".\n :return: array of shape (len(x), k) containing the indices of the k nearest neighbors of each datapoint\n \"\"\"\n x = torch.tensor(x).to(\"cuda\").contiguous()\n x_i = LazyTensor(x[:, None])\n x_j = LazyTensor(x[None])\n if metric == \"euclidean\":\n dists = ((x_i - x_j)**2).sum(-1)\n elif metric == \"cosine\":\n scalar_prod = (x_i * x_j).sum(-1)\n norm_x_i = (x_i**2).sum(-1).sqrt()\n norm_x_j = (x_j**2).sum(-1).sqrt()\n dists = 1 - scalar_prod / (norm_x_i * norm_x_j)\n else:\n raise NotImplementedError(f\"Metric {metric} is not implemented.\")\n knn_idx = dists.argKmin(K=k+1, dim=0)[:, 1:] # use k+1 neighbours and omit first, which is just the point itself\n return knn_idx\n\ndef kNN_dists(x, k):\n \"\"\"\n Pykeops implementation for computing the euclidean distances to the k nearest neighbors\n :param x: array dataset\n :param k: int, number of nearest neighbors\n :return: array of shape (len(x), k) containing the distances to the k nearest neighbors for each datapoint\n \"\"\"\n x = torch.tensor(x).to(\"cuda\").contiguous()\n x_i = LazyTensor(x[:, None])\n x_j = 
LazyTensor(x[None])\n knn_dists = ((x_i - x_j) ** 2).sum(-1).Kmin(K=k + 1, dim=0)[:, 1:].sqrt() # use k+1 neighbours and omit first, which is just the point\n return knn_dists\n\ndef compute_loss_table(umapper, data):\n \"\"\"\n Computes the losses for different combinations of high- and low-dimensional similarites and for different loss\n methods.\n :param umapper: UMAP instance\n :param data: original data\n :return: dictionary of losses\n \"\"\"\n filtered_graph = filter_graph(umapper.graph_, umapper.n_epochs)\n high_sim = np.array(filtered_graph.todense())\n a, b = umap.umap_.find_ab_params(spread=umapper.spread, min_dist=umapper.min_dist)\n\n low_sim_embd = compute_low_dim_psims(umapper.embedding_, a ,b)\n low_sim_data = compute_low_dim_psims(data, a, b)\n target_sim = get_target_sim(high_sim, negative_sample_rate=umapper.negative_sample_rate)\n\n loss_high_low_embd = reproducing_loss(high_sim, low_sim_embd)\n loss_high_high = reproducing_loss(high_sim, high_sim)\n loss_high_0 = reproducing_loss(high_sim, np.eye(len(high_sim)))\n loss_high_low_data = reproducing_loss(high_sim, low_sim_data)\n\n eff_loss_low_embd = expected_loss(high_sim,\n low_sim_embd,\n negative_sample_rate=umapper.negative_sample_rate)\n eff_loss_target = expected_loss(high_sim,\n target_sim,\n negative_sample_rate=umapper.negative_sample_rate)\n eff_loss_0 = expected_loss(high_sim,\n np.eye(len(high_sim)),\n negative_sample_rate=umapper.negative_sample_rate)\n eff_loss_low_data = expected_loss(high_sim,\n low_sim_data,\n negative_sample_rate=umapper.negative_sample_rate)\n return {\"loss_high_high\": (*loss_high_high, loss_high_high[0] + loss_high_high[1]),\n \"loss_high_0\": (*loss_high_0, loss_high_0[0] + loss_high_0[1]),\n \"loss_high_low_embd\": (*loss_high_low_embd, loss_high_low_embd[0] + loss_high_low_embd[1]),\n \"loss_high_low_data\": (*loss_high_low_data, loss_high_low_data[0] + loss_high_low_data[1]),\n \"eff_loss_target\": (*eff_loss_target, eff_loss_target[0] + eff_loss_target[1]),\n \"eff_loss_0\": (*eff_loss_0, eff_loss_0[0] + eff_loss_0[1]),\n \"eff_loss_low_embd\": (*eff_loss_low_embd, eff_loss_low_embd[0] + eff_loss_low_embd[1]),\n \"eff_loss_low_data\": (*eff_loss_low_data, eff_loss_low_data[0] + eff_loss_low_data[1])\n }\n\n\ndef filter_graph(graph, n_epochs):\n \"\"\"\n Filters graph, so that no entry is too low to yield at least one sample during optimization.\n :param graph: sparse matrix holding the high-dimensional similarities\n :param n_epochs: int Number of optimization epochs\n :return:\n \"\"\"\n graph = graph.copy()\n graph.data[graph.data < graph.data.max() / float(n_epochs)] = 0\n graph.eliminate_zeros()\n return graph\n\n\n## data generation\ndef get_ring(n, r, var=0, noise=\"gauss\"):\n \"\"\"\n Create toy ring dataset\n :param n: int Number of samples\n :param r: float Radius of ring\n :param var: float Controls the width of the ring\n :param noise: string Type of noise model. 
\"gauss\" Gaussian noise, \"uniform\" uniform distribution in ring\n :return:\n \"\"\"\n angles = 2*np.pi * np.arange(n) / n\n points = r * np.stack([np.sin(angles), np.cos(angles)])\n\n if noise==\"gauss\":\n noise = np.random.normal(0.0, var, size=points.shape)\n elif noise==\"uniform\":\n noise_r = np.sqrt(np.random.uniform(0, 1, size=points.shape[1])) * var\n noise_angle = np.pi * np.random.uniform(0, 2, size=points.shape[1])\n noise = np.stack([noise_r * np.sin(noise_angle),\n noise_r * np.cos(noise_angle)])\n else:\n raise NotImplementedError(f\"noise {noise} not supported.\")\n points += noise\n return points.T\n\n## similarities\[email protected]()\ndef low_dim_sim_dist(x, a, b, squared=False):\n \"\"\"\n Smooth function from distances to low-dimensional simiarlity. Compatible with numba.njit\n :param x: np.array pairwise distances\n :param a: float shape parameter a\n :param b: float shape parameter b\n :param squared: bool whether input distances are already squared\n :return: np.array low-dimensional similarities\n \"\"\"\n if not squared:\n return 1.0 / (1.0 + a * x ** (2.0 * b))\n return 1.0 / (1.0 + a * x ** b)\n\ndef low_dim_sim_keops_dist(x, a, b, squared=False):\n \"\"\"\n Smooth function from distances to low-dimensional simiarlity. Compatible with keops\n :param x: keops.LazyTensor pairwise distances\n :param a: float shape parameter a\n :param b: float shape parameter b\n :param squared: bool whether input distances are already squared\n :return: np.array low-dimensional similarities\n \"\"\"\n if not squared:\n return 1.0 / (1.0 + a * x ** (2.0 * b))\n return 1.0 / (1.0 + a * x ** b)\n\ndef compute_low_dim_psim_keops_embd(embedding, a, b):\n \"\"\"\n Computes low-dimensional pairwise similarites from embeddings via keops.\n :param embedding: np.array embedding coordinates\n :param a: float shape parameter a\n :param b: float shape parameter b\n :return: keops.LazyTensor low-dimensional similarities\n \"\"\"\n lazy_embd_i = LazyTensor(torch.tensor(embedding[:, None, :], device=\"cuda\"))\n lazy_embd_j = LazyTensor(torch.tensor(embedding[None], device=\"cuda\"))\n a = LazyTensor(torch.tensor(a, device=\"cuda\", dtype=torch.float32))\n b = LazyTensor(torch.tensor(b, device=\"cuda\", dtype=torch.float32))\n sq_dists = ((lazy_embd_i-lazy_embd_j) ** 2).sum(-1)\n return low_dim_sim_keops_dist(sq_dists, a, b, squared=True)\n\ndef true_sim(x, min_dist, spread):\n return np.ones_like(x) * (x <= min_dist) + np.exp(-(x - min_dist) / spread) * (x > min_dist)\n\[email protected]()\ndef compute_low_dim_psims(embedding, a, b):\n \"\"\"\n Computes low-dimensional pairwise similarites from embeddings via numba.\n :param embedding: np.array embedding coordinates\n :param a: float shape parameter a\n :param b: float shape parameter b\n :return: np.array low-dimensional similarities\n \"\"\"\n embd_dim = embedding.shape[1]\n n_points = embedding.shape[0]\n # numba does not support np.array[None], so use reshape\n squared_dists = ((embedding.reshape((n_points, 1, embd_dim))\n - embedding.reshape((1, n_points, embd_dim)))**2).sum(-1)\n return low_dim_sim_dist(squared_dists, a, b, squared=True)\n\n\ndef compute_low_dim_sims(embedding1, embedding2, a, b):\n \"\"\"\n Computes low-dimensional similarites between two sets of embeddings.\n :param embedding1: np.array Coordinates of first set of embeddings\n :param embedding2: np.array Coordinates of second set of embeddings\n :param a: float shape parameter a\n :param b: float shape parameter b\n :return: np.array low-dimensional similarities\n 
\"\"\"\n assert embedding1.shape == embedding2.shape\n squared_dists = ((embedding1 - embedding2) ** 2).sum(-1)\n return low_dim_sim_dist(squared_dists, a, b, squared=True)\n\n\n\n## loss functions\[email protected]()\ndef my_log(x, eps=1e-4):\n \"\"\"\n Safe version of log\n \"\"\"\n return np.log(np.minimum(x + eps, 1.0))\n\n# expects dense np.arrays\ndef reproducing_loss(high_sim, low_sim):\n \"\"\"\n UMAPs original loss function, numpy implementation\n :param high_sim: np.array or scipy.sparse.coo_matrix high-dimensional similarities\n :param low_sim: np.array low-dimensional similarities\n :return: tuple of floats, attractive and repulsive loss\n \"\"\"\n return BCE_loss(high_sim_a = high_sim,\n high_sim_r = high_sim,\n low_sim = low_sim)\n\n\ndef expected_loss(high_sim, low_sim, negative_sample_rate, push_tail=True):\n \"\"\"\n UMAP's true loss function, numpy implementation\n :param high_sim: np.array or scipy.sparse.coo_matrix high-dimensional similarities\n :param low_sim: np.array low-dimensional similarities\n :param negative_sample_rate: int Number of negative samples per positive sample\n :param push_tail: bool Whether tail of negative sample is pushed away from its head.\n :return:\n \"\"\"\n # get decreased repulsive weights\n high_sim_r, _ = get_UMAP_push_weight(high_sim, negative_sample_rate=negative_sample_rate, push_tail=push_tail)\n if isinstance(high_sim_r, np.ndarray):\n high_sim_r = 1-high_sim_r\n elif isinstance(high_sim_r, scipy.sparse.coo_matrix):\n high_sim_r.data = 1-high_sim_r.data\n return BCE_loss(high_sim_a = high_sim,\n high_sim_r = high_sim_r,\n low_sim = low_sim)\n\ndef BCE_loss(high_sim_a, high_sim_r, low_sim):\n \"\"\"\n General BCE loss between the high-dimensional similarities and the low dimensional similarities, numpy implementation\n :param high_sim_a: np.array or scipy.sparse.coo_matrix attractive high-dimensional similarities\n :param high_sim_r: np.array or scipy.sparse.coo_matrix repulsive high-dimensional similarities\n :param low_sim: np.array low-dimensional similarities\n :return: tuple of floats attractive and repulsive parts of BCE loss\n \"\"\"\n if type(high_sim_a) == type(high_sim_r) == type(low_sim) == np.ndarray:\n loss_a = (high_sim_a * my_log(low_sim)).sum()\n loss_r = ((1-high_sim_r) * my_log(1 - low_sim)).sum()\n\n elif type(high_sim_a) == type(high_sim_r) == type(low_sim) == scipy.sparse.coo_matrix:\n assert np.all(high_sim_a.row == high_sim_r.row) and np.all(high_sim_a.row == low_sim.row) and \\\n np.all(high_sim_a.col == high_sim_r.col) and np.all(high_sim_a.col == low_sim.col), \\\n \"Sparse matrices without matching indices for nonzero elements are not supported.\"\n loss_a = (high_sim_a.data * my_log(low_sim.data)).sum()\n loss_r = ((1 - high_sim_r.data) * my_log(1-low_sim.data)).sum() # 1 * log(1) = 0\n else:\n raise NotImplementedError(f\"high_sim_a, high_sim_r, low_sim have types {type(high_sim_a)}, {type(high_sim_r)}\"\n f\"and {type(low_sim)}\")\n return -loss_a, -loss_r\n\n# keops implementations:\ndef KL_divergence(high_sim,\n a,\n b,\n embedding,\n eps=1e-12,\n norm_over_pos=True):\n \"\"\"\n Computes the KL divergence between the high-dimensional p and low-dimensional\n similarities q. 
The latter are inferred from the embedding.\n KL = sum_ij p_ij * log(p_ij / q_ij) = sum_ij p_ij * log(p_ij) - sum_ij p_ij * log(q_ij)\n --> Only ij with p_ij > 0 need to be considered as 0* log(0) is 0 by\n convention.\n :param high_sim: scipy.sparse.coo_matrix high-dimensional similarities\n :param a: float shape parameter a\n :param b: float shape parameter b\n :param embedding: np.array Coordinates of embeddings\n :return: float, KL divergence\n \"\"\"\n heads = high_sim.row\n tails = high_sim.col\n\n # compute low dimensional simiarities on the edges with positive p_ij\n sq_dist_pos_edges = ((embedding[heads]-embedding[tails])**2).sum(-1)\n low_sim_pos_edges = low_dim_sim_keops_dist(sq_dist_pos_edges,\n a,\n b,\n squared=True)\n if norm_over_pos:\n low_sim_pos_edges_norm = low_sim_pos_edges / low_sim_pos_edges.sum()\n else:\n total_low_sim = compute_low_dim_psim_keops_embd(embedding,\n a,\n b).sum(1).cpu().numpy().sum()\n low_sim_pos_edges_norm = low_sim_pos_edges / total_low_sim\n\n\n\n high_sim_pos_edges_norm = high_sim.data / high_sim.data.sum()\n\n neg_entropy = (high_sim_pos_edges_norm * my_log(high_sim_pos_edges_norm, eps)).sum()\n cross_entropy = - (high_sim_pos_edges_norm * my_log(low_sim_pos_edges_norm, eps)).sum()\n return cross_entropy + neg_entropy\n\n\ndef reproducing_loss_keops(high_sim: scipy.sparse.coo_matrix,\n a,\n b,\n embedding,\n eps=1e-4):\n \"\"\"\n UMAPs original loss function, keops implementation\n :param high_sim: scipy.sparse.coo_matrix high-dimensional similarities\n :param a: float shape parameter a\n :param b: float shape parameter b\n :param embedding: np.array Coordinates of embeddings\n :param eps: float Small epsilon value for log\n :return: tuple of floats, attractive and repulsive loss\n \"\"\"\n heads = high_sim.row\n tails = high_sim.col\n\n # compute low dimensional similarities from embeddings\n sq_dist_pos_edges = ((embedding[heads]-embedding[tails])**2).sum(-1)\n low_sim_pos_edges = low_dim_sim_keops_dist(sq_dist_pos_edges, a, b, squared=True)\n low_sim = compute_low_dim_psim_keops_embd(embedding, a, b)\n\n loss_a = (high_sim.data * my_log(low_sim_pos_edges)).sum()\n\n inv_low_sim = 1 - (low_sim - eps).relu() # pykeops compatible version of min(1-low_sim+eps, 1)\n # for repulsive term compute loss with keops and all high_sims = 1 and substract the sparse positive high_sims\n loss_r = (inv_low_sim).log().sum(1).sum()\n loss_r -= ((1 - high_sim.data) * my_log(1 - low_sim_pos_edges)).sum()\n return -loss_a, float(-loss_r)\n\ndef expected_loss_keops(high_sim: scipy.sparse.coo_matrix,\n a,\n b,\n negative_sample_rate,\n embedding,\n push_tail=False,\n eps=0.0001):\n \"\"\"\n UMAP's true loss function, keops implementation\n :param high_sim: scipy.sparse.coo_matrix high-dimensional similarities\n :param a: float shape parameter a\n :param b: float shape parameter b\n :param negative_sample_rate: int Number of negative samples per positive sample\n :param embedding: np.array Coordinates of embeddings\n :param push_tail: bool Whether tail of negative sample is pushed away from its head.\n :param eps: float Small epsilon value for log\n :return: tuple of floats, attractive and repulsive loss\n \"\"\"\n heads = high_sim.row\n tails = high_sim.col\n\n # compute low dimensional similarities from embeddings\n sq_dist_pos_edges = ((embedding[heads]-embedding[tails])**2).sum(-1)\n low_sim_pos_edges = low_dim_sim_keops_dist(sq_dist_pos_edges, a, b, squared=True)\n low_sim = compute_low_dim_psim_keops_embd(embedding, a, b)\n\n loss_a = (high_sim.data * 
my_log(low_sim_pos_edges, eps)).sum()\n\n # get decreased repulsive weights\n push_weights = get_UMAP_push_weight_keops(high_sim, negative_sample_rate, push_tail)[0]\n\n inv_low_sim = 1 - (low_sim - eps).relu() # pykeops compatible version of min(1-low_sim+eps, 1)\n loss_r = (push_weights * inv_low_sim.log()).sum(1).sum()\n\n return -loss_a, float(-loss_r)\n\ndef get_UMAP_push_weight_keops(high_sim, negative_sample_rate, push_tail=False):\n \"\"\"\n Computes the effective, decreased repulsive weights and the degrees of each node, keops implementation\n :param high_sim: np.array or scipy.sparse.coo_matrix high-dimensional similarities\n :param negative_sample_rate: int Number of negative samples per positive sample\n :param push_tail: bool Whether tail of negative sample is pushed away from its head.\n :return: tuple of keops.LazyTensor and np.array reduced effective repulsive weights and degrees\n \"\"\"\n n_points = LazyTensor(torch.tensor(high_sim.shape[0], device=\"cuda\", dtype=torch.float32))\n\n degrees = np.array(high_sim.sum(-1)).ravel()\n degrees_t = torch.tensor(degrees, device=\"cuda\", dtype=torch.float32)\n degrees_i = LazyTensor(degrees_t[:, None, None])\n degrees_j = LazyTensor(degrees_t[None, :, None])\n\n if push_tail:\n # np.array[None] does not work for numba, so use reshape instead\n return negative_sample_rate * (degrees_i + degrees_j)/(2*n_points), degrees\n return negative_sample_rate * degrees_i * LazyTensor(torch.ones((1,len(degrees), 1), device=\"cuda\"))/n_points, degrees\n\ndef get_UMAP_push_weight(high_sim, negative_sample_rate, push_tail=False):\n \"\"\"\n Computes the effective, decreased repulsive weights and the degrees of each node, numpy implementation\n :param high_sim: np.array or scipy.sparse.coo_matrix high-dimensional similarities\n :param negative_sample_rate: int Number of negative samples per positive sample\n :param push_tail: bool Whether tail of negative sample is pushed away from its head.\n :return: tuple of np.array or scipy.sparse.coo_matrix and np.array reduced effective repulsive weights and degrees\n \"\"\"\n degrees = np.array(high_sim.sum(-1)).ravel()\n n_points = high_sim.shape[0]\n if isinstance(high_sim, np.ndarray):\n if push_tail:\n # np.array[None] does not work for numba, so use reshape instead\n return negative_sample_rate * (degrees.reshape((-1, 1)) + degrees.reshape((1, -1)))/(2*n_points), degrees\n return (negative_sample_rate * np.tile(degrees, (len(degrees), 1))/n_points).T, degrees\n elif isinstance(high_sim, scipy.sparse.coo_matrix):\n if push_tail:\n push_weights = negative_sample_rate * (degrees[high_sim.row] + degrees[high_sim.col]) / (2*n_points)\n else:\n push_weights = negative_sample_rate * degrees[high_sim.row] / n_points\n return scipy.sparse.coo_matrix((push_weights, (high_sim.row, high_sim.col)),\n shape=(n_points, n_points)), degrees\n else:\n print(type(high_sim))\n raise NotImplementedError\n\n\ndef get_target_sim(high_sim, negative_sample_rate=5):\n \"\"\"\n Computes the true target similarities of UMAP\n :param high_sim: np.array or scipy.sparse.coo_matrix high-dimensional similarities\n :param negative_sample_rate: int Number of negative samples per positive sample\n :return: np.array or scipy.sparse.coo_matrix UMAP's true target similarities\n \"\"\"\n push_weight, _ = get_UMAP_push_weight(high_sim, negative_sample_rate, push_tail=True)\n if isinstance(high_sim, np.ndarray):\n return high_sim / (high_sim + push_weight)\n elif isinstance(high_sim, scipy.sparse.coo_matrix):\n return 
scipy.sparse.coo_matrix((high_sim.data / (high_sim.data + push_weight.data),\n (high_sim.row, high_sim.col)), shape=high_sim.shape)\n else:\n print(type(high_sim))\n raise NotImplementedError\n\n\n\n\n\n\n"
]
| [
[
"numpy.random.normal",
"numpy.sin",
"numpy.ones_like",
"numpy.minimum",
"numpy.random.seed",
"scipy.stats.spearmanr",
"numpy.exp",
"scipy.stats.pearsonr",
"sklearn.metrics.pairwise_distances",
"numpy.random.uniform",
"torch.tensor",
"numpy.arange",
"numpy.cos",
"numpy.all"
]
]
|
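`acc_kNN` above encodes each kNN graph as a sparse 0/1 adjacency matrix and intersects the two with `.multiply`; a small self-contained sketch of that overlap computation, with hard-coded neighbour indices standing in for the pykeops kNN step:

```python
import numpy as np
import scipy.sparse

n, k = 4, 2
knn_x = np.array([[1, 2], [0, 2], [1, 3], [2, 1]])  # pretend kNN indices for x
knn_y = np.array([[1, 3], [0, 2], [1, 3], [2, 0]])  # pretend kNN indices for y

def knn_matrix(idx):
    # one row per point, a 1 in each of its k neighbour columns
    rows = np.repeat(np.arange(n), k)
    return scipy.sparse.coo_matrix((np.ones(n * k), (rows, idx.flatten())), shape=(n, n))

overlap = knn_matrix(knn_x).multiply(knn_matrix(knn_y))
print(overlap.sum() / (n * k))  # share of shared neighbours; 0.75 for these toy indices
```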
karasevb/legion | [
"f3f4e7d987768598b554ffca65d730f697956dc8"
]
| [
"bindings/python/examples/partition_by_restriction.py"
]
| [
"#!/usr/bin/env python\n\n# Copyright 2020 Stanford University\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom __future__ import print_function\n\nimport pygion\nfrom pygion import task, Domain, Partition, Region, RW\nimport numpy as np\n\n@task(privileges=[RW])\ndef check_subregion(R):\n print('Subregion has volume %s extent %s bounds %s' % (\n R.ispace.volume, R.ispace.domain.extent, R.ispace.bounds))\n assert np.array_equal(R.x.shape, R.ispace.domain.extent)\n return R.ispace.volume\n\n@task\ndef main():\n R = Region([4, 4], {'x': pygion.float64})\n\n pygion.fill(R, 'x', 0)\n\n # Create a partition of R.\n colors = [2, 2]\n transform = [[2, 0], [0, 2]]\n extent = [2, 2]\n P = Partition.restrict(R, colors, transform, extent)\n\n # Again, with different parameters.\n colors2 = [3]\n transform2 = [[1], [2]]\n extent2 = Domain([2, 2], [-1, -1])\n P2 = Partition.restrict(R, colors2, transform2, extent2)\n\n assert P.color_space.volume == 4\n assert P2.color_space.volume == 3\n\n # Grab a subregion of P.\n R00 = P[0, 0]\n\n print('Parent region has volume %s' % R.ispace.volume)\n assert R.ispace.volume == 16\n assert check_subregion(R00).get() == 4\n for Rij in P:\n assert check_subregion(Rij).get() == 4\n assert check_subregion(P2[0]).get() == 1\n assert check_subregion(P2[1]).get() == 4\n assert check_subregion(P2[2]).get() == 2\n\nif __name__ == '__main__':\n main()\n"
]
| [
[
"numpy.array_equal"
]
]
|
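The row's single NumPy call, `numpy.array_equal`, compares shape and elements together, which is why the example can check a `shape` tuple against an extent; a toy illustration with no Legion objects involved:

```python
import numpy as np

x = np.zeros((2, 2))
extent = np.array([2, 2])
print(np.array_equal(x.shape, extent))         # True: (2, 2) matches [2 2] elementwise
print(np.array_equal(x.shape, np.array([4])))  # False: lengths differ
```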
MetaExp/backend | [
"9e37257ed40a1c90ffb7212d3f756a8da201e3bd"
]
| [
"active_learning/evaluation.py"
]
| [
"from active_learning.rating import *\nfrom active_learning.active_learner import *\nfrom active_learning.oracles import *\nfrom util.meta_path_loader_dispatcher import MetaPathLoaderDispatcher\n\nimport logging\nimport pandas as pd\nimport util.tensor_logging as tf_log\nimport embeddings.meta2vec\n\n# Set up logging\nlogger = logging.getLogger()\nconsoleHandler = logging.StreamHandler()\nlogger.addHandler(consoleHandler) # logger.setLevel(logging.DEBUG)\n\n# Different parametrisations that can be used in experiments\nORACLES = [UserOracle, FunctionalOracle]\nALGORITHMS = [UncertaintySamplingAlgorithm, GPSelect_Algorithm, RandomSamplingAlgorithm]\n\n\nclass Evaluator:\n def __init__(self, dataset_name: str,\n batch_size: int,\n algorithm,\n oracle,\n seed: int = 42, **evaluator_params):\n # add tf logging\n self._tf_logger = tf_log.get_logger('evaluator')\n self._tf_logger.track_scalar('mse')\n self._tf_logger.track_scalar('abs')\n # self._tf_logger.track_histogram('uncertainty')\n self._tf_logger.track_histogram('rating')\n self._tf_logger.start_writer()\n\n self.batch_size = batch_size\n\n meta_path_loader = MetaPathLoaderDispatcher().get_loader(dataset_name)\n meta_paths = meta_path_loader.load_meta_paths()\n\n # TODO find unique names\n # create mps\n mp_list = [MetaPath(edge_node_list=[hash(i) for i in mp.as_list()]) for mp in meta_paths]\n\n print('run metapath-embedding')\n embed = embeddings.meta2vec.calculate_metapath_embeddings(mp_list, metapath_embedding_size=10)\n [mp.store_embedding(embed[i][1]) for i, mp in enumerate(meta_paths)]\n print('end metapath-embedding')\n\n print(meta_paths)\n self.algorithm = algorithm(meta_paths=meta_paths, seed=seed, tf_logger=self._tf_logger,**evaluator_params)\n self.oracle = oracle\n\n def compute(self) -> pd.DataFrame:\n \"\"\"\n Label the datapoints according to the ActiveLearningAlgorithm and collect statistics.\n Communication is via ids of meta-paths which are assumed to be zero-indexed.\n\n :return: pd.Dataframe with statistics about the collection process.\n \"\"\"\n statistics = []\n is_last_batch = False\n while self.oracle._wants_to_continue() is True and not is_last_batch:\n # Retrieve next batch\n next_metapaths, is_last_batch, ref_paths = self.algorithm.get_next(batch_size=self.batch_size)\n ids_to_be_rated = [mp['id'] for mp in next_metapaths]\n logger.info(\"\\tRating paths:\\t{}\".format(ids_to_be_rated))\n\n # Rate paths and update algorithm\n rated_metapaths = self.oracle._rate_meta_paths(next_metapaths)\n self.algorithm.update(rated_metapaths)\n\n # Log statistics\n mse = self.compute_mse()\n abs_diff = self.compute_absolute_error()\n stats = {'mse': mse,\n 'absolute_error': abs_diff}\n statistics.append(stats)\n self._tf_logger.update('mse', mse)\n self._tf_logger.update('abs', abs_diff)\n self._tf_logger.write_summary()\n\n logger.info('\\n'.join([\"\\t{}:\\t{}\".format(key, value) for key, value in stats.items()]))\n logger.info(\"\")\n logger.info(\"Finished rating paths.\")\n return pd.DataFrame.from_records(statistics)\n\n def compute_mse(self) -> float:\n \"\"\"\n Calculate the Mean Squared Error between the predicted ratings and the ground truth.\n \"\"\"\n predictions = self.algorithm.get_all_predictions()\n squared_values = [pow(self.oracle._rate_meta_path(p) - p['rating'], 2) for p in predictions]\n return sum(squared_values) / len(squared_values)\n\n def compute_absolute_error(self) -> float:\n \"\"\"\n Calculate the Mean Squared Error between the predicted ratings and the ground truth.\n \"\"\"\n predictions = 
self.algorithm.get_all_predictions()\n absolute_differences = [abs(self.oracle._rate_meta_path(p) - p['rating']) for p in predictions]\n return sum(absolute_differences) / len(absolute_differences)\n"
]
| [
[
"pandas.DataFrame.from_records"
]
]
|
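The evaluator's two metrics reduce to plain MSE and mean absolute error over (oracle rating, prediction) pairs; a NumPy restatement of its list-comprehension versions, with invented ratings:

```python
import numpy as np

oracle = np.array([3.0, 1.0, 4.0, 1.5])      # ground-truth ratings
predicted = np.array([2.5, 1.5, 4.0, 1.0])   # algorithm predictions

mse = np.mean((oracle - predicted) ** 2)     # matches compute_mse
mae = np.mean(np.abs(oracle - predicted))    # matches compute_absolute_error
print(mse, mae)  # 0.1875 0.375
```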
jarokaz/ucaip-labs | [
"8db85d65a22ad3ffac8a25efea975207f6276049"
]
| [
"src/model_training/model.py"
]
| [
"# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"A DNN keras classification model.\"\"\"\n\nimport os\nimport logging\nimport tensorflow as tf\nimport tensorflow_transform as tft\nfrom tensorflow import keras\n\nfrom src.common import features\nfrom src.model_training import defaults\n\n\ndef create_model_inputs():\n inputs = {}\n for feature_name in features.FEATURE_NAMES:\n name = features.transformed_name(feature_name)\n if feature_name in features.NUMERICAL_FEATURE_NAMES:\n inputs[name] = keras.layers.Input(name=name, shape=[], dtype=tf.float32)\n elif feature_name in features.categorical_feature_names():\n inputs[name] = keras.layers.Input(name=name, shape=[], dtype=tf.int64)\n else:\n pass\n return inputs\n\n\ndef create_binary_classifier(tft_output, hyperparams):\n input_layers = create_model_inputs()\n\n layers = []\n for key in input_layers:\n feature_name = features.original_name(key)\n if feature_name in features.EMBEDDING_CATEGORICAL_FEATURES:\n vocab_size = tft_output.vocabulary_size_by_name(feature_name)\n embedding_size = features.EMBEDDING_CATEGORICAL_FEATURES[feature_name]\n embedding_output = keras.layers.Embedding(\n input_dim=vocab_size + 1,\n output_dim=embedding_size,\n name=f\"{key}_embedding\",\n )(input_layers[key])\n layers.append(embedding_output)\n elif feature_name in features.ONEHOT_CATEGORICAL_FEATURE_NAMES:\n vocab_size = tft_output.vocabulary_size_by_name(feature_name)\n onehot_layer = keras.layers.experimental.preprocessing.CategoryEncoding(\n max_tokens=vocab_size,\n output_mode=\"binary\",\n name=f\"{key}_onehot\",\n )(input_layers[key])\n layers.append(onehot_layer)\n elif feature_name in features.NUMERICAL_FEATURE_NAMES:\n numeric_layer = tf.expand_dims(input_layers[key], -1)\n layers.append(numeric_layer)\n else:\n pass\n\n joined = keras.layers.Concatenate(name=\"combines_inputs\")(layers)\n feedforward_output = keras.Sequential(\n [\n keras.layers.Dense(units, activation=\"relu\")\n for units in hyperparams[\"hidden_units\"]\n ],\n name=\"feedforward_network\",\n )(joined)\n logits = keras.layers.Dense(units=1, name=\"logits\")(feedforward_output)\n\n model = keras.Model(inputs=input_layers, outputs=[logits])\n return model\n"
]
| [
[
"tensorflow.keras.layers.Input",
"tensorflow.expand_dims",
"tensorflow.keras.layers.Embedding",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.Model",
"tensorflow.keras.layers.experimental.preprocessing.CategoryEncoding",
"tensorflow.keras.layers.Concatenate"
]
]
|
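`create_binary_classifier` above wires named scalar inputs through embedding/one-hot/numeric branches into a Dense stack; a stripped-down sketch of the same functional-API shape with one numeric feature (feature name and layer sizes invented, no TFX/Transform involved; it mirrors the file's own use of `tf.expand_dims` on a scalar input):

```python
import tensorflow as tf
from tensorflow import keras

# a single named scalar input, as in create_model_inputs()
inputs = {"trip_miles_xf": keras.layers.Input(name="trip_miles_xf", shape=[], dtype=tf.float32)}
x = tf.expand_dims(inputs["trip_miles_xf"], -1)          # scalar feature -> (batch, 1)
hidden = keras.layers.Dense(8, activation="relu", name="feedforward")(x)
logits = keras.layers.Dense(units=1, name="logits")(hidden)
model = keras.Model(inputs=inputs, outputs=[logits])
model.summary()
```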
knaaptime/proplot | [
"b2376f0c477689b57e399105b67a8061aac62f7a"
]
| [
"docs/axis.py"
]
| [
"# -*- coding: utf-8 -*-\n# ---\n# jupyter:\n# jupytext:\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.3'\n# jupytext_version: 1.11.3\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# %% [raw] raw_mimetype=\"text/restructuredtext\"\n# .. _ug_cartesian:\n#\n# Cartesian plots\n# ===============\n#\n# This section documents features used for modifying Cartesian *x* and *y*\n# axis settings, including axis scales, tick locations, and tick label\n# formatting. It also documents a handy \"dual axes\" feature.\n\n\n# %% [raw] raw_mimetype=\"text/restructuredtext\"\n# .. _ug_locators:\n#\n# Tick locations\n# --------------\n#\n# Matplotlib `tick locators\n# <https://matplotlib.org/stable/gallery/ticks_and_spines/tick-locators.html>`__\n# select sensible tick locations based on the axis data limits. In ProPlot, you can\n# change the tick locator using the `~proplot.axes.CartesianAxes.format` keyword\n# arguments `xlocator`, `ylocator`, `xminorlocator`, and `yminorlocator` (or their\n# aliases, `xticks`, `yticks`, `xminorticks`, and `yminorticks`). This is powered by\n# the `~proplot.constructor.Locator` :ref:`constructor function <why_constructor>`.\n#\n# These keyword arguments can be used to apply built-in matplotlib\n# `~matplotlib.ticker.Locator`\\ s by their \"registered\" names (e.g.\n# ``xlocator='log'``), to draw ticks every ``N`` data values with\n# `~matplotlib.ticker.MultipleLocator` (e.g., ``xlocator=2``), or to tick the\n# specific locations in a list using `~matplotlib.ticker.FixedLocator` (just\n# like `~matplotlib.axes.Axes.set_xticks` and\n# `~matplotlib.axes.Axes.set_yticks`). See\n# `~proplot.axes.CartesianAxes.format` and `~proplot.constructor.Locator` for\n# details.\n#\n# To generate lists of tick locations, we recommend using ProPlot's\n# `~proplot.utils.arange` function -- it’s basically an *endpoint-inclusive*\n# version of `numpy.arange`, which is usually what you'll want in this\n# context.\n\n# %%\nimport proplot as pplt\nimport numpy as np\nstate = np.random.RandomState(51423)\npplt.rc.update(\n facecolor=pplt.scale_luminance('powderblue', 1.15),\n linewidth=1, fontsize=10,\n color='dark blue', suptitlecolor='dark blue',\n titleloc='upper center', titlecolor='dark blue', titleborder=False,\n)\nfig, axs = pplt.subplots(nrows=8, refwidth=5, refaspect=(8, 1), share=0)\naxs.format(suptitle='Tick locators demo')\n\n# Step size for tick locations\naxs[0].format(\n xlim=(0, 200), xminorlocator=10, xlocator=30,\n title='MultipleLocator'\n)\n\n# Specific list of locations\naxs[1].format(\n xlim=(0, 10), xminorlocator=0.1,\n xlocator=[0, 0.3, 0.8, 1.6, 4.4, 8, 8.8, 10],\n title='FixedLocator',\n)\n\n# Ticks at numpy.linspace(xmin, xmax, N)\naxs[2].format(\n xlim=(0, 10), xlocator=('linear', 21),\n title='LinearLocator',\n)\n\n# Logarithmic locator, used automatically for log scale plots\naxs[3].format(\n xlim=(1, 100), xlocator='log', xminorlocator='logminor',\n title='LogLocator',\n)\n\n# Maximum number of ticks, but at \"nice\" locations\naxs[4].format(\n xlim=(1, 7), xlocator=('maxn', 11),\n title='MaxNLocator',\n)\n\n# Index locator, only draws ticks where data is plotted\naxs[5].plot(np.arange(10) - 5, state.rand(10), alpha=0)\naxs[5].format(\n xlim=(0, 6), ylim=(0, 1), xlocator='index',\n xformatter=[r'$\\alpha$', r'$\\beta$', r'$\\gamma$', r'$\\delta$', r'$\\epsilon$'],\n title='IndexLocator',\n)\npplt.rc.reset()\n\n# Hide all ticks\naxs[6].format(\n xlim=(-10, 10), xlocator='null',\n 
title='NullLocator',\n)\n\n# Tick locations that cleanly divide 60 minute/60 second intervals\naxs[7].format(\n xlim=(0, 2), xlocator='dms', xformatter='dms',\n title='Degree-Minute-Second Locator (requires cartopy)',\n)\n\n# %% [raw] raw_mimetype=\"text/restructuredtext\"\n# .. _ug_formatters:\n#\n# Tick formatting\n# ---------------\n#\n# Matplotlib `tick formatters\n# <https://matplotlib.org/stable/gallery/ticks_and_spines/tick-formatters.html>`__\n# convert floating point numbers to nicely-formatted tick labels. In ProPlot, you can\n# change the tick formatter using the `~proplot.axes.CartesianAxes.format` keyword\n# arguments `xformatter` and `yformatter` (or their aliases, `xticklabels` and\n# `yticklabels`). This is powered by the `~proplot.constructor.Formatter`\n# :ref:`constructor function <why_constructor>`.\n#\n# These keyword arguments can be used to apply built-in matplotlib\n# `~matplotlib.ticker.Formatter`\\ s by their \"registered\" names (e.g.\n# ``xformatter='log'``), to apply a ``%``-style format directive with\n# `~matplotlib.ticker.FormatStrFormatter` (e.g., ``xformatter='%.0f'``), or\n# to apply custom tick labels with `~matplotlib.ticker.FixedFormatter` (just\n# like `~matplotlib.axes.Axes.set_xticklabels` and\n# `~matplotlib.axes.Axes.set_yticklabels`). They can also be used\n# to apply one of ProPlot's new tick formatters -- for example,\n# ``xformatter='deglat'`` to label ticks as the geographic latitude,\n# ``xformatter='pi'`` to label ticks as fractions of :math:`\\pi`,\n# or ``xformatter='sci'`` to label ticks with scientific notation.\n# See `~proplot.axes.CartesianAxes.format` and\n# `~proplot.constructor.Formatter` for details.\n#\n# ProPlot also changes the default tick formatter to\n# `~proplot.ticker.AutoFormatter`. This class trims trailing zeros by\n# default, can be used to *omit tick labels* outside of some data range, and\n# can add arbitrary prefixes and suffixes to each label. See\n# `~proplot.ticker.AutoFormatter` for details. 
To disable the trailing\n# zero-trimming feature, set :rcraw:`formatter.zerotrim` to ``False``.\n\n# %%\nimport proplot as pplt\npplt.rc.linewidth = 2\npplt.rc.fontsize = 11\nlocator = [0, 0.25, 0.5, 0.75, 1]\nfig, axs = pplt.subplots(ncols=2, nrows=2, refwidth=1.5, share=0)\n\n# Formatter comparison\naxs[0].format(\n xformatter='scalar', yformatter='scalar', title='Matplotlib formatter'\n)\naxs[1].format(yticklabelloc='both', title='ProPlot formatter')\naxs[:2].format(xlocator=locator, ylocator=locator)\n\n# Limiting the tick range\naxs[2].format(\n title='Omitting tick labels', ticklen=5, xlim=(0, 5), ylim=(0, 5),\n xtickrange=(0, 2), ytickrange=(0, 2), xlocator=1, ylocator=1\n)\n\n# Setting the wrap range\naxs[3].format(\n title='Wrapping the tick range', ticklen=5, xlim=(0, 7), ylim=(0, 6),\n xwraprange=(0, 5), ywraprange=(0, 3), xlocator=1, ylocator=1\n)\naxs.format(\n ytickloc='both', yticklabelloc='both',\n titlepad='0.5em', suptitle='Default formatters demo'\n)\npplt.rc.reset()\n\n\n# %%\nimport proplot as pplt\nimport numpy as np\npplt.rc.update(\n linewidth=1.2, fontsize=10, facecolor='gray0', figurefacecolor='gray2',\n color='gray8', gridcolor='gray8', titlecolor='gray8', suptitlecolor='gray8',\n titleloc='upper center', titleborder=False,\n)\nfig, axs = pplt.subplots(nrows=9, refwidth=5, refaspect=(8, 1), share=0)\n\n# Scientific notation\naxs[0].format(xlim=(0, 1e20), xformatter='sci', title='SciFormatter')\n\n# N significant figures for ticks at specific values\naxs[1].format(\n xlim=(0, 20), xlocator=(0.0034, 3.233, 9.2, 15.2344, 7.2343, 19.58),\n xformatter=('sigfig', 2), title='SigFigFormatter', # 2 significant digits\n)\n\n# Fraction formatters\naxs[2].format(\n xlim=(0, 3 * np.pi), xlocator=np.pi / 4, xformatter='pi', title='FracFormatter',\n)\naxs[3].format(\n xlim=(0, 2 * np.e), xlocator=np.e / 2, xticklabels='e', title='FracFormatter',\n)\n\n# Geographic formatters\naxs[4].format(\n xlim=(-90, 90), xlocator=30, xformatter='deglat', title='Latitude Formatter'\n)\naxs[5].format(\n xlim=(0, 360), xlocator=60, xformatter='deglon', title='Longitude Formatter'\n)\n\n# User input labels\naxs[6].format(\n xlim=(-1.01, 1), xlocator=0.5,\n xticklabels=['a', 'b', 'c', 'd', 'e'], title='FixedFormatter',\n)\n\n# Custom style labels\naxs[7].format(\n xlim=(0, 0.001), xlocator=0.0001, xformatter='%.E', title='FormatStrFormatter',\n)\naxs[8].format(\n xlim=(0, 100), xtickminor=False, xlocator=20,\n xformatter='{x:.1f}', title='StrMethodFormatter',\n)\naxs.format(ylocator='null', suptitle='Tick formatters demo')\npplt.rc.reset()\n\n# %% [raw] raw_mimetype=\"text/restructuredtext\"\n# .. _ug_datetime:\n#\n# Datetime ticks\n# --------------\n#\n# ProPlot can also be used to customize the tick locations and tick label\n# format of \"datetime\" axes.\n# To draw ticks on some particular time unit, just use a unit string (e.g.,\n# ``xlocator='month'``). To draw ticks every ``N`` time units, just use a (unit, N)\n# tuple (e.g., ``xlocator=('day', 5)``). 
For `% style formatting\n# <https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior>`__\n# of datetime tick labels, just use a string containing ``'%'`` (e.g.\n# ``xformatter='%Y-%m-%d'``).\n# See `~proplot.axes.CartesianAxes.format`, `~proplot.constructor.Locator`,\n# and `~proplot.constructor.Formatter` for details.\n\n# %%\nimport proplot as pplt\nimport numpy as np\npplt.rc.update(\n linewidth=1.2, fontsize=10, ticklenratio=0.7,\n figurefacecolor='w', facecolor='pastel blue',\n titleloc='upper center', titleborder=False,\n)\nfig, axs = pplt.subplots(nrows=5, refwidth=6, refaspect=(8, 1), share=0)\naxs[:4].format(xrotation=0) # no rotation for these examples\n\n# Default date locator\n# This is enabled if you plot datetime data or set datetime limits\naxs[0].format(\n xlim=(np.datetime64('2000-01-01'), np.datetime64('2001-01-02')),\n title='Auto date locator and formatter'\n)\n\n# Concise date formatter introduced in matplotlib 3.1\naxs[1].format(\n xlim=(np.datetime64('2000-01-01'), np.datetime64('2001-01-01')),\n xformatter='concise', title='Concise date formatter',\n)\n\n# Minor ticks every year, major every 10 years\naxs[2].format(\n xlim=(np.datetime64('2000-01-01'), np.datetime64('2050-01-01')),\n xlocator=('year', 10), xformatter='\\'%y', title='Ticks every N units',\n)\n\n# Minor ticks every 10 minutes, major every 2 minutes\naxs[3].format(\n xlim=(np.datetime64('2000-01-01T00:00:00'), np.datetime64('2000-01-01T12:00:00')),\n xlocator=('hour', range(0, 24, 2)), xminorlocator=('minute', range(0, 60, 10)),\n xformatter='T%H:%M:%S', title='Ticks at specific intervals',\n)\n\n# Month and year labels, with default tick label rotation\naxs[4].format(\n xlim=(np.datetime64('2000-01-01'), np.datetime64('2008-01-01')),\n xlocator='year', xminorlocator='month', # minor ticks every month\n xformatter='%b %Y', title='Ticks with default rotation',\n)\naxs.format(\n ylocator='null', suptitle='Datetime locators and formatters demo'\n)\npplt.rc.reset()\n\n\n# %% [raw] raw_mimetype=\"text/restructuredtext\"\n# .. _ug_scales:\n#\n# Axis scale changes\n# ------------------\n#\n# \"Axis scales\" like ``'linear'`` and ``'log'`` control the *x* and *y* axis\n# coordinate system. To change the axis scale, simply pass e.g.\n# ``xscale='log'`` or ``yscale='log'`` to `~proplot.axes.Axes.format`. This\n# is powered by the `~proplot.constructor.Scale`\n# :ref:`constructor function <why_constructor>`.\n#\n# ProPlot also makes several changes to the axis scale API:\n#\n# * The `~proplot.ticker.AutoFormatter` formatter is now used for all axis scales\n# by default, including ``'log'`` and ``'symlog'``. Matplotlib's behavior can\n# be restored by passing e.g. 
``xformatter='log'`` or ``yformatter='log'`` to\n# `~proplot.axes.CartesianAxes.format`.\n# * To make its behavior consistent with `~proplot.constructor.Locator` and\n# `~proplot.constructor.Formatter`, the `~proplot.constructor.Scale`\n# constructor function returns instances of `~matplotlib.scale.ScaleBase`,\n# and `~matplotlib.axes.Axes.set_xscale` and\n# `~matplotlib.axes.Axes.set_yscale` now accept these class instances in\n# addition to \"registered\" names like ``'log'``.\n# * While matplotlib axis scales must be instantiated with an\n# `~matplotlib.axis.Axis` instance (for backwards compatibility reasons),\n# ProPlot axis scales can be instantiated without the axis instance\n# (e.g., ``pplt.LogScale()`` instead of ``pplt.LogScale(ax.xaxis)``).\n# * The default `subs` for the ``'symlog'`` axis scale is now ``np.arange(1, 10)``,\n# and the default `linthresh` is now ``1``. Also the ``'log'`` and ``'symlog'``\n# axis scales now accept the keywords `base`, `linthresh`, `linscale`, and\n# `subs` rather than keywords with trailing ``x`` or ``y``.\n\n# %%\nimport proplot as pplt\nimport numpy as np\nN = 200\nlw = 3\npplt.rc.update({\n 'linewidth': 1, 'ticklabelweight': 'bold', 'axeslabelweight': 'bold'\n})\nfig, axs = pplt.subplots(ncols=2, nrows=2, refwidth=1.8, share=0)\naxs.format(suptitle='Axis scales demo', ytickminor=True)\n\n# Linear and log scales\naxs[0].format(yscale='linear', ylabel='linear scale')\naxs[1].format(ylim=(1e-3, 1e3), yscale='log', ylabel='log scale')\naxs[:2].plot(np.linspace(0, 1, N), np.linspace(0, 1000, N), lw=lw)\n\n# Symlog scale\nax = axs[2]\nax.format(yscale='symlog', ylabel='symlog scale')\nax.plot(np.linspace(0, 1, N), np.linspace(-1000, 1000, N), lw=lw)\n\n# Logit scale\nax = axs[3]\nax.format(yscale='logit', ylabel='logit scale')\nax.plot(np.linspace(0, 1, N), np.linspace(0.01, 0.99, N), lw=lw)\npplt.rc.reset()\n\n\n# %% [raw] raw_mimetype=\"text/restructuredtext\"\n# .. _ug_scales_new:\n#\n# Special axis scales\n# -------------------\n#\n# ProPlot introduces several new axis scales. The ``'cutoff'`` scale (see\n# `~proplot.scale.CutoffScale`) is useful when the statistical distribution\n# of your data is very unusual. The ``'sine'`` scale (see\n# `~proplot.scale.SineLatitudeScale`) scales the axis with a sine function,\n# resulting in an *area weighted* spherical latitude coordinate, and the\n# ``'mercator'`` scale (see `~proplot.scale.MercatorLatitudeScale`) scales\n# the axis with the Mercator projection latitude coordinate. 
The\n# ``'inverse'`` scale (see `~proplot.scale.InverseScale`) can be useful when\n# working with spectral data, especially with\n# :ref:`\"dual\" unit axes <ug_dual>`.\n\n# %%\nimport proplot as pplt\nimport numpy as np\nfig, axs = pplt.subplots(nrows=4, refaspect=(5, 1), figwidth=6, sharex=False)\nax = axs[0]\n\n# Sample data\nx = np.linspace(0, 4 * np.pi, 100)\ndy = np.linspace(-1, 1, 5)\ny1 = np.sin(x)\ny2 = np.cos(x)\nstate = np.random.RandomState(51423)\ndata = state.rand(len(dy) - 1, len(x) - 1)\n\n# Loop through various cutoff scale options\ntitles = ('Zoom out of left', 'Zoom into left', 'Discrete jump', 'Fast jump')\nargs = (\n (np.pi, 3), # speed up\n (3 * np.pi, 1 / 3), # slow down\n (np.pi, np.inf, 3 * np.pi), # discrete jump\n (np.pi, 5, 3 * np.pi) # fast jump\n)\nlocators = (\n np.pi / 3,\n np.pi / 3,\n np.pi * np.append(np.linspace(0, 1, 4), np.linspace(3, 4, 4)),\n np.pi * np.append(np.linspace(0, 1, 4), np.linspace(3, 4, 4)),\n)\nfor ax, iargs, title, locator in zip(axs, args, titles, locators):\n ax.pcolormesh(x, dy, data, cmap='grays', cmap_kw={'right': 0.8})\n for y, color in zip((y1, y2), ('coral', 'sky blue')):\n ax.plot(x, y, lw=4, color=color)\n ax.format(\n xscale=('cutoff', *iargs), title=title,\n xlim=(0, 4 * np.pi), ylabel='wave amplitude',\n xformatter='pi', xlocator=locator,\n xtickminor=False, xgrid=True, ygrid=False, suptitle='Cutoff axis scales demo'\n )\n\n# %%\nimport proplot as pplt\nimport numpy as np\n\n# Create figure\npplt.rc.reset()\nstate = np.random.RandomState(51423)\ncolors = ('coral', 'sky blue')\nfig, axs = pplt.subplots(nrows=2, ncols=3, refwidth=1.7, share=0, order='F')\naxs.format(\n toplabels=('Geographic scales', 'Exponential scales', 'Power scales'),\n)\n\n# Geographic scales\nn = 20\nx = np.linspace(-180, 180, n)\ny1 = np.linspace(-85, 85, n)\ny2 = np.linspace(-85, 85, n)\ndata = state.rand(len(x) - 1, len(y2) - 1)\nfor ax, scale, color in zip(axs[:2], ('sine', 'mercator'), colors):\n ax.plot(x, y1, '-', color=color, lw=4)\n ax.pcolormesh(x, y2, data, cmap='grays', cmap_kw={'right': 0.8})\n ax.format(\n title=scale.title() + ' y-axis', yscale=scale, ytickloc='left',\n yformatter='deg', grid=False, ylocator=20,\n xscale='linear', xlim=None, ylim=(-85, 85)\n )\n\n# Exp scales\nx = np.linspace(0, 1, 50)\ny = 10 * x\ndata = state.rand(len(y) - 1, len(x) - 1)\nfor ax, a, c, color in zip(axs[2:4], (np.e, 2), (0.5, 2), colors):\n ax.pcolormesh(x, y, data, cmap='grays', cmap_kw={'right': 0.8})\n ax.plot(x, y, lw=4, color=color)\n ax.format(\n ylim=(0.1, 10), yscale=('exp', a, c),\n title=f\"${(a, 'e')[a == np.e]}^{{{(c, '')[c == 1]}x}}$\"\n )\n\n# Power scales\nfor ax, power, color in zip(axs[4:], (2, 1 / 4), colors):\n ax.pcolormesh(x, y, data, cmap='grays', cmap_kw={'right': 0.8})\n ax.plot(x, y, lw=4, color=color)\n ax.format(\n ylim=(0.1, 10), yscale=('power', power),\n title=f'$x^{{{power}}}$'\n )\n\n\n# %% [raw] raw_mimetype=\"text/restructuredtext\"\n# .. _ug_dual:\n#\n# Dual unit scales\n# ----------------\n#\n# The `~proplot.axes.CartesianAxes.dualx` and\n# `~proplot.axes.CartesianAxes.dualy` methods can be used to draw duplicate\n# *x* and *y* axes meant to represent *alternate units* in the same\n# coordinate range as the \"parent\" axis. 
This feature is powered by the\n# `~proplot.scale.FuncScale` class.\n#\n# `~proplot.axes.CartesianAxes.dualx` and `~proplot.axes.CartesianAxes.dualy`\n# accept either (1) a single linear forward function, (2) a pair of arbitrary\n# forward and inverse functions, or (3) a scale name or scale class instance.\n# In the latter case, the scale's transforms are used for the forward and\n# inverse functions, and the scale's default locators and formatters are used\n# for the default `~proplot.scale.FuncScale` locators and formatters.\n#\n# In the below examples, we generate dual axes with each of these three methods. Note\n# that the \"parent\" axis scale is now arbitrary -- in the first example shown below,\n# we create a `~proplot.axes.CartesianAxes.dualx` axis for an axis scaled by the\n# `symlog scale <https://matplotlib.org/stable/gallery/scales/symlog_demo.html>`__.\n\n# %%\nimport proplot as pplt\npplt.rc.update({'grid.alpha': 0.4, 'linewidth': 1, 'grid.linewidth': 1})\nc1 = pplt.scale_luminance('cerulean', 0.5)\nc2 = pplt.scale_luminance('red', 0.5)\nfig, axs = pplt.subplots(\n [[1, 1, 2, 2], [0, 3, 3, 0]],\n share=0, refaspect=2.2, refwidth=3\n)\naxs.format(\n suptitle='Duplicate axes with custom transformations',\n xcolor=c1, gridcolor=c1,\n ylocator=[], yformatter=[]\n)\n\n# Meters and kilometers\nax = axs[0]\nax.format(xlim=(0, 5000), xlabel='meters')\nax.dualx(\n lambda x: x * 1e-3,\n label='kilometers', grid=True, color=c2, gridcolor=c2\n)\n\n# Kelvin and Celsius\nax = axs[1]\nax.format(xlim=(200, 300), xlabel='temperature (K)')\nax.dualx(\n lambda x: x - 273.15,\n label='temperature (\\N{DEGREE SIGN}C)', grid=True, color=c2, gridcolor=c2\n)\n\n# With symlog parent\nax = axs[2]\nax.format(xlim=(-100, 100), xscale='symlog', xlabel='MegaJoules')\nax.dualx(\n lambda x: x * 1e6,\n label='Joules', formatter='log', grid=True, color=c2, gridcolor=c2\n)\npplt.rc.reset()\n\n# %%\nimport proplot as pplt\npplt.rc.update({'grid.alpha': 0.4, 'linewidth': 1, 'grid.linewidth': 1})\nc1 = pplt.scale_luminance('cerulean', 0.5)\nc2 = pplt.scale_luminance('red', 0.5)\nfig, axs = pplt.subplots(ncols=2, share=0, refaspect=0.4, refwidth=1.8)\naxs.format(suptitle='Duplicate axes with special transformations')\n\n# Pressure as the linear scale, height on opposite axis (scale height 7km)\nax = axs[0]\nax.format(\n xformatter='null', ylabel='pressure (hPa)',\n ylim=(1000, 10), xlocator=[], ycolor=c1, gridcolor=c1\n)\nax.dualy(\n 'height', label='height (km)', ticks=2.5, color=c2, gridcolor=c2, grid=True\n)\n\n# Height as the linear scale, pressure on opposite axis (scale height 7km)\nax = axs[1] # span\nax.format(\n xformatter='null', ylabel='height (km)', ylim=(0, 20), xlocator='null',\n grid=True, gridcolor=c2, ycolor=c2\n)\nax.dualy(\n 'pressure', label='pressure (hPa)', locator=100, color=c1, gridcolor=c1, grid=True,\n)\npplt.rc.reset()\n\n# %%\nimport proplot as pplt\nimport numpy as np\npplt.rc.margin = 0\nc1 = pplt.scale_luminance('cerulean', 0.5)\nc2 = pplt.scale_luminance('red', 0.5)\nfig, ax = pplt.subplots(refaspect=(3, 1), figwidth=6)\n\n# Sample data\ncutoff = 1 / 5\nx = np.linspace(0.01, 0.5, 1000) # in wavenumber days\nresponse = (np.tanh(-((x - cutoff) / 0.03)) + 1) / 2 # response func\nax.axvline(cutoff, lw=2, ls='-', color=c2)\nax.fill_between([cutoff - 0.03, cutoff + 0.03], 0, 1, color=c2, alpha=0.3)\nax.plot(x, response, color=c1, lw=2)\n\n# Add inverse scale to top\nax.format(\n xlabel='wavenumber (days$^{-1}$)', ylabel='response', grid=False,\n title='Imaginary response function',\n 
suptitle='Duplicate axes with wavenumber and period',\n)\nax = ax.dualx(\n 'inverse', locator='log', locator_kw={'subs': (1, 2, 5)}, label='period (days)'\n)\npplt.rc.reset()\n"
]
| [
[
"numpy.sin",
"numpy.random.RandomState",
"numpy.tanh",
"numpy.arange",
"numpy.cos",
"numpy.linspace",
"numpy.datetime64"
]
]
|
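The ProPlot tutorial stored in the row above demonstrates `format` keywords such as `xlocator`, `xminorlocator`, and `xformatter`, plus the endpoint-inclusive `pplt.arange` helper. A minimal sketch of that tick-location pattern, assuming a recent ProPlot release (the figure dimensions and tick steps here are illustrative, not taken from the stored file):

    import proplot as pplt

    fig, ax = pplt.subplots(refwidth=4, refaspect=(4, 1))
    ax.format(
        xlim=(0, 100),
        xlocator=pplt.arange(0, 100, 20),  # endpoint-inclusive: ticks at 0, 20, ..., 100
        xminorlocator=5,                   # MultipleLocator-style step for minor ticks
        xformatter='{x:.0f}',              # StrMethodFormatter, as in the demo above
        title='arange-based tick locations',
    )

Because `pplt.arange` includes the endpoint, the last major tick lands exactly at `xmax`, which is usually what the axis-limit pairing above intends.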
jirifilip/CBA | [
"59168ef6fb4c9e319475f9a7498446ba5ff306e1"
]
| [
"pyarc/qcba/transforms/extend.py"
]
| [
"import pandas\nimport numpy as np\nimport math\n\nfrom ..data_structures import QuantitativeDataFrame, Interval\n\nclass RuleExtender:\n \n def __init__(self, dataframe):\n \n if type(dataframe) != QuantitativeDataFrame:\n raise Exception(\n \"type of dataset must be pandas.DataFrame\"\n )\n \n self.__dataframe = dataframe\n \n \n \n def transform(self, rules):\n \n copied_rules = [ rule.copy() for rule in rules ]\n\n progress_bar_len = 50\n copied_rules_len = len(copied_rules)\n progress_bar = \"#\" * progress_bar_len\n progress_bar_empty = \" \" * progress_bar_len\n last_progress_bar_idx = -1\n\n extended_rules = []\n\n #print(\"len: \", copied_rules_len)\n\n for i, rule in enumerate(copied_rules):\n current_progress_bar_idx = math.floor(i / copied_rules_len * progress_bar_len)\n \n if last_progress_bar_idx != current_progress_bar_idx:\n last_progress_bar_idx = current_progress_bar_idx\n \n progress_string = \"[\" + progress_bar[:last_progress_bar_idx] + progress_bar_empty[last_progress_bar_idx:] + \"]\"\n \n print(*progress_string, sep=\"\")\n\n extended_rules.append(self.__extend(rule))\n \n return extended_rules\n \n \n \n def __extend(self, rule):\n ext = self.__extend_rule(rule)\n \n return ext\n \n def __extend_rule(self, rule, min_improvement=0, min_conditional_improvement=-0.01):\n \n # check improvemnt argument ranges\n \n current_best = rule\n direct_extensions = self.__get_extensions(rule)\n \n current_best.update_properties(self.__dataframe)\n \n while True:\n extension_succesful = False\n\n direct_extensions = self.__get_extensions(current_best)\n\n #print(\"extending - new cycle\")\n \n for candidate in direct_extensions:\n #print(\"\\tcandidate - direct extensions\")\n candidate.update_properties(self.__dataframe)\n \n delta_confidence = candidate.confidence - current_best.confidence\n delta_support = candidate.support - current_best.support\n \n \n if self.__crisp_accept(delta_confidence, delta_support, min_improvement):\n current_best = candidate\n extension_succesful = True\n break\n \n \n if self.__conditional_accept(delta_confidence, min_conditional_improvement):\n enlargement = candidate\n \n while True:\n enlargement = self.get_beam_extensions(enlargement)\n \n if not enlargement:\n break\n \n candidate.update_properties(self.__dataframe)\n enlargement.update_properties(self.__dataframe)\n\n delta_confidence = enlargement.confidence - current_best.confidence\n delta_support = enlargement.support - current_best.support\n\n if self.__crisp_accept(delta_confidence, delta_support, min_improvement):\n current_best = enlargement\n extension_succesful = True\n \n elif self.__conditional_accept(delta_confidence, min_conditional_improvement):\n continue\n \n else:\n break\n \n \n if extension_succesful == True:\n break\n \n\n else:\n # continue to next candidate\n continue\n \n \n if extension_succesful == False:\n break\n \n return current_best\n \n \n def __get_extensions(self, rule):\n extended_rules = []\n \n for literal in rule.antecedent:\n attribute, interval = literal\n \n neighborhood = self.__get_direct_extensions(literal)\n \n for extended_literal in neighborhood:\n # copy the rule so the extended literal\n # can replace the default literal\n copied_rule = rule.copy()\n \n # find the index of the literal\n # so that it can be replaced\n current_literal_index = copied_rule.antecedent.index(literal)\n \n copied_rule.antecedent[current_literal_index] = extended_literal\n copied_rule.was_extended = True\n copied_rule.extended_literal = extended_literal\n \n 
extended_rules.append(copied_rule)\n\n extended_rules.sort(reverse=True)\n \n return extended_rules\n \n \n def __get_direct_extensions(self, literal):\n \"\"\"\n ensure sort and unique\n before calling functions\n \"\"\"\n \n attribute, interval = literal\n\n # if nominal\n # needs correction to return null and skip when extending\n if type(interval) == str:\n return [literal]\n \n vals = self.__dataframe.column(attribute)\n vals_len = vals.size\n\n mask = interval.test_membership(vals)\n\n # indices of interval members\n # we want to extend them \n # once to the left\n # and once to the right\n # bu we have to check if resulting\n # indices are not larger than value size\n member_indexes = np.where(mask)[0]\n\n first_index = member_indexes[0]\n last_index = member_indexes[-1]\n\n first_index_modified = first_index - 1\n last_index_modified = last_index + 1\n \n no_left_extension = False\n no_right_extension = False\n\n if first_index_modified < 0:\n no_left_extension = True\n\n # if last_index_modified is larger than\n # available indices\n if last_index_modified > vals_len - 1:\n no_right_extension = True\n\n\n new_left_bound = interval.minval\n new_right_bound = interval.maxval\n\n if not no_left_extension:\n new_left_bound = vals[first_index_modified]\n\n if not no_right_extension:\n new_right_bound = vals[last_index_modified]\n\n\n # prepare return values\n extensions = []\n\n if not no_left_extension:\n # when values are [1, 2, 3, 3, 4, 5]\n # and the corresponding interval is (2, 4)\n # instead of resulting interval being (1, 4)\n \n temp_interval = Interval(\n new_left_bound,\n interval.maxval,\n True,\n interval.right_inclusive\n )\n\n extensions.append((attribute, temp_interval))\n\n if not no_right_extension:\n\n temp_interval = Interval(\n interval.minval,\n new_right_bound,\n interval.left_inclusive,\n True\n )\n\n extensions.append((attribute, temp_interval))\n\n return extensions\n \n \n # make private\n def get_beam_extensions(self, rule):\n if not rule.was_extended:\n return None\n\n # literal which extended the rule\n literal = rule.extended_literal\n \n extended_literal = self.__get_direct_extensions(literal)\n \n if not extended_literal:\n return None\n \n copied_rule = rule.copy()\n \n literal_index = copied_rule.antecedent.index(literal)\n \n # so that literal is not an array\n copied_rule.antecedent[literal_index] = extended_literal[0]\n copied_rule.was_extended = True\n copied_rule.extended_literal = extended_literal[0]\n \n return copied_rule\n\n \n \n def __crisp_accept(self, delta_confidence, delta_support, min_improvement):\n if delta_confidence >= min_improvement and delta_support > 0:\n return True\n else:\n return False\n \n def __conditional_accept(self, delta_conf, min_improvement):\n if delta_conf >= min_improvement:\n return True\n \n "
]
| [
[
"numpy.where"
]
]
|
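The `RuleExtender.__extend_rule` loop in the row above hinges on two acceptance tests: a "crisp" accept that requires support to grow, and a "conditional" accept that tolerates a small confidence drop while beam extension continues. A self-contained sketch of that logic (function names mirror the class's private methods; the thresholds and printed deltas are illustrative):

    def crisp_accept(delta_confidence, delta_support, min_improvement=0.0):
        # accept only if confidence does not degrade and support strictly grows
        return delta_confidence >= min_improvement and delta_support > 0

    def conditional_accept(delta_confidence, min_conditional_improvement=-0.01):
        # tolerate a small confidence drop so beam extension can keep probing
        return delta_confidence >= min_conditional_improvement

    print(crisp_accept(0.02, 0.01))    # True: both deltas improve
    print(crisp_accept(0.02, 0.0))     # False: support did not grow
    print(conditional_accept(-0.005))  # True: drop is within the tolerated margin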
olgatsiouri1996/biomisc | [
"b4fdaf3dd49816b7ca9da1d200ab4443455ab784"
]
| [
"fasta_manipulation/tab_to_singlefastas.py"
]
| [
"# python3\nimport itertools\nimport argparse\nfrom Bio import SeqIO\nfrom Bio.Seq import Seq\nfrom Bio.SeqRecord import SeqRecord\nimport pandas as pd\n# input arguments\nap = argparse.ArgumentParser(description=\"convert each row of a tabular file with the fasta headers and sequences in each row in single-fasta files\")\nap.add_argument(\"-in\", \"--input_file\", required=True, help=\"input txt file\")\nargs = vars(ap.parse_args())\n# main\ndf = pd.read_csv(args['input_file'], header=None, sep=\"\\t\")\n# select ids and sequence columns, convert to lists\nheaders = df.iloc[:,0].values.tolist()\nsequences = df.iloc[:,1].values.tolist()\n# iter elements on pairs to export in single fasta files\nfor (ids, seq) in zip(headers, sequences):\n\tseq_for_fasta=SeqRecord(Seq(str(seq)),id=str(ids),description=\"\")\n\tSeqIO.write(seq_for_fasta, \"\".join([str(ids),\".fasta\"]), \"fasta\")\n\n"
]
| [
[
"pandas.read_csv"
]
]
|
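A sketch of the same tab-to-single-fastas round trip as the script above, end to end. The file name `toy.txt` and the two records are made up for illustration; the split logic follows the stored script (Biopython and pandas are assumed installed):

    import pandas as pd
    from Bio import SeqIO
    from Bio.Seq import Seq
    from Bio.SeqRecord import SeqRecord

    # build a toy header<TAB>sequence table, then split it into per-record fastas
    pd.DataFrame([("seq1", "ATGC"), ("seq2", "GGTA")]).to_csv(
        "toy.txt", sep="\t", header=False, index=False
    )
    df = pd.read_csv("toy.txt", header=None, sep="\t")
    for ids, seq in zip(df.iloc[:, 0], df.iloc[:, 1]):
        record = SeqRecord(Seq(str(seq)), id=str(ids), description="")
        SeqIO.write(record, f"{ids}.fasta", "fasta")  # writes seq1.fasta, seq2.fasta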
robbinc91/MeshSegNet | [
"b2ce6818c31170fa0cfb0951a4574a05613097b2"
]
| [
"step2_get_list.py"
]
| [
"import numpy as np\nimport os\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\n\nif __name__ == '__main__':\n\n data_path = './augmentation_vtk_data/'\n output_path = './'\n num_augmentations = 20\n train_size = 0.8\n with_flip = True\n\n num_samples = 36 # define number of samples\n sample_list = list(range(1, num_samples+1))\n sample_name = 'A{0}_Sample_0{1}_d.vtp'\n\n # get valid sample list\n valid_sample_list = []\n for i_sample in sample_list:\n for i_aug in range(num_augmentations):\n if os.path.exists(os.path.join(data_path, sample_name.format(i_aug, i_sample))):\n valid_sample_list.append(i_sample)\n\n # remove duplicated\n sample_list = list(dict.fromkeys(valid_sample_list))\n sample_list = np.asarray(sample_list)\n #print(sample_list)\n\n i_cv = 0\n kf = KFold(n_splits=6, shuffle=False)\n for train_idx, test_idx in kf.split(sample_list):\n\n i_cv += 1\n print('Round:', i_cv)\n\n train_list, test_list = sample_list[train_idx], sample_list[test_idx]\n train_list, val_list = train_test_split(train_list, train_size=0.8, shuffle=True)\n\n print('Training list:\\n', train_list, '\\nValidation list:\\n', val_list, '\\nTest list:\\n', test_list)\n\n #training\n train_name_list = []\n for i_sample in train_list:\n for i_aug in range(num_augmentations):\n #print('Computing Sample: {0}; Aug: {1}...'.format(i_sample, i_aug))\n subject_name = 'A{}_Sample_0{}_d.vtp'.format(i_aug, i_sample)\n train_name_list.append(os.path.join(data_path, subject_name))\n if with_flip:\n subject2_name = 'A{}_Sample_0{}_d.vtp'.format(i_aug, i_sample+1000)\n train_name_list.append(os.path.join(data_path, subject2_name))\n\n with open(os.path.join(output_path, 'train_list_{0}.csv'.format(i_cv)), 'w') as file:\n for f in train_name_list:\n file.write(f+'\\n')\n\n #validation\n val_name_list = []\n for i_sample in val_list:\n for i_aug in range(num_augmentations):\n #print('Computing Sample: {0}; Aug: {1}...'.format(i_sample, i_aug))\n subject_name = 'A{}_Sample_0{}_d.vtp'.format(i_aug, i_sample)\n val_name_list.append(os.path.join(data_path, subject_name))\n if with_flip:\n subject2_name = 'A{}_Sample_0{}_d.vtp'.format(i_aug, i_sample+1000)\n val_name_list.append(os.path.join(data_path, subject2_name))\n\n with open(os.path.join(output_path, 'val_list_{0}.csv'.format(i_cv)), 'w') as file:\n for f in val_name_list:\n file.write(f+'\\n')\n\n #test\n test_df = pd.DataFrame(data=test_list, columns=['Test ID'])\n test_df.to_csv('test_list_{}.csv'.format(i_cv), index=False)\n\n\n print('--------------------------------------------')\n print('with flipped samples:', with_flip)\n print('# of train:', len(train_name_list))\n print('# of validation:', len(val_name_list))\n print('--------------------------------------------')\n"
]
| [
[
"sklearn.model_selection.KFold",
"pandas.DataFrame",
"numpy.asarray",
"sklearn.model_selection.train_test_split"
]
]
|
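The fold-generation script above pairs `KFold` with a nested `train_test_split`: six test folds, each remaining training set split 80/20 into train and validation. A minimal sketch of that split pattern on dummy sample IDs (the counts mirror the script; the print line is illustrative):

    import numpy as np
    from sklearn.model_selection import KFold, train_test_split

    samples = np.arange(1, 37)  # 36 sample IDs, as in the script
    kf = KFold(n_splits=6, shuffle=False)
    for fold, (train_idx, test_idx) in enumerate(kf.split(samples), start=1):
        train, test = samples[train_idx], samples[test_idx]
        # 80/20 split of the training folds into train and validation
        train, val = train_test_split(train, train_size=0.8, shuffle=True)
        print(f"round {fold}: {len(train)} train / {len(val)} val / {len(test)} test")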
bopopescu/classic_diff_geom | [
"2b1d88becbc8cb30962e0995cc78e429e0f5589f"
]
| [
"src/sage/plot/graphics.py"
]
| [
"# -*- encoding: utf-8 -*-\nr\"\"\"\nGraphics objects\n\nThis file contains the definition of the classes :class:`Graphics` and\n:class:`GraphicsArray`. Usually, you don't create these classes directly\n(although you can do it), you would use :func:`plot` or\n:func:`graphics_array` instead.\n\nAUTHORS:\n\n- Jeroen Demeyer (2012-04-19): split off this file from plot.py (:trac:`12857`)\n- Punarbasu Purkayastha (2012-05-20): Add logarithmic scale (:trac:`4529`)\n\n\"\"\"\n\n#*****************************************************************************\n# Copyright (C) 2006 Alex Clemesha <[email protected]>\n# Copyright (C) 2006-2008 William Stein <[email protected]>\n# Copyright (C) 2010 Jason Grout\n#\n# Distributed under the terms of the GNU General Public License (GPL)\n# as published by the Free Software Foundation; either version 2 of\n# the License, or (at your option) any later version.\n# http://www.gnu.org/licenses/\n#*****************************************************************************\n\nimport os\nimport sage.misc.misc\nfrom sage.misc.html import html\nfrom sage.misc.temporary_file import tmp_filename, graphics_filename\nfrom sage.structure.sage_object import SageObject\nfrom sage.misc.decorators import suboptions\nfrom colors import rgbcolor\n\nALLOWED_EXTENSIONS = ['.eps', '.pdf', '.png', '.ps', '.sobj', '.svg']\nDEFAULT_DPI = 100\nDOCTEST_MODE_FILE = os.path.join(sage.misc.misc.SAGE_TMP, 'test.png')\n\ndef show_default(default=None):\n r\"\"\"\n Set the default for showing plots using any plot commands. If\n called with no arguments, returns the current default.\n\n If this is ``True`` (the default) then any plot object\n when displayed will be displayed as an actual plot instead of text,\n i.e., the show command is not needed.\n\n EXAMPLES:\n\n The default starts out as ``True`` in interactive use and\n ``False`` in doctests::\n\n sage: show_default()\n doctest:1: DeprecationWarning: this is done automatically by the doctest framework\n See http://trac.sagemath.org/14469 for details.\n False\n \"\"\"\n from sage.misc.superseded import deprecation\n deprecation(14469, 'this is done automatically by the doctest framework')\n import sage.doctest\n if default is None:\n return not sage.doctest.DOCTEST_MODE\n sage.doctest.DOCTEST_MODE = not bool(default)\n\n# If do_verify is True, options are checked when drawing a\n# GraphicsPrimitive. See primitive.py\ndo_verify = True\n\ndef is_Graphics(x):\n \"\"\"\n Return True if `x` is a Graphics object.\n\n EXAMPLES::\n\n sage: from sage.plot.graphics import is_Graphics\n sage: is_Graphics(1)\n False\n sage: is_Graphics(disk((0.0, 0.0), 1, (0, pi/2)))\n True\n \"\"\"\n return isinstance(x, Graphics)\n\nclass Graphics(SageObject):\n \"\"\"\n The Graphics object is an empty list of graphics objects. 
It is\n useful to use this object when initializing a for loop where\n different graphics object will be added to the empty object.\n\n EXAMPLES::\n\n sage: G = Graphics(); print G\n Graphics object consisting of 0 graphics primitives\n sage: c = circle((1,1), 1)\n sage: G+=c; print G\n Graphics object consisting of 1 graphics primitive\n\n Here we make a graphic of embedded isosceles triangles, coloring\n each one with a different color as we go::\n\n sage: h=10; c=0.4; p=0.5;\n sage: G = Graphics()\n sage: for x in srange(1,h+1):\n ....: l = [[0,x*sqrt(3)],[-x/2,-x*sqrt(3)/2],[x/2,-x*sqrt(3)/2],[0,x*sqrt(3)]]\n ....: G+=line(l,color=hue(c + p*(x/h)))\n sage: G.show(figsize=[5,5])\n\n We can change the scale of the axes in the graphics before displaying.::\n\n sage: G = plot(exp, 1, 10)\n sage: G.show(scale='semilogy')\n\n TESTS:\n\n From :trac:`4604`, ensure Graphics can handle 3d objects::\n\n sage: g = Graphics()\n sage: g += sphere((1, 1, 1), 2)\n sage: g.show()\n\n We check that graphics can be pickled (we can't use equality on\n graphics so we just check that the load/dump cycle gives a\n :class:`Graphics` instance)::\n\n sage: g = Graphics()\n sage: g2 = loads(dumps(g))\n sage: g2.show()\n\n ::\n\n sage: isinstance(g2, Graphics)\n True\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Create a new empty Graphics objects with all the defaults.\n\n EXAMPLES::\n\n sage: G = Graphics()\n \"\"\"\n self._axes_color = (0, 0, 0)\n self._axes_label_color = (0, 0, 0)\n self._axes_width = 0.8\n self._bbox_extra_artists = []\n self._extra_kwds = {}\n self._fontsize = 10\n self._legend_colors = []\n self._legend_opts = {}\n self._objects = []\n self._show_axes = True\n self._show_legend = False\n self._tick_label_color = (0, 0, 0)\n\n def set_aspect_ratio(self, ratio):\n \"\"\"\n Set the aspect ratio, which is the ratio of height and width\n of a unit square (i.e., height/width of a unit square), or\n 'automatic' (expand to fill the figure).\n\n INPUT:\n\n\n - ``ratio`` - a positive real number or 'automatic'\n\n\n EXAMPLES: We create a plot of the upper half of a circle, but it\n doesn't look round because the aspect ratio is off::\n\n sage: P = plot(sqrt(1-x^2),(x,-1,1)); P\n\n So we set the aspect ratio and now it is round::\n\n sage: P.set_aspect_ratio(1)\n sage: P.aspect_ratio()\n 1.0\n sage: P\n\n Note that the aspect ratio is inherited upon addition (which takes\n the max of aspect ratios of objects whose aspect ratio has been\n set)::\n\n sage: P + plot(sqrt(4-x^2),(x,-2,2))\n\n In the following example, both plots produce a circle that looks\n twice as tall as wide::\n\n sage: Q = circle((0,0), 0.5); Q.set_aspect_ratio(2)\n sage: (P + Q).aspect_ratio(); P+Q\n 2.0\n sage: (Q + P).aspect_ratio(); Q+P\n 2.0\n \"\"\"\n if ratio != 'auto' and ratio != 'automatic':\n ratio = float(ratio)\n if ratio <= 0:\n raise ValueError(\"the aspect ratio must be positive or 'automatic'\")\n else:\n ratio = 'automatic'\n self._extra_kwds['aspect_ratio'] = ratio\n\n def aspect_ratio(self):\n \"\"\"\n Get the current aspect ratio, which is the ratio of height to\n width of a unit square, or 'automatic'.\n\n OUTPUT: a positive float (height/width of a unit square), or 'automatic'\n (expand to fill the figure).\n\n EXAMPLES:\n\n The default aspect ratio for a new blank Graphics object is 'automatic'::\n\n sage: P = Graphics()\n sage: P.aspect_ratio()\n 'automatic'\n\n The aspect ratio can be explicitly set different than the object's default::\n\n sage: P = circle((1,1), 1)\n sage: P.aspect_ratio()\n 1.0\n sage: 
P.set_aspect_ratio(2)\n sage: P.aspect_ratio()\n 2.0\n sage: P.set_aspect_ratio('automatic')\n sage: P.aspect_ratio()\n 'automatic'\n \"\"\"\n return self._extra_kwds.get('aspect_ratio', 'automatic')\n\n def legend(self, show=None):\n r\"\"\"\n Set whether or not the legend is shown by default.\n\n INPUT:\n\n - ``show`` - (default: None) a boolean\n\n If called with no input, return the current legend setting.\n\n EXAMPLES:\n\n By default no legend is displayed::\n\n sage: P = plot(sin)\n sage: P.legend()\n False\n\n But if we put a label then the legend is shown::\n\n sage: P = plot(sin, legend_label='sin')\n sage: P.legend()\n True\n\n We can turn it on or off::\n\n sage: P.legend(False)\n sage: P.legend()\n False\n sage: P.legend(True)\n sage: P # show with the legend\n \"\"\"\n if show is None:\n return self._show_legend\n else:\n self._show_legend = bool(show)\n\n def set_legend_options(self, **kwds):\n r\"\"\"\n Set various legend options.\n\n INPUT:\n\n - ``title`` - (default: None) string, the legend title\n\n - ``ncol`` - (default: 1) positive integer, the number of columns\n\n - ``columnspacing`` - (default: None) the spacing between columns\n\n - ``borderaxespad`` - (default: None) float, length between the axes and the legend\n\n - ``back_color`` - (default: (0.9, 0.9, 0.9)) This parameter can be a string\n denoting a color or an RGB tuple. The string can be a color name\n as in ('red', 'green', 'yellow', ...) or a floating point number\n like '0.8' which gets expanded to (0.8, 0.8, 0.8). The\n tuple form is just a floating point RGB tuple with all values ranging\n from 0 to 1.\n\n - ``handlelength`` - (default: 0.05) float, the length of the legend handles\n\n - ``handletextpad`` - (default: 0.5) float, the pad between the legend handle and text\n\n - ``labelspacing`` - (default: 0.02) float, vertical space between legend entries\n\n - ``loc`` - (default: 'best') May be a string, an integer or a tuple. String or\n integer inputs must be one of the following:\n\n - 0, 'best'\n\n - 1, 'upper right'\n\n - 2, 'upper left'\n\n - 3, 'lower left'\n\n - 4, 'lower right'\n\n - 5, 'right'\n\n - 6, 'center left'\n\n - 7, 'center right'\n\n - 8, 'lower center'\n\n - 9, 'upper center'\n\n - 10, 'center'\n\n - Tuple arguments represent an absolute (x, y) position on the plot\n in axes coordinates (meaning from 0 to 1 in each direction).\n\n - ``markerscale`` - (default: 0.6) float, how much to scale the markers in the legend.\n\n - ``numpoints`` - (default: 2) integer, the number of points in the legend for line\n\n - ``borderpad`` - (default: 0.6) float, the fractional whitespace inside the legend border\n (between 0 and 1)\n\n - ``font_family`` - (default: 'sans-serif') string, one of 'serif', 'sans-serif',\n 'cursive', 'fantasy', 'monospace'\n\n - ``font_style`` - (default: 'normal') string, one of 'normal', 'italic', 'oblique'\n\n - ``font_variant`` - (default: 'normal') string, one of 'normal', 'small-caps'\n\n - ``font_weight`` - (default: 'medium') string, one of 'black', 'extra bold', 'bold',\n 'semibold', 'medium', 'normal', 'light'\n\n - ``font_size`` - (default: 'medium') string, one of 'xx-small', 'x-small', 'small',\n 'medium', 'large', 'x-large', 'xx-large' or an absolute font size (e.g. 12)\n\n - ``shadow`` - (default: False) boolean - draw a shadow behind the legend\n\n - ``fancybox`` - (default: False) a boolean. 
If True, draws a frame with a round\n fancybox.\n\n These are all keyword arguments.\n\n OUTPUT: a dictionary of all current legend options\n\n EXAMPLES:\n\n By default, no options are set::\n\n sage: p = plot(tan, legend_label='tan')\n sage: p.set_legend_options()\n {}\n\n We build a legend with a shadow::\n\n sage: p.set_legend_options(shadow=True)\n sage: p.set_legend_options()['shadow']\n True\n\n To set the legend position to the center of the plot, all these\n methods are roughly equivalent::\n\n sage: p.set_legend_options(loc='center'); p\n\n ::\n\n sage: p.set_legend_options(loc=10); p\n\n ::\n\n sage: p.set_legend_options(loc=(0.5,0.5)); p # aligns the bottom of the box to the center\n \"\"\"\n if len(kwds) == 0:\n return self._legend_opts\n else:\n self._legend_opts.update(kwds)\n\n\n def get_axes_range(self):\n \"\"\"\n Returns a dictionary of the range of the axes for this graphics\n object. This is fall back to the ranges in get_minmax_data() for\n any value which the user has not explicitly set.\n\n .. warning::\n\n Changing the dictionary returned by this function does not\n change the axes range for this object. To do that, use the\n :meth:`set_axes_range` method.\n\n EXAMPLES::\n\n sage: L = line([(1,2), (3,-4), (2, 5), (1,2)])\n sage: list(sorted(L.get_axes_range().items()))\n [('xmax', 3.0), ('xmin', 1.0), ('ymax', 5.0), ('ymin', -4.0)]\n sage: L.set_axes_range(xmin=-1)\n sage: list(sorted(L.get_axes_range().items()))\n [('xmax', 3.0), ('xmin', -1.0), ('ymax', 5.0), ('ymin', -4.0)]\n \"\"\"\n axes_range = self.get_minmax_data()\n axes_range.update(self._get_axes_range_dict())\n return axes_range\n\n def set_axes_range(self, xmin=None, xmax=None, ymin=None, ymax=None):\n \"\"\"\n Set the ranges of the `x` and `y` axes.\n\n INPUT:\n\n\n - ``xmin, xmax, ymin, ymax`` - floats\n\n\n EXAMPLES::\n\n sage: L = line([(1,2), (3,-4), (2, 5), (1,2)])\n sage: L.set_axes_range(-1, 20, 0, 2)\n sage: d = L.get_axes_range()\n sage: d['xmin'], d['xmax'], d['ymin'], d['ymax']\n (-1.0, 20.0, 0.0, 2.0)\n \"\"\"\n l = locals()\n axes_range = self._get_axes_range_dict()\n for name in ['xmin', 'xmax', 'ymin', 'ymax']:\n if l[name] is not None:\n axes_range[name] = float(l[name])\n\n axes_range = set_axes_range\n\n def _get_axes_range_dict(self):\n \"\"\"\n Returns the underlying dictionary used to store the user's\n custom ranges for the axes on this object.\n\n EXAMPLES::\n\n sage: L = line([(1,2), (3,-4), (2, 5), (1,2)])\n sage: L._get_axes_range_dict()\n {}\n sage: L.set_axes_range(xmin=-1)\n sage: L._get_axes_range_dict()\n {'xmin': -1.0}\n \"\"\"\n try:\n return self._axes_range\n except AttributeError:\n self._axes_range = {}\n return self._axes_range\n\n def fontsize(self, s=None):\n \"\"\"\n Set the font size of axes labels and tick marks.\n\n INPUT:\n\n\n - ``s`` - integer, a font size in points.\n\n\n If called with no input, return the current fontsize.\n\n EXAMPLES::\n\n sage: L = line([(1,2), (3,-4), (2, 5), (1,2)])\n sage: L.fontsize()\n 10\n sage: L.fontsize(20)\n sage: L.fontsize()\n 20\n\n All the numbers on the axes will be very large in this plot::\n\n sage: L\n \"\"\"\n if s is None:\n try:\n return self._fontsize\n except AttributeError:\n self._fontsize = 10\n return self._fontsize\n self._fontsize = int(s)\n\n def axes(self, show=None):\n \"\"\"\n Set whether or not the `x` and `y` axes are shown\n by default.\n\n INPUT:\n\n\n - ``show`` - bool\n\n\n If called with no input, return the current axes setting.\n\n EXAMPLES::\n\n sage: L = line([(1,2), (3,-4), (2, 5), 
(1,2)])\n\n By default the axes are displayed.\n\n ::\n\n sage: L.axes()\n True\n\n But we turn them off, and verify that they are off\n\n ::\n\n sage: L.axes(False)\n sage: L.axes()\n False\n\n Displaying L now shows a triangle but no axes.\n\n ::\n\n sage: L\n \"\"\"\n if show is None:\n try:\n return self._show_axes\n except AttributeError:\n self._show_axes = True\n return self._show_axes\n self._show_axes = bool(show)\n\n def axes_color(self, c=None):\n \"\"\"\n Set the axes color.\n\n If called with no input, return the current axes_color setting.\n\n INPUT:\n\n\n - ``c`` - an RGB color 3-tuple, where each tuple entry\n is a float between 0 and 1\n\n\n EXAMPLES: We create a line, which has like everything a default\n axes color of black.\n\n ::\n\n sage: L = line([(1,2), (3,-4), (2, 5), (1,2)])\n sage: L.axes_color()\n (0, 0, 0)\n\n We change the axes color to red and verify the change.\n\n ::\n\n sage: L.axes_color((1,0,0))\n sage: L.axes_color()\n (1.0, 0.0, 0.0)\n\n When we display the plot, we'll see a blue triangle and bright red\n axes.\n\n ::\n\n sage: L\n \"\"\"\n if c is None:\n try:\n return self._axes_color\n\n except AttributeError:\n self._axes_color = (0.0, 0.0, 0.0)\n return self._axes_color\n self._axes_color = rgbcolor(c)\n\n def axes_labels(self, l=None):\n \"\"\"\n Set the axes labels.\n\n INPUT:\n\n\n - ``l`` - (default: None) a list of two strings or\n None\n\n\n OUTPUT: a 2-tuple of strings\n\n If l is None, returns the current ``axes_labels``,\n which is itself by default None. The default labels are both\n empty.\n\n EXAMPLES: We create a plot and put x and y axes labels on it.\n\n ::\n\n sage: p = plot(sin(x), (x, 0, 10))\n sage: p.axes_labels(['$x$','$y$'])\n sage: p.axes_labels()\n ('$x$', '$y$')\n\n Now when you plot p, you see x and y axes labels::\n\n sage: p\n\n Notice that some may prefer axes labels which are not\n typeset::\n\n sage: plot(sin(x), (x, 0, 10), axes_labels=['x','y'])\n\n TESTS:\n\n Unicode strings are acceptable; see :trac:`13161`. Note that\n this does not guarantee that matplotlib will handle the strings\n properly, although it should.\n\n ::\n\n sage: c = circle((0,0), 1)\n sage: c.axes_labels(['axe des abscisses', u'axe des ordonnées'])\n sage: c._axes_labels\n ('axe des abscisses', u'axe des ordonn\\xc3\\xa9es')\n\n \"\"\"\n if l is None:\n try:\n return self._axes_labels\n except AttributeError:\n self._axes_labels = None\n return self._axes_labels\n if not isinstance(l, (list, tuple)):\n raise TypeError(\"l must be a list or tuple\")\n if len(l) != 2:\n raise ValueError(\"l must have length 2\")\n self._axes_labels = tuple(l)\n\n def axes_label_color(self, c=None):\n r\"\"\"\n Set the color of the axes labels.\n\n The axes labels are placed at the edge of the x and y axes, and are\n not on by default (use the ``axes_labels`` command to\n set them; see the example below). 
This function just changes their\n color.\n\n INPUT:\n\n\n - ``c`` - an RGB 3-tuple of numbers between 0 and 1\n\n\n If called with no input, return the current axes_label_color\n setting.\n\n EXAMPLES: We create a plot, which by default has axes label color\n black.\n\n ::\n\n sage: p = plot(sin, (-1,1))\n sage: p.axes_label_color()\n (0, 0, 0)\n\n We change the labels to be red, and confirm this::\n\n sage: p.axes_label_color((1,0,0))\n sage: p.axes_label_color()\n (1.0, 0.0, 0.0)\n\n We set labels, since otherwise we won't see anything.\n\n ::\n\n sage: p.axes_labels(['$x$ axis', '$y$ axis'])\n\n In the plot below, notice that the labels are red::\n\n sage: p\n \"\"\"\n if c is None:\n try:\n return self._axes_label_color\n except AttributeError:\n self._axes_label_color = (0, 0, 0)\n return self._axes_label_color\n self._axes_label_color = rgbcolor(c)\n\n\n def axes_width(self, w=None):\n r\"\"\"\n Set the axes width. Use this to draw a plot with really fat or\n really thin axes.\n\n INPUT:\n\n\n - ``w`` - a float\n\n\n If called with no input, return the current\n ``axes_width`` setting.\n\n EXAMPLE: We create a plot, see the default axes width (with funny\n Python float rounding), then reset the width to 10 (very fat).\n\n ::\n\n sage: p = plot(cos, (-3,3))\n sage: p.axes_width()\n 0.8\n sage: p.axes_width(10)\n sage: p.axes_width()\n 10.0\n\n Finally we plot the result, which is a graph with very fat axes.\n\n ::\n\n sage: p\n \"\"\"\n if w is None:\n try:\n return self._axes_width\n except AttributeError:\n self._axes_width = True\n return self._axes_width\n self._axes_width = float(w)\n\n def tick_label_color(self, c=None):\n \"\"\"\n Set the color of the axes tick labels.\n\n INPUT:\n\n\n - ``c`` - an RGB 3-tuple of numbers between 0 and 1\n\n\n If called with no input, return the current tick_label_color\n setting.\n\n EXAMPLES::\n\n sage: p = plot(cos, (-3,3))\n sage: p.tick_label_color()\n (0, 0, 0)\n sage: p.tick_label_color((1,0,0))\n sage: p.tick_label_color()\n (1.0, 0.0, 0.0)\n sage: p\n \"\"\"\n if c is None:\n try:\n return self._tick_label_color\n except AttributeError:\n self._tick_label_color = (0, 0, 0)\n return self._tick_label_color\n self._tick_label_color = rgbcolor(c)\n\n def _repr_(self):\n r\"\"\"\n Return a string representation of the graphics objects.\n\n OUTPUT:\n\n String.\n\n EXAMPLES:\n\n We create a plot and call :meth:`show` on it, which causes it\n to be displayed as a plot::\n\n sage: P = plot(cos, (-1,1))\n sage: P.show()\n\n Just doing this also displays the plot::\n\n sage: P\n\n Using the Python `repr` or `str` commands do not display the\n plot::\n\n sage: repr(P)\n 'Graphics object consisting of 1 graphics primitive'\n sage: str(P)\n 'Graphics object consisting of 1 graphics primitive'\n sage: print(P)\n Graphics object consisting of 1 graphics primitive\n\n TESTS::\n\n sage: P._repr_()\n 'Graphics object consisting of 1 graphics primitive'\n \"\"\"\n return self.__str__()\n\n def _graphics_(self):\n \"\"\"\n Show graphics.\n\n The presence of this method is used by the displayhook to\n decide that we want to see a graphical output by default.\n\n OUTPUT:\n\n Return ``True`` if graphical output was generated (might not\n be shown in doctest mode), otherwise ``False``.\n\n EXAMPLES::\n\n sage: g = Graphics()\n sage: g._graphics_()\n True\n sage: [g, g]\n [Graphics object consisting of 0 graphics primitives,\n Graphics object consisting of 0 graphics primitives]\n \"\"\"\n self.show()\n return True\n\n def __str__(self):\n r\"\"\"\n Return 
string representation of this plot.\n\n OUTPUT:\n\n String.\n\n EXAMPLES::\n\n sage: S = circle((0,0), 2); S.__str__()\n 'Graphics object consisting of 1 graphics primitive'\n sage: str(S)\n 'Graphics object consisting of 1 graphics primitive'\n sage: print S\n Graphics object consisting of 1 graphics primitive\n \"\"\"\n s = \"Graphics object consisting of %s graphics primitives\"%(len(self))\n if len(self) == 1:\n s = s[:-1]\n return s\n\n def __getitem__(self, i):\n \"\"\"\n Returns the ith graphics primitive object:\n\n EXAMPLE::\n\n sage: G = circle((1,1),2) + circle((2,2),5); print G\n Graphics object consisting of 2 graphics primitives\n sage: G[1]\n Circle defined by (2.0,2.0) with r=5.0\n \"\"\"\n return self._objects[i]\n\n def __len__(self):\n \"\"\"\n If G is of type Graphics, then len(G) gives the number of distinct\n graphics primitives making up that object.\n\n EXAMPLES::\n\n sage: G = circle((1,1),1) + circle((1,2),1) + circle((1,2),5); print G\n Graphics object consisting of 3 graphics primitives\n sage: len(G)\n 3\n \"\"\"\n return len(self._objects)\n\n def __delitem__(self, i):\n \"\"\"\n If G is of type Graphics, then del(G[i]) removes the ith distinct\n graphic primitive making up that object.\n\n EXAMPLES::\n\n sage: G = circle((1,1),1) + circle((1,2),1) + circle((1,2),5); print G\n Graphics object consisting of 3 graphics primitives\n sage: len(G)\n 3\n sage: del(G[2])\n sage: print G\n Graphics object consisting of 2 graphics primitives\n sage: len(G)\n 2\n \"\"\"\n del self._objects[int(i)]\n\n def __setitem__(self, i, x):\n \"\"\"\n You can replace a GraphicPrimitive (point, line, circle, etc...) in\n a Graphics object G with any other GraphicPrimitive\n\n EXAMPLES::\n\n sage: G = circle((1,1),1) + circle((1,2),1) + circle((1,2),5); print G\n Graphics object consisting of 3 graphics primitives\n\n ::\n\n sage: p = polygon([[1,3],[2,-2],[1,1],[1,3]]); print p\n Graphics object consisting of 1 graphics primitive\n\n ::\n\n sage: G[1] = p[0]\n sage: G # show the plot\n \"\"\"\n from sage.plot.primitive import GraphicPrimitive\n if not isinstance(x, GraphicPrimitive):\n raise TypeError(\"x must be a GraphicPrimitive\")\n self._objects[int(i)] = x\n\n def __radd__(self, other):\n \"\"\"\n Compute and return other + this graphics object.\n\n This only works when other is a Python int equal to 0. In all other\n cases a TypeError is raised. The main reason for this function is\n to make summing a list of graphics objects easier.\n\n EXAMPLES::\n\n sage: S = circle((0,0), 2)\n sage: print int(0) + S\n Graphics object consisting of 1 graphics primitive\n sage: print S + int(0)\n Graphics object consisting of 1 graphics primitive\n\n The following would fail were it not for this function::\n\n sage: v = [circle((0,0), 2), circle((2,3), 1)]\n sage: print sum(v)\n Graphics object consisting of 2 graphics primitives\n \"\"\"\n if isinstance(other, (int, long)) and other == 0:\n return self\n raise TypeError\n\n def __add__(self, other):\n \"\"\"\n If you have any Graphics object G1, you can always add any other\n amount of Graphics objects G2,G3,... to form a new Graphics object:\n G4 = G1 + G2 + G3.\n\n The xmin, xmax, ymin, and ymax properties of the graphics objects\n are expanded to include all objects in both scenes. 
If the aspect\n ratio property of either or both objects are set, then the larger\n aspect ratio is chosen, with 'automatic' being overridden by a\n numeric aspect ratio.\n\n If one of the graphics object is set to show a legend, then\n the resulting object will also be set to show a legend. Legend\n options are propagated if set. If the same legend option is\n present in both arguments, the latter value is used.\n\n EXAMPLES::\n\n sage: g1 = plot(abs(sqrt(x^3-1)), (x,1,5), frame=True)\n sage: g2 = plot(-abs(sqrt(x^3-1)), (x,1,5), color='red')\n sage: g1 + g2 # displays the plot\n\n TESTS:\n\n Extra keywords to show are propagated::\n\n sage: (g1 + g2)._extra_kwds=={'aspect_ratio': 'automatic', 'frame': True}\n True\n sage: g1.set_aspect_ratio(2)\n sage: (g1+g2).aspect_ratio()\n 2.0\n sage: g2.set_aspect_ratio(3)\n sage: (g1+g2).aspect_ratio()\n 3.0\n\n As are legend options, :trac:`12936`::\n\n sage: p1 = plot(x, x, 0, 1)\n sage: p2 = p1\n sage: p1.set_legend_options(back_color = 'white')\n sage: p2.set_legend_options(shadow = True)\n sage: p3 = p1 + p2\n sage: p3._legend_opts\n {'shadow': True, 'back_color': 'white'}\n\n If the same legend option is specified more than once, the\n latter takes precedence::\n\n sage: p1 = plot(x, x, 0, 1)\n sage: p2 = p1\n sage: p1.set_legend_options(shadow = True)\n sage: p2.set_legend_options(shadow = False)\n sage: p3 = p1 + p2\n sage: p3._legend_opts\n {'shadow': False}\n\n \"\"\"\n if isinstance(other, int) and other == 0:\n return self\n if not isinstance(other, Graphics):\n from sage.plot.plot3d.base import Graphics3d\n if isinstance(other, Graphics3d):\n return self.plot3d() + other\n raise TypeError(\"other (=%s) must be a Graphics objects\"%other)\n g = Graphics()\n g._objects = self._objects + other._objects\n g._show_legend = self._show_legend or other._show_legend\n g._extra_kwds.update(self._extra_kwds)\n g._extra_kwds.update(other._extra_kwds)\n g._legend_colors = self._legend_colors + other._legend_colors\n g._legend_opts.update(self._legend_opts)\n g._legend_opts.update(other._legend_opts)\n if self.aspect_ratio()=='automatic':\n g.set_aspect_ratio(other.aspect_ratio())\n elif other.aspect_ratio()=='automatic':\n g.set_aspect_ratio(self.aspect_ratio())\n else:\n g.set_aspect_ratio(max(self.aspect_ratio(), other.aspect_ratio()))\n return g\n\n def add_primitive(self, primitive):\n \"\"\"\n Adds a primitive to this graphics object.\n\n EXAMPLES:\n\n We give a very explicit example::\n\n sage: G = Graphics()\n sage: from sage.plot.line import Line\n sage: from sage.plot.arrow import Arrow\n sage: L = Line([3,4,2,7,-2],[1,2,e,4,5.],{'alpha':1,'thickness':2,'rgbcolor':(0,1,1),'legend_label':''})\n sage: A = Arrow(2,-5,.1,.2,{'width':3,'head':0,'rgbcolor':(1,0,0),'linestyle':'dashed','zorder':8,'legend_label':''})\n sage: G.add_primitive(L)\n sage: G.add_primitive(A)\n sage: G\n \"\"\"\n self._objects.append(primitive)\n\n def plot(self, *args, **kwds):\n \"\"\"\n Draw a 2D plot of this graphics object, which just returns this\n object since this is already a 2D graphics object.\n\n EXAMPLES::\n\n sage: S = circle((0,0), 2)\n sage: S.plot() is S\n True\n \"\"\"\n return self\n\n def plot3d(self, z=0, **kwds):\n \"\"\"\n Returns an embedding of this 2D plot into the xy-plane of 3D space,\n as a 3D plot object. 
An optional parameter z can be given to\n specify the z-coordinate.\n\n EXAMPLES::\n\n sage: sum([plot(z*sin(x), 0, 10).plot3d(z) for z in range(6)]) # long time\n \"\"\"\n from sage.plot.plot3d.base import Graphics3dGroup\n g = Graphics3dGroup([g.plot3d(**kwds) for g in self._objects])\n if z:\n g = g.translate(0,0,z)\n return g\n\n @classmethod\n def _extract_kwds_for_show(cls, kwds, ignore=[]):\n \"\"\"\n Extract keywords relevant to show() from the provided dictionary.\n\n EXAMPLES::\n\n sage: kwds = {'f': lambda x: x, 'xmin': 0, 'figsize': [1,1], 'plot_points': (40, 40)}\n sage: G_kwds = Graphics._extract_kwds_for_show(kwds, ignore='xmin')\n sage: kwds # Note how this action modifies the passed dictionary\n {'xmin': 0, 'plot_points': (40, 40), 'f': <function <lambda> at ...>}\n sage: G_kwds\n {'figsize': [1, 1]}\n\n This method is intended to be used with _set_extra_kwds(). Here is an\n idiom to ensure the correct keywords will get passed on to show()::\n\n sage: options = {} # Usually this will come from an argument\n sage: g = Graphics()\n sage: g._set_extra_kwds(Graphics._extract_kwds_for_show(options))\n \"\"\"\n result = {}\n for option in cls.SHOW_OPTIONS:\n if option not in ignore:\n try:\n result[option] = kwds.pop(option)\n except KeyError:\n pass\n return result\n\n def _set_extra_kwds(self, kwds):\n \"\"\"\n Set a dictionary of keywords that will get passed on to show().\n\n TESTS::\n\n sage: g = Graphics()\n sage: g._extra_kwds\n {}\n sage: g._set_extra_kwds({'figsize': [10,10]})\n sage: g._extra_kwds\n {'figsize': [10, 10]}\n sage: g.show() # Now the (blank) plot will be extra large\n \"\"\"\n self._extra_kwds = kwds\n\n def _set_scale(self, figure, scale=None, base=None):\n \"\"\"\n Set the scale of the axes in the current figure. This function is\n only for internal use.\n\n INPUT:\n - ``figure`` -- the matplotlib figure instance.\n - ``scale`` -- the scale of the figure. Values it can take are\n ``\"linear\"``, ``\"loglog\"``, ``\"semilogx\"``, ``\"semilogy\"``. See\n :meth:`show` for other options it can take.\n - ``base`` -- the base of the logarithm if a logarithmic scale is\n set. 
See :meth:`show` for the options it can take.\n\n OUTPUT:\n The scale in the form of a tuple: (xscale, yscale, basex, basey)\n\n EXAMPLES::\n\n sage: p = plot(x,1,10)\n sage: fig = p.matplotlib()\n sage: p._set_scale(fig, scale='linear', base=2)\n ('linear', 'linear', 10, 10)\n sage: p._set_scale(fig, scale='semilogy', base=2)\n ('linear', 'log', 10, 2)\n sage: p._set_scale(fig, scale=('loglog', 2, 3))\n ('log', 'log', 2, 3)\n sage: p._set_scale(fig, scale=['semilogx', 2])\n ('log', 'linear', 2, 10)\n\n TESTS::\n\n sage: p._set_scale(fig, 'log')\n Traceback (most recent call last):\n ...\n ValueError: The scale must be one of 'linear', 'loglog', 'semilogx' or 'semilogy' -- got 'log'\n sage: p._set_scale(fig, ('loglog', 1))\n Traceback (most recent call last):\n ...\n ValueError: The base of the logarithm must be greater than 1\n \"\"\"\n if scale is None:\n return ('linear', 'linear', 10, 10)\n if isinstance(scale, (list, tuple)):\n if len(scale) != 2 and len(scale) != 3:\n raise ValueError(\"If the input is a tuple, it must be of \"\n \"the form (scale, base) or (scale, basex, basey)\")\n if len(scale) == 2:\n base = scale[1]\n else:\n base = scale[1:]\n scale = scale[0]\n\n if scale not in ('linear', 'loglog', 'semilogx', 'semilogy'):\n raise ValueError(\"The scale must be one of 'linear', 'loglog',\"\n \" 'semilogx' or 'semilogy' -- got '{0}'\".format(scale))\n\n if isinstance(base, (list, tuple)):\n basex, basey = base\n elif base is None:\n basex = basey = 10\n else:\n basex = basey = base\n\n if basex <= 1 or basey <= 1:\n raise ValueError(\"The base of the logarithm must be greater \"\n \"than 1\")\n\n ax = figure.get_axes()[0]\n xscale = yscale = 'linear'\n if scale == 'linear':\n basex = basey = 10\n elif scale == 'loglog':\n ax.set_xscale('log', basex=basex)\n ax.set_yscale('log', basey=basey)\n xscale = yscale = 'log'\n elif scale == 'semilogx':\n ax.set_xscale('log', basex=basex)\n basey = 10\n xscale = 'log'\n elif scale == 'semilogy':\n ax.set_yscale('log', basey=basey)\n basex = 10\n yscale = 'log'\n\n return (xscale, yscale, basex, basey)\n\n\n # This dictionary has the default values for the keywords to show(). 
When\n # show is invoked with keyword arguments, those arguments are merged with\n # this dictionary to create a set of keywords with the defaults filled in.\n # Then, those keywords are passed on to save().\n\n # NOTE: If you intend to use a new parameter in show(), you should update\n # this dictionary to contain the default value for that parameter.\n\n SHOW_OPTIONS = dict(filename=None,\n # axes options\n axes=None, axes_labels=None, axes_pad=.02,\n base=None, scale=None,\n xmin=None, xmax=None, ymin=None, ymax=None,\n # Figure options\n aspect_ratio=None, dpi=DEFAULT_DPI, fig_tight=True,\n figsize=None, fontsize=None, frame=False,\n title=None, title_pos=None, transparent=False,\n # Grid options\n gridlines=None, gridlinesstyle=None,\n hgridlinesstyle=None, vgridlinesstyle=None,\n # Legend options\n legend_options={}, show_legend=None,\n # Ticks options\n ticks=None, tick_formatter=None, ticks_integer=False,\n # Text options\n typeset='default')\n\n @suboptions('legend',\n back_color=(0.9, 0.9, 0.9), borderpad=0.6,\n borderaxespad=None,\n columnspacing=None,\n fancybox=False, font_family='sans-serif',\n font_size='medium', font_style='normal',\n font_variant='normal', font_weight='medium',\n handlelength=0.05, handletextpad=0.5,\n labelspacing=0.02, loc='best',\n markerscale=0.6, ncol=1, numpoints=2,\n shadow=False, title=None)\n def show(self, **kwds):\n \"\"\"\n Show this graphics image with the default image viewer.\n\n OPTIONAL INPUT:\n\n - ``filename`` - (default: None) string\n\n - ``dpi`` - dots per inch\n\n - ``figsize`` - [width, height]\n\n - ``fig_tight`` - (default: True) whether to clip the drawing\n tightly around drawn objects. If True, then the resulting\n image will usually not have dimensions corresponding to\n ``figsize``. If False, the resulting image will have\n dimensions corresponding to ``figsize``.\n\n - ``aspect_ratio`` - the perceived height divided by the\n perceived width. For example, if the aspect ratio is set to ``1``, circles\n will look round and a unit square will appear to have sides\n of equal length; if the aspect ratio is set to ``2``, vertical units will be\n twice as long as horizontal units, so a unit square will be twice as\n high as it is wide. 
If set to ``'automatic'``, the aspect ratio\n is determined by ``figsize`` and the picture fills the figure.\n\n - ``axes`` - (default: True)\n\n - ``axes_labels`` - (default: None) list (or tuple) of two\n strings; the first is used as the label for the horizontal\n axis, and the second for the vertical axis.\n\n - ``fontsize`` - (default: current setting -- 10) positive\n integer; used for axes labels; if you make this very large,\n you may have to increase figsize to see all labels.\n\n - ``frame`` - (default: False) draw a frame around the image\n\n - ``gridlines`` - (default: None) can be any of the following:\n\n - None, False: do not add grid lines.\n\n - True, \"automatic\", \"major\": add grid lines at major ticks of the axes.\n\n - \"minor\": add grid lines at major and minor ticks.\n\n - [xlist,ylist]: a tuple or list containing\n two elements, where xlist (or ylist) can be\n any of the following.\n\n\n - None, False: don't add horizontal (or vertical) lines.\n\n - True, \"automatic\", \"major\": add horizontal (or vertical) grid lines at\n the major ticks of the axes.\n\n - \"minor\": add horizontal (or vertical) grid lines at major and minor ticks of\n the axes.\n\n - an iterable yielding numbers n or pairs (n,opts), where n\n is the coordinate of the line and opts is a dictionary of\n MATPLOTLIB options for rendering the line.\n\n\n - ``gridlinesstyle, hgridlinesstyle, vgridlinesstyle`` -\n (default: None) a dictionary of MATPLOTLIB options for the\n rendering of the grid lines, the horizontal grid lines or the\n vertical grid lines, respectively.\n\n - ``linkmode`` - (default: False) If True, a string containing a link\n to the produced file is returned.\n\n - ``transparent`` - (default: False) If True, make the background transparent.\n\n - ``axes_pad`` - (default: 0.02) The fraction of the axis\n range that is added to each end of each axis. This helps\n avoid problems like clipping lines because of line-width,\n etc. To get axes that are exactly the specified limits, set\n ``axes_pad`` to zero.\n\n - ``ticks_integer`` - (default: False) guarantee that the ticks\n are integers (the ``ticks`` option, if specified, will\n override this)\n\n - ``ticks`` - A matplotlib locator for the major ticks, or\n a number. There are several options. For more information about\n locators, type ``from matplotlib import ticker`` and then\n ``ticker?``.\n\n - If this is a locator object, then it is the locator for\n the horizontal axis. A value of None means use the default\n locator.\n\n - If it is a list of two locators, then the first is for the\n horizontal axis and the second for the vertical axis. A value of\n None means use the default locator (so a value of\n [None, my_locator] uses my_locator for the vertical axis and\n the default for the horizontal axis).\n\n - If in either case above one of the entries is a number `m`\n (something which can be coerced to a float), it will be\n replaced by a MultipleLocator which places major ticks at\n integer multiples of `m`. See examples.\n\n - If in either case above one of the entries is a list of\n numbers, it will be replaced by a FixedLocator which places\n ticks at the locations specified. This includes the case\n of the empty list, which will give no ticks. See examples.\n\n - ``tick_formatter`` - A matplotlib formatter for the major\n ticks. There are several options. 
For more information about\n formatters, type ``from matplotlib import ticker`` and then\n ``ticker?``.\n\n If the value of this keyword is a single item, then this will\n give the formatting for the horizontal axis *only* (except for\n the ``\"latex\"`` option). If it is a list or tuple, the first\n is for the horizontal axis, the second for the vertical axis.\n The options are below:\n\n - If one of the entries is a formatter object, then it is used.\n A value of None means to use the default formatter (so using\n ``tick_formatter=[None, my_formatter]`` uses my_formatter\n for the vertical axis and the default for the horizontal axis).\n\n - If one of the entries is a symbolic constant such as `\\pi`,\n `e`, or `sqrt(2)`, ticks will be formatted nicely at rational\n multiples of this constant.\n\n .. warning::\n\n This should only be used with the ``ticks`` option using nice\n rational multiples of that constant!\n\n - If one of the entries is the string ``\"latex\"``, then the\n formatting will be nice typesetting of the ticks. This is\n intended to be used when the tick locator for at least one of\n the axes is a list including some symbolic elements. This uses\n matplotlib's internal LaTeX rendering engine. If you want to\n use an external LaTeX compiler, then set the keyword option\n ``typeset``. See examples.\n\n - If the first entry is a list of strings (or numbers), then the\n formatting for the horizontal axis will be typeset with the strings\n present in the list. Each entry of the list of strings must be\n provided with a corresponding number in the first entry of\n ``ticks`` to indicate its position on the axis. To typeset the\n strings with ``\"latex\"`` enclose them within ``\"$\"`` symbols. To\n have similar custom formatting of the labels along the vertical\n axis, the second entry must be a list of strings and the second\n entry of ``ticks`` must also be a list of numbers which give the\n positions of the labels. See the examples below.\n\n - ``title`` - (default: None) The title for the plot\n\n - ``title_pos`` - (default: None) The position of the title for the\n plot. It must be a tuple or a list of two real numbers\n ``(x_pos, y_pos)`` which indicate the relative position of the\n title within the plot. The plot itself can be considered to\n occupy, in relative terms, the region within a unit square\n `[0,1]\\\\times[0,1]`. The title text is centered around the\n horizontal factor ``x_pos`` of the plot. The baseline of the\n title text lies at the vertical factor ``y_pos`` of the\n plot. Hence, ``title_pos=(0.5, 0.5)`` will center the title in\n the plot, whereas ``title_pos=(0.5, 1.1)`` will center the\n title along the horizontal direction, but will place the title\n a fraction `0.1` above the plot.\n\n - ``show_legend`` - (default: None) If True, show the legend\n\n - ``legend_*`` - all the options valid for :meth:`set_legend_options`\n prefixed with ``legend_``\n\n - ``base`` - (default: 10) the base of the logarithm if\n a logarithmic scale is set. This must be greater than 1. The base\n can also be given as a list or tuple ``(basex, basey)``.\n ``basex`` sets the base of the logarithm along the horizontal\n axis and ``basey`` sets the base along the vertical axis.\n\n - ``scale`` -- (default: ``\"linear\"``) string. 
The scale of the axes.\n Possible values are\n\n - ``\"linear\"`` -- linear scaling of both the axes\n - ``\"loglog\"`` -- sets both the horizontal and vertical axes to\n logarithmic scale\n - ``\"semilogx\"`` -- sets only the horizontal axis to logarithmic\n scale.\n - ``\"semilogy\"`` -- sets only the vertical axis to logarithmic\n scale.\n\n The scale can also be given as a single argument that is a list\n or tuple ``(scale, base)`` or ``(scale, basex, basey)``.\n\n .. note::\n\n - If the ``scale`` is ``\"linear\"``, then irrespective of what\n ``base`` is set to, it will default to 10 and will remain\n unused.\n\n - ``typeset`` -- (default: ``\"default\"``) string. The type of\n font rendering that should be used for the text. The possible\n values are\n\n - ``\"default\"`` -- Uses matplotlib's internal text rendering\n engine called Mathtext (see\n http://matplotlib.org/users/mathtext.html). If you have\n modified the default matplotlib settings, for instance via\n a matplotlibrc file, then this option will not change any of\n those settings.\n - ``\"latex\"`` -- LaTeX is used for rendering the fonts. This\n requires LaTeX, dvipng and Ghostscript to be installed.\n - ``\"type1\"`` -- Type 1 fonts are used by matplotlib in the text\n in the figure. This requires LaTeX, dvipng and Ghostscript to\n be installed.\n\n EXAMPLES::\n\n sage: c = circle((1,1), 1, color='red')\n sage: c.show(xmin=-1, xmax=3, ymin=-1, ymax=3)\n\n You could also just make the picture larger by changing ``figsize``::\n\n sage: c.show(figsize=8, xmin=-1, xmax=3, ymin=-1, ymax=3)\n\n You can turn off the drawing of the axes::\n\n sage: show(plot(sin,-4,4), axes=False)\n\n You can also label the axes. Putting something in dollar\n signs formats it as a mathematical expression::\n\n sage: show(plot(sin,-4,4), axes_labels=('$x$','$y$'))\n\n You can add a title to a plot::\n\n sage: show(plot(sin,-4,4), title='A plot of $\\sin(x)$')\n\n You can also provide a position for the title. In the\n plot below the title is placed on the bottom left of the figure.::\n\n sage: plot(sin, -4, 4, title='Plot sin(x)', title_pos=(0.05,-0.05))\n\n If you want all the text to be rendered by using an external LaTeX\n installation, then set the ``typeset`` option to ``\"latex\"``. This\n requires that LaTeX, dvipng and Ghostscript be installed::\n\n sage: plot(x, typeset='latex') # optional - latex\n\n If you want all the text in your plot to use Type 1 fonts, then\n set the ``typeset`` option to ``\"type1\"``. This requires that\n LaTeX, dvipng and Ghostscript be installed::\n\n sage: plot(x, typeset='type1') # optional - latex\n\n You can turn on the drawing of a frame around the plots::\n\n sage: show(plot(sin,-4,4), frame=True)\n\n You can make the background transparent::\n\n sage: plot(sin(x), (x, -4, 4), transparent=True)\n\n We can change the scale of the axes in the graphics before\n displaying::\n\n sage: G = plot(exp, 1, 10)\n sage: G.show(scale='semilogy')\n\n We can change the base of the logarithm too. The following changes\n the vertical axis to be on log scale, and with base 2. 
Note that\n the ``base`` argument is ignored for any axis that remains in\n linear scale.::\n\n sage: G.show(scale='semilogy', base=2) # y axis as powers of 2\n\n ::\n\n sage: G.show(scale='semilogy', base=(3,2)) # base ignored for x-axis\n\n The scale can also be given as a 2-tuple or a 3-tuple.::\n\n sage: G.show(scale=('loglog', 2.1)) # both x and y axes in base 2.1\n\n ::\n\n sage: G.show(scale=('loglog', 2, 3)) # x in base 2, y in base 3\n\n The base need not be an integer, though it does have to be given as\n a float.::\n\n sage: G.show(scale='semilogx', base=float(e)) # base is e\n\n Logarithmic scale can be used for various kinds of plots. Here are\n some examples.::\n\n sage: G = list_plot(map(lambda i: 10**i, range(10)))\n sage: G.show(scale='semilogy')\n\n ::\n\n sage: G = parametric_plot((x, x**2), (x, 1, 10))\n sage: G.show(scale='loglog')\n\n ::\n\n sage: disk((5,5), 4, (0, 3*pi/2)).show(scale='loglog',base=2)\n\n ::\n\n sage: x, y = var('x, y')\n sage: G = plot_vector_field((2^x,y^2),(x,1,10),(y,1,100))\n sage: G.show(scale='semilogx',base=2)\n\n But be sure to only plot things that will have a wide enough range\n for the logarithmic scale to be interpretable::\n\n sage: G = arc((2,3), 2, 1, angle=pi/2, sector=(0,pi/2))\n sage: G.show(scale=('loglog', 2))\n Traceback (most recent call last):\n ...\n ValueError: Either expand the range of the dependent variable to allow two different integer powers of your `base`, or change your `base` to a smaller number.\n\n Add grid lines at the major ticks of the axes.\n\n ::\n\n sage: c = circle((0,0), 1)\n sage: c.show(gridlines=True)\n sage: c.show(gridlines=\"automatic\")\n sage: c.show(gridlines=\"major\")\n\n Add grid lines at the major and minor ticks of the axes.\n\n ::\n\n sage: u,v = var('u v')\n sage: f = exp(-(u^2+v^2))\n sage: p = plot_vector_field(f.gradient(), (u,-2,2), (v,-2,2))\n sage: p.show(gridlines=\"minor\")\n\n Add only horizontal or vertical grid lines.\n\n ::\n\n sage: p = plot(sin,-10,20)\n sage: p.show(gridlines=[None, \"automatic\"])\n sage: p.show(gridlines=[\"minor\", False])\n\n Add grid lines at specific positions (using lists/tuples).\n\n ::\n\n sage: x, y = var('x, y')\n sage: p = implicit_plot((y^2-x^2)*(x-1)*(2*x-3)-4*(x^2+y^2-2*x)^2, \\\n ....: (x,-2,2), (y,-2,2), plot_points=1000)\n sage: p.show(gridlines=[[1,0],[-1,0,1]])\n\n Add grid lines at specific positions (using iterators).\n\n ::\n\n sage: def maple_leaf(t):\n ....: return (100/(100+(t-pi/2)^8))*(2-sin(7*t)-cos(30*t)/2)\n sage: p = polar_plot(maple_leaf, -pi/4, 3*pi/2, color=\"red\",plot_points=1000) # long time\n sage: p.show(gridlines=( [-3,-2.75,..,3], xrange(-1,5,2) )) # long time\n\n Add grid lines at specific positions (using functions).\n\n ::\n\n sage: y = x^5 + 4*x^4 - 10*x^3 - 40*x^2 + 9*x + 36\n sage: p = plot(y, -4.1, 1.1)\n sage: xlines = lambda a,b: [z for z,m in y.roots()]\n sage: p.show(gridlines=[xlines, [0]], frame=True, axes=False)\n\n Change the style of all the grid lines.\n\n ::\n\n sage: b = bar_chart([-3,5,-6,11], color='red')\n sage: b.show(gridlines=([-1,-0.5,..,4],True),\n ....: gridlinesstyle=dict(color=\"blue\", linestyle=\":\"))\n\n Change the style of the horizontal or vertical grid lines\n separately.\n\n ::\n\n sage: p = polar_plot(2 + 2*cos(x), 0, 2*pi, color=hue(0.3))\n sage: p.show(gridlines=True,\n ....: hgridlinesstyle=dict(color=\"orange\", linewidth=1.0),\n ....: vgridlinesstyle=dict(color=\"blue\", linestyle=\":\"))\n\n Change the style of each grid line individually.\n\n ::\n\n sage: x, y = 
var('x, y')\n sage: p = implicit_plot((y^2-x^2)*(x-1)*(2*x-3)-4*(x^2+y^2-2*x)^2,\n ....: (x,-2,2), (y,-2,2), plot_points=1000)\n sage: p.show(gridlines=(\n ....: [\n ....: (1,{\"color\":\"red\",\"linestyle\":\":\"}),\n ....: (0,{\"color\":\"blue\",\"linestyle\":\"--\"})\n ....: ],\n ....: [\n ....: (-1,{\"color\":\"red\",\"linestyle\":\":\"}),\n ....: (0,{\"color\":\"blue\",\"linestyle\":\"--\"}),\n ....: (1,{\"color\":\"red\",\"linestyle\":\":\"}),\n ....: ]\n ....: ),\n ....: gridlinesstyle=dict(marker='x',color=\"black\"))\n\n Grid lines can be added to contour plots.\n\n ::\n\n sage: f = sin(x^2 + y^2)*cos(x)*sin(y)\n sage: c = contour_plot(f, (x, -4, 4), (y, -4, 4), plot_points=100)\n sage: c.show(gridlines=True, gridlinesstyle={'linestyle':':','linewidth':1, 'color':'red'})\n\n Grid lines can be added to matrix plots.\n\n ::\n\n sage: M = MatrixSpace(QQ,10).random_element()\n sage: matrix_plot(M).show(gridlines=True)\n\n By default, Sage increases the horizontal and vertical axes\n limits by a certain percentage in all directions. This is\n controlled by the ``axes_pad`` parameter. Increasing the range\n of the axes helps avoid problems with lines and dots being\n clipped because the linewidth extends beyond the axes. To get\n axes limits that are exactly what is specified, set\n ``axes_pad`` to zero. Compare the following two examples.\n\n ::\n\n sage: plot(sin(x), (x, -pi, pi),thickness=2)+point((pi, -1), pointsize=15)\n sage: plot(sin(x), (x, -pi, pi),thickness=2,axes_pad=0)+point((pi, -1), pointsize=15)\n\n Via matplotlib, Sage allows setting of custom ticks. See above\n for more details.\n\n Here the labels are not so useful::\n\n sage: plot(sin(pi*x), (x, -8, 8))\n\n Now put ticks at multiples of 2::\n\n sage: plot(sin(pi*x), (x, -8, 8), ticks=2)\n\n Or just choose where you want the ticks::\n\n sage: plot(sin(pi*x), (x, -8, 8), ticks=[[-7,-3,0,3,7],[-1/2,0,1/2]])\n\n Or no ticks at all::\n\n sage: plot(sin(pi*x), (x, -8, 8), ticks=[[],[]])\n\n This can be very helpful in showing certain features of plots. ::\n\n sage: plot(1.5/(1+e^(-x)), (x, -10, 10)) # doesn't quite show value of inflection point\n\n ::\n\n sage: plot(1.5/(1+e^(-x)), (x, -10, 10), ticks=[None, 1.5/4]) # It's right at f(x)=0.75!\n\n But be careful to leave enough room for at least two major ticks, so that\n the user can tell what the scale is::\n\n sage: plot(x^2,(x,1,8),ticks=6).show()\n Traceback (most recent call last):\n ...\n ValueError: Expand the range of the independent variable to\n allow two multiples of your tick locator (option `ticks`).\n\n You can also use custom formatting if you need it. See above for full\n details::\n\n sage: plot(2*x+1,(x,0,5),ticks=[[0,1,e,pi,sqrt(20)],2],tick_formatter=\"latex\")\n\n This is particularly useful when setting custom ticks in multiples\n of `\\pi`.\n\n ::\n\n sage: plot(sin(x),(x,0,2*pi),ticks=pi/3,tick_formatter=pi)\n\n But keep in mind that you will get exactly the formatting you asked\n for if you specify both formatters. The first syntax is recommended\n for best style in that case. ::\n\n sage: plot(arcsin(x),(x,-1,1),ticks=[None,pi/6],tick_formatter=[\"latex\",pi]) # Nice-looking!\n\n ::\n\n sage: plot(arcsin(x),(x,-1,1),ticks=[None,pi/6],tick_formatter=[None,pi]) # Not so nice-looking\n\n Custom tick labels can be set by supplying the keyword\n ``tick_formatter`` with the list of labels, and simultaneously\n supplying the keyword ``ticks`` with the positions of the labels. 
::\n\n sage: plot(x, (x,0,3), ticks=[[1,2.5],[0.5,1,2]], tick_formatter=[[\"$x_1$\",\"$x_2$\"],[\"$y_1$\",\"$y_2$\",\"$y_3$\"]])\n\n The following sets the custom tick labels only along the horizontal\n axis. ::\n\n sage: plot(x**2, (x,0,2), ticks=[[1,2], None], tick_formatter=[[\"$x_1$\",\"$x_2$\"], None])\n\n If the number of tick labels does not match the number of tick\n label positions, an error is raised.::\n\n sage: plot(x**2, (x,0,2), ticks=[[2], None], tick_formatter=[[\"$x_1$\",\"$x_2$\"], None]).show()\n Traceback (most recent call last):\n ...\n ValueError: If the first component of the list `tick_formatter` is a list then the first component of `ticks` must also be a list of equal length.\n\n When using a logarithmic scale along an axis, make sure to have\n enough room for two ticks so that the user can tell what the scale\n is. This can be effected by increasing the range of the independent\n variable, or by changing the ``base``.::\n\n sage: p = list_plot(range(1, 10), plotjoined=True)\n sage: p.show(scale='loglog')\n Traceback (most recent call last):\n ...\n ValueError: Either expand the range of the dependent variable to allow two different integer powers of your `base`, or change your `base` to a smaller number.\n sage: p.show(scale='loglog', base=8) # this works.\n\n When using ``title_pos``, make sure to pass a list or a tuple\n of length two. Otherwise, an error is raised.::\n\n sage: plot(x, -4, 4, title='Plot x', title_pos=0.05)\n Traceback (most recent call last):\n ...\n ValueError: 'title_pos' must be a list or tuple of two real numbers.\n\n \"\"\"\n # This option should not be passed on to save().\n linkmode = kwds.pop('linkmode', False)\n\n if sage.doctest.DOCTEST_MODE:\n kwds.pop('filename', None)\n self.save(DOCTEST_MODE_FILE, **kwds)\n elif sage.plot.plot.EMBEDDED_MODE:\n kwds.setdefault('filename', graphics_filename())\n self.save(**kwds)\n if linkmode == True:\n return \"<img src='cell://%s'>\" % kwds['filename']\n else:\n html(\"<img src='cell://%s'>\" % kwds['filename'])\n else:\n kwds.setdefault('filename', tmp_filename(ext='.png'))\n self.save(**kwds)\n os.system('%s %s 2>/dev/null 1>/dev/null &'\n % (sage.misc.viewer.png_viewer(), kwds['filename']))\n\n def xmin(self, xmin=None):\n \"\"\"\n EXAMPLES::\n\n sage: g = line([(-1,1), (3,2)])\n sage: g.xmin()\n -1.0\n sage: g.xmin(-3)\n sage: g.xmin()\n -3.0\n \"\"\"\n if xmin is None:\n return self.get_axes_range()['xmin']\n else:\n self.set_axes_range(xmin=xmin)\n\n def xmax(self, xmax=None):\n \"\"\"\n EXAMPLES::\n\n sage: g = line([(-1,1), (3,2)])\n sage: g.xmax()\n 3.0\n sage: g.xmax(10)\n sage: g.xmax()\n 10.0\n \"\"\"\n if xmax is None:\n return self.get_axes_range()['xmax']\n else:\n self.set_axes_range(xmax=xmax)\n\n def ymin(self, ymin=None):\n \"\"\"\n EXAMPLES::\n\n sage: g = line([(-1,1), (3,2)])\n sage: g.ymin()\n 1.0\n sage: g.ymin(-3)\n sage: g.ymin()\n -3.0\n \"\"\"\n if ymin is None:\n return self.get_axes_range()['ymin']\n else:\n self.set_axes_range(ymin=ymin)\n\n def ymax(self, ymax=None):\n \"\"\"\n EXAMPLES::\n\n sage: g = line([(-1,1), (3,2)])\n sage: g.ymax()\n 2.0\n sage: g.ymax(10)\n sage: g.ymax()\n 10.0\n \"\"\"\n if ymax is None:\n return self.get_axes_range()['ymax']\n else:\n self.set_axes_range(ymax=ymax)\n\n\n def get_minmax_data(self):\n \"\"\"\n Return a dictionary whose keys give the xmin, xmax, ymin, and ymax\n data for this graphic.\n\n .. 
warning::\n\n The returned dictionary is mutable, but changing it does\n not change the xmin/xmax/ymin/ymax data. The minmax data is a function\n of the primitives which make up this Graphics object. To change the\n range of the axes, call methods :meth:`xmin`, :meth:`xmax`,\n :meth:`ymin`, :meth:`ymax`, or :meth:`set_axes_range`.\n\n EXAMPLES::\n\n sage: g = line([(-1,1), (3,2)])\n sage: list(sorted(g.get_minmax_data().items()))\n [('xmax', 3.0), ('xmin', -1.0), ('ymax', 2.0), ('ymin', 1.0)]\n\n Note that changing ymax doesn't change the output of get_minmax_data::\n\n sage: g.ymax(10)\n sage: list(sorted(g.get_minmax_data().items()))\n [('xmax', 3.0), ('xmin', -1.0), ('ymax', 2.0), ('ymin', 1.0)]\n \"\"\"\n objects = self._objects\n if objects:\n minmax_data = [o.get_minmax_data() for o in objects]\n xmin = min(d['xmin'] for d in minmax_data)\n xmax = max(d['xmax'] for d in minmax_data)\n ymin = min(d['ymin'] for d in minmax_data)\n ymax = max(d['ymax'] for d in minmax_data)\n # check for NaN's: weird thing -- only way I know to check if a float\n # is a NaN is to check if it is not equal to itself.\n if xmin!=xmin:\n xmin=0; sage.misc.misc.verbose(\"xmin was NaN (setting to 0)\", level=0)\n if xmax!=xmax:\n xmax=0; sage.misc.misc.verbose(\"xmax was NaN (setting to 0)\", level=0)\n if ymin!=ymin:\n ymin=0; sage.misc.misc.verbose(\"ymin was NaN (setting to 0)\", level=0)\n if ymax!=ymax:\n ymax=0; sage.misc.misc.verbose(\"ymax was NaN (setting to 0)\", level=0)\n else:\n xmin = xmax = ymin = ymax = 0\n\n if xmin == xmax:\n xmin -= 1\n xmax += 1\n if ymin == ymax:\n ymin -= 1\n ymax += 1\n return {'xmin':xmin, 'xmax':xmax, 'ymin':ymin, 'ymax':ymax}\n\n def _matplotlib_tick_formatter(self, subplot, base=(10, 10),\n locator_options={}, scale=('linear', 'linear'),\n tick_formatter=(None, None), ticks=(None, None),\n xmax=None, xmin=None, ymax=None, ymin=None):\n r\"\"\"\n Take a matplotlib subplot instance representing the graphic and set\n the ticks formatting. 
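It factors out tick-locator and tick-formatter setup that would otherwise be repeated in the frame and axes branches of :meth:`matplotlib`. 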
This function is only for internal use.\n\n INPUT:\n - ``subplot`` -- the subplot instance.\n\n EXAMPLES::\n\n sage: from matplotlib.figure import Figure\n sage: p = plot(x); d = p.get_minmax_data()\n sage: subplot = Figure().add_subplot(111)\n sage: p._objects[0]._render_on_subplot(subplot)\n sage: p._matplotlib_tick_formatter(subplot, **d)\n (<matplotlib.axes.AxesSubplot object at ...>,\n <matplotlib.ticker.MaxNLocator object at ...>,\n <matplotlib.ticker.MaxNLocator object at ...>,\n <matplotlib.ticker.OldScalarFormatter object at ...>,\n <matplotlib.ticker.OldScalarFormatter object at ...>)\n \"\"\"\n # This function is created to refactor some code that is repeated\n # in the matplotlib function\n from matplotlib.ticker import (FixedLocator, Locator,\n LogFormatterMathtext, LogLocator, MaxNLocator,\n MultipleLocator, NullLocator, OldScalarFormatter)\n\n x_locator, y_locator = ticks\n #---------------------- Location of x-ticks ---------------------#\n\n if x_locator is None:\n if scale[0] == 'log':\n x_locator = LogLocator(base=base[0])\n else:\n x_locator = MaxNLocator(**locator_options)\n elif isinstance(x_locator,Locator):\n pass\n elif x_locator == []:\n x_locator = NullLocator()\n elif isinstance(x_locator,list):\n x_locator = FixedLocator(x_locator)\n else: # x_locator is a number which can be made a float\n from sage.functions.other import ceil, floor\n if floor(xmax/x_locator)-ceil(xmin/x_locator)>1:\n x_locator=MultipleLocator(float(x_locator))\n else: # not enough room for two major ticks\n raise ValueError('Expand the range of the independent '\n 'variable to allow two multiples of your tick locator '\n '(option `ticks`).')\n\n #---------------------- Location of y-ticks ---------------------#\n if y_locator is None:\n if scale[1] == 'log':\n y_locator = LogLocator(base=base[1])\n else:\n y_locator = MaxNLocator(**locator_options)\n elif isinstance(y_locator,Locator):\n pass\n elif y_locator == []:\n y_locator = NullLocator()\n elif isinstance(y_locator,list):\n y_locator = FixedLocator(y_locator)\n else: # y_locator is a number which can be made a float\n from sage.functions.other import ceil, floor\n if floor(ymax/y_locator)-ceil(ymin/y_locator)>1:\n y_locator=MultipleLocator(float(y_locator))\n else: # not enough room for two major ticks\n raise ValueError('Expand the range of the dependent '\n 'variable to allow two multiples of your tick locator '\n '(option `ticks`).')\n\n x_formatter, y_formatter = tick_formatter\n from matplotlib.ticker import FuncFormatter, FixedFormatter\n from sage.misc.latex import latex\n from sage.symbolic.ring import SR\n #---------------------- Formatting x-ticks ----------------------#\n if x_formatter is None:\n if scale[0] == 'log':\n x_formatter = LogFormatterMathtext(base=base[0])\n else:\n x_formatter = OldScalarFormatter()\n elif x_formatter in SR:\n from misc import _multiple_of_constant\n x_const = x_formatter\n x_formatter = FuncFormatter(lambda n,pos:\n _multiple_of_constant(n,pos,x_const))\n elif x_formatter == \"latex\":\n if scale[0] == 'log':\n # We need to strip out '\\\\mathdefault' from the string\n x_formatter = FuncFormatter(lambda n,pos:\n LogFormatterMathtext(base=base[0])(n,pos).replace(\n \"\\\\mathdefault\",\"\"))\n else:\n x_formatter = FuncFormatter(lambda n,pos: '$%s$'%latex(n))\n elif isinstance(x_formatter, (list, tuple)):\n if (not isinstance(ticks[0], (list, tuple)) or\n len(ticks[0]) != len(x_formatter)):\n raise ValueError(\"If the first component of the list \"\n \"`tick_formatter` is a list then the first 
component \"\n \"of `ticks` must also be a list of equal length.\")\n x_formatter = FixedFormatter(x_formatter)\n #---------------------- Formatting y-ticks ----------------------#\n if y_formatter is None:\n if scale[1] == 'log':\n y_formatter = LogFormatterMathtext(base=base[1])\n else:\n y_formatter = OldScalarFormatter()\n elif y_formatter in SR:\n from misc import _multiple_of_constant\n y_const = y_formatter\n y_formatter = FuncFormatter(lambda n,pos:\n _multiple_of_constant(n,pos,y_const))\n elif y_formatter == \"latex\":\n if scale[1] == 'log':\n # We need to strip out '\\\\mathdefault' from the string\n y_formatter = FuncFormatter(lambda n,pos:\n LogFormatterMathtext(base=base[1])(n,pos).replace(\n \"\\\\mathdefault\",\"\"))\n else:\n y_formatter = FuncFormatter(lambda n,pos: '$%s$'%latex(n))\n elif isinstance(y_formatter, (list, tuple)):\n if (not isinstance(ticks[1], (list, tuple)) or\n len(ticks[1]) != len(y_formatter)):\n raise ValueError(\"If the second component of the list \"\n \"`tick_formatter` is a list then the second component \"\n \"of `ticks` must also be a list of equal length.\")\n y_formatter = FixedFormatter(y_formatter)\n\n subplot.xaxis.set_major_locator(x_locator)\n subplot.yaxis.set_major_locator(y_locator)\n subplot.xaxis.set_major_formatter(x_formatter)\n subplot.yaxis.set_major_formatter(y_formatter)\n\n # Check for whether there will be too few ticks in the log scale case\n # If part of the data is nonpositive, we assume there are enough ticks\n if scale[0] == 'log' and xmin > 0:\n import math\n base0 = base[0]\n if (math.floor(math.log(xmax)/math.log(base0)) -\n math.ceil(math.log(xmin)/math.log(base0)) < 1):\n raise ValueError('Either expand the range of the independent '\n 'variable to allow two different integer powers of your `base`, '\n 'or change your `base` to a smaller number.')\n if scale[1] == 'log' and ymin > 0:\n import math\n base1 = base[1]\n if (math.floor(math.log(ymax)/math.log(base1)) -\n math.ceil(math.log(ymin)/math.log(base1)) < 1):\n raise ValueError('Either expand the range of the dependent '\n 'variable to allow two different integer powers of your `base`, '\n 'or change your `base` to a smaller number.')\n\n return (subplot, x_locator, y_locator, x_formatter, y_formatter)\n\n def matplotlib(self, filename=None,\n xmin=None, xmax=None, ymin=None, ymax=None,\n figsize=None, figure=None, sub=None,\n axes=None, axes_labels=None, fontsize=None,\n frame=False, verify=True,\n aspect_ratio = None,\n gridlines=None, gridlinesstyle=None,\n vgridlinesstyle=None, hgridlinesstyle=None,\n show_legend=None, legend_options={},\n axes_pad=0.02, ticks_integer=None,\n tick_formatter=None, ticks=None, title=None,\n title_pos=None, base=None, scale=None,\n typeset='default'):\n r\"\"\"\n Return a matplotlib figure object representing the graphic\n\n EXAMPLES::\n\n sage: c = circle((1,1),1)\n sage: print c.matplotlib()\n Figure(640x480)\n\n To obtain the first matplotlib axes object inside of the\n figure, you can do something like the following.\n\n ::\n\n sage: p=plot(sin(x), (x, -2*pi, 2*pi))\n sage: figure=p.matplotlib()\n sage: axes=figure.axes[0]\n\n For input parameters, see the documentation for the\n :meth:`show` method (this function accepts all except the\n transparent argument).\n\n TESTS:\n\n We verify that :trac:`10291` is fixed::\n\n sage: p = plot(sin(x), (x, -2*pi, 2*pi))\n sage: figure = p.matplotlib()\n sage: axes_range = p.get_axes_range()\n sage: figure = p.matplotlib()\n sage: axes_range2 = p.get_axes_range()\n sage: axes_range 
== axes_range2\n True\n\n We verify that legend options are properly handled (:trac:`12960`).\n First, we test with no options, and next with an incomplete set of\n options.::\n\n sage: p = plot(x, legend_label='aha')\n sage: p.legend(True)\n sage: pm = p.matplotlib()\n sage: pm = p.matplotlib(legend_options={'font_size':'small'})\n\n The title should not overlap with the axes labels nor the frame in\n the following plot (see :trac:`10512`)::\n\n sage: plot(sin(x^2), (x, -3, 3), title='Plot of sin(x^2)', axes_labels=['x','y'],frame=True)\n\n ``typeset`` must not be set to an arbitrary string::\n\n sage: plot(x, typeset='garbage')\n Traceback (most recent call last):\n ...\n ValueError: typeset must be set to one of 'default', 'latex', or\n 'type1'; got 'garbage'.\n\n We verify that numerical options are changed to float before saving (:trac:`14741`).\n By default, Sage 5.10 changes float objects to the `RealLiteral` type.\n The patch changes them to float before creating `matplotlib` objects.::\n\n sage: f = lambda x, y : (abs(cos((x + I * y) ** 4)) - 1)\n sage: g = implicit_plot(f,(-4, 4),(-3, 3),linewidth=0.6)\n sage: gm = g.matplotlib() # without the patch, this goes BOOM -- er, TypeError\n \"\"\"\n if not isinstance(ticks, (list, tuple)):\n ticks = (ticks, None)\n\n from sage.symbolic.ring import SR\n if not isinstance(tick_formatter, (list, tuple)): # make sure both formatters typeset or both don't\n if tick_formatter == \"latex\" or tick_formatter in SR:\n tick_formatter = (tick_formatter, \"latex\")\n else:\n tick_formatter = (tick_formatter, None)\n\n self.set_axes_range(xmin, xmax, ymin, ymax)\n d = self.get_axes_range()\n xmin = d['xmin']\n xmax = d['xmax']\n ymin = d['ymin']\n ymax = d['ymax']\n\n x_pad=(xmax-xmin)*float(axes_pad)\n y_pad=(ymax-ymin)*float(axes_pad)\n\n xmin-=x_pad\n xmax+=x_pad\n ymin-=y_pad\n ymax+=y_pad\n\n global do_verify\n do_verify = verify\n\n if axes is None:\n axes = self._show_axes\n\n from matplotlib.figure import Figure\n from matplotlib import rcParams\n if typeset == 'type1': # Requires LaTeX, dvipng, gs to be installed.\n rcParams['ps.useafm'] = True\n rcParams['pdf.use14corefonts'] = True\n rcParams['text.usetex'] = True\n elif typeset == 'latex': # Requires LaTeX, dvipng, gs to be installed.\n rcParams['ps.useafm'] = False\n rcParams['pdf.use14corefonts'] = False\n rcParams['text.usetex'] = True\n elif typeset != 'default': # We won't change (maybe user-set) defaults\n raise ValueError(\"typeset must be set to one of 'default', 'latex',\"\n \" or 'type1'; got '{}'.\".format(typeset))\n\n self.fontsize(fontsize)\n self.axes_labels(l=axes_labels)\n\n if figsize is not None and not isinstance(figsize, (list, tuple)):\n default_width, default_height=rcParams['figure.figsize']\n figsize=(figsize, default_height*figsize/default_width)\n\n if figure is None:\n figure=Figure(figsize=figsize)\n\n #the incoming subplot instance\n subplot = sub\n if not subplot:\n subplot = figure.add_subplot(111)\n if aspect_ratio is None:\n aspect_ratio=self.aspect_ratio()\n if aspect_ratio == 'automatic':\n subplot.set_aspect('auto', adjustable='box')\n else:\n subplot.set_aspect(aspect_ratio, adjustable='box')\n #add all the primitives to the subplot\n old_opts = dict()\n for g in self._objects:\n opts, old_opts[g] = g.options(), g.options()\n for k,v in opts.items():\n try:\n if v.parent() in sage.categories.fields.Fields(): opts[k] = float(v)\n except (AttributeError, TypeError): pass\n g.set_options(opts)\n g._render_on_subplot(subplot)\n if hasattr(g, 
'_bbox_extra_artists'):\n self._bbox_extra_artists.extend(g._bbox_extra_artists)\n\n #--------------------------- Set the scale -----------------------#\n xscale, yscale, basex, basey = self._set_scale(figure, scale=scale,\n base=base)\n\n #-------------------------- Set the legend -----------------------#\n if show_legend is None:\n show_legend = self._show_legend\n\n if show_legend:\n from matplotlib.font_manager import FontProperties\n lopts = dict()\n lopts.update(legend_options)\n lopts.update(self._legend_opts)\n prop = FontProperties(\n family = lopts.pop('font_family', 'sans-serif'),\n size = lopts.pop('font_size', 'medium'),\n style = lopts.pop('font_style', 'normal'),\n weight = lopts.pop('font_weight', 'medium'),\n variant = lopts.pop('font_variant', 'normal')\n )\n color = lopts.pop('back_color', (0.9, 0.9, 0.9))\n leg = subplot.legend(prop=prop, **lopts)\n if leg is None:\n sage.misc.misc.warn(\"legend requested but no items are labeled\")\n else:\n # color\n lframe = leg.get_frame()\n lframe.set_facecolor(color)\n from sage.plot.colors import to_mpl_color\n for txt,color in zip(leg.get_texts(), self._legend_colors):\n if color is not None:\n txt.set_color(to_mpl_color(color))\n\n subplot.set_xlim([xmin, xmax])\n subplot.set_ylim([ymin, ymax])\n\n locator_options=dict(nbins=9,steps=[1,2,5,10],integer=ticks_integer)\n\n if axes is None:\n axes = self._show_axes\n\n for spine in subplot.spines.values():\n spine.set_color(self._axes_color)\n spine.set_linewidth(self._axes_width)\n\n\n if frame:\n # For now, set the formatter to the old one, since that is\n # sort of what we are used to. We should eventually look at\n # the default one to see if we like it better.\n\n (subplot, x_locator, y_locator,\n x_formatter, y_formatter) = self._matplotlib_tick_formatter(\n subplot, base=(basex, basey),\n locator_options=locator_options,\n scale=(xscale, yscale),\n tick_formatter=tick_formatter, ticks=ticks,\n xmax=xmax, xmin=xmin, ymax=ymax, ymin=ymin)\n\n subplot.set_frame_on(True)\n if axes and xscale == 'linear' and yscale == 'linear':\n if (ymin<=0 and ymax>=0) or (ymax<=0 and ymin>=0):\n subplot.axhline(color=self._axes_color,\n linewidth=self._axes_width)\n if (xmin<=0 and xmax>=0) or (xmax<=0 and xmin>=0):\n subplot.axvline(color=self._axes_color,\n linewidth=self._axes_width)\n\n elif axes:\n ymiddle=False\n xmiddle=False\n # Note that the user may specify a custom xmin and xmax which\n # flips the axis horizontally. Hence we need to check for both\n # the possibilities in the if statements below. 
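Each branch decides which spine stays visible and where it is placed. 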
Similar\n # comments hold for ymin and ymax.\n if xscale == 'log':\n if xmax > xmin:\n subplot.spines['right'].set_visible(False)\n subplot.spines['left'].set_position(('outward',10))\n subplot.yaxis.set_ticks_position('left')\n subplot.yaxis.set_label_position('left')\n yaxis='left'\n elif xmax < xmin:\n subplot.spines['left'].set_visible(False)\n subplot.spines['right'].set_position(('outward',10))\n subplot.yaxis.set_ticks_position('right')\n subplot.yaxis.set_label_position('right')\n yaxis='right'\n elif (xmin > 0 and xmax > xmin) or (xmax > 0 and xmin > xmax):\n subplot.spines['right'].set_visible(False)\n subplot.spines['left'].set_position(('outward',10))\n subplot.yaxis.set_ticks_position('left')\n subplot.yaxis.set_label_position('left')\n yaxis='left'\n elif (xmax < 0 and xmax > xmin) or (xmin < 0 and xmin > xmax):\n subplot.spines['left'].set_visible(False)\n subplot.spines['right'].set_position(('outward',10))\n subplot.yaxis.set_ticks_position('right')\n subplot.yaxis.set_label_position('right')\n yaxis='right'\n else:\n subplot.spines['left'].set_position('zero')\n subplot.yaxis.set_ticks_position('left')\n subplot.yaxis.set_label_position('left')\n subplot.spines['right'].set_visible(False)\n ymiddle=True\n yaxis='left'\n\n if yscale == 'log':\n if ymax > ymin:\n subplot.spines['top'].set_visible(False)\n subplot.spines['bottom'].set_position(('outward',10))\n subplot.xaxis.set_ticks_position('bottom')\n subplot.xaxis.set_label_position('bottom')\n xaxis='bottom'\n elif ymax < ymin:\n subplot.spines['bottom'].set_visible(False)\n subplot.spines['top'].set_position(('outward',10))\n subplot.xaxis.set_ticks_position('top')\n subplot.xaxis.set_label_position('top')\n xaxis='top'\n elif (ymin > 0 and ymax > ymin) or (ymax > 0 and ymin > ymax):\n subplot.spines['top'].set_visible(False)\n subplot.spines['bottom'].set_position(('outward',10))\n subplot.xaxis.set_ticks_position('bottom')\n subplot.xaxis.set_label_position('bottom')\n xaxis='bottom'\n elif (ymax < 0 and ymax > ymin) or (ymin < 0 and ymin > ymax):\n subplot.spines['bottom'].set_visible(False)\n subplot.spines['top'].set_position(('outward',10))\n subplot.xaxis.set_ticks_position('top')\n subplot.xaxis.set_label_position('top')\n xaxis='top'\n else:\n subplot.spines['bottom'].set_position('zero')\n subplot.xaxis.set_ticks_position('bottom')\n subplot.xaxis.set_label_position('bottom')\n subplot.spines['top'].set_visible(False)\n xmiddle=True\n xaxis='bottom'\n\n # For now, set the formatter to the old one, since that is\n # sort of what we are used to. 
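This repeats the formatter setup used in the frame branch above. 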
We should eventually look at\n # the default one to see if we like it better.\n\n (subplot, x_locator, y_locator,\n x_formatter, y_formatter) = self._matplotlib_tick_formatter(\n subplot, base=(basex, basey),\n locator_options=locator_options,\n scale=(xscale, yscale),\n tick_formatter=tick_formatter, ticks=ticks,\n xmax=xmax, xmin=xmin, ymax=ymax, ymin=ymin)\n\n # Make ticklines go on both sides of the axes\n # if xmiddle:\n # for t in subplot.xaxis.get_majorticklines():\n # t.set_marker(\"|\")\n # t.set_markersize(8)\n # for t in subplot.xaxis.get_minorticklines():\n # t.set_marker(\"|\")\n # t.set_markersize(4)\n\n # if ymiddle:\n # for t in subplot.yaxis.get_majorticklines():\n # t.set_marker(\"|\")\n # t.set_markersize(8)\n # for t in subplot.yaxis.get_minorticklines():\n # t.set_marker(\"|\")\n # t.set_markersize(4)\n\n # Make the zero tick labels disappear if the axes cross\n # inside the picture, but only if log scale is not used\n if (xmiddle and ymiddle and xscale == 'linear' and\n yscale == 'linear'):\n from sage.plot.plot import SelectiveFormatter\n subplot.yaxis.set_major_formatter(SelectiveFormatter(\n subplot.yaxis.get_major_formatter(), skip_values=[0]))\n subplot.xaxis.set_major_formatter(SelectiveFormatter(\n subplot.xaxis.get_major_formatter(), skip_values=[0]))\n\n else:\n for spine in subplot.spines.values():\n spine.set_visible(False)\n from matplotlib.ticker import NullFormatter, NullLocator\n subplot.xaxis.set_major_formatter(NullFormatter())\n subplot.yaxis.set_major_formatter(NullFormatter())\n subplot.xaxis.set_major_locator(NullLocator())\n subplot.yaxis.set_major_locator(NullLocator())\n\n if frame or axes:\n # Make minor tickmarks, unless we specify fixed ticks or no ticks\n # We do this change only on linear scale, otherwise matplotlib\n # errors out with a memory error.\n from matplotlib.ticker import (AutoMinorLocator, FixedLocator,\n LogLocator, NullLocator)\n if isinstance(x_locator, (NullLocator, FixedLocator)):\n subplot.xaxis.set_minor_locator(NullLocator())\n elif xscale == 'linear':\n subplot.xaxis.set_minor_locator(AutoMinorLocator())\n else: # log scale\n from sage.misc.misc import srange\n base_inv = 1.0/basex\n subs = map(float, srange(2*base_inv, 1, base_inv))\n subplot.xaxis.set_minor_locator(LogLocator(base=basex,\n subs=subs))\n if isinstance(y_locator, (NullLocator, FixedLocator)):\n subplot.yaxis.set_minor_locator(NullLocator())\n elif yscale == 'linear':\n subplot.yaxis.set_minor_locator(AutoMinorLocator())\n else: # log scale\n from sage.misc.misc import srange\n base_inv = 1.0/basey\n subs = map(float, srange(2*base_inv, 1, base_inv))\n subplot.yaxis.set_minor_locator(LogLocator(base=basey,\n subs=subs))\n\n # Set the color and fontsize of ticks\n figure.get_axes()[0].tick_params(color=self._axes_color,\n labelcolor=self._tick_label_color,\n labelsize=self._fontsize, which='both')\n\n\n if gridlines is not None:\n if isinstance(gridlines, (list, tuple)):\n vgridlines,hgridlines=gridlines\n else:\n hgridlines=gridlines\n vgridlines=gridlines\n\n if gridlinesstyle is None:\n # Set up the default grid style\n gridlinesstyle=dict(color='black',linestyle=':',linewidth=0.5)\n\n vgridstyle=gridlinesstyle.copy()\n if vgridlinesstyle is not None:\n vgridstyle.update(vgridlinesstyle)\n\n hgridstyle=gridlinesstyle.copy()\n if hgridlinesstyle is not None:\n hgridstyle.update(hgridlinesstyle)\n\n if hgridlines=='minor':\n hgridstyle['which']='both'\n if vgridlines=='minor':\n vgridstyle['which']='both'\n\n if hasattr(hgridlines, '__iter__'):\n 
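# explicit grid-line positions were supplied, possibly as (position, style) pairs\n 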
hlines=iter(hgridlines)\n hgridstyle.pop(\"minor\",None)\n for hline in hlines:\n if isinstance(hline, (list, tuple)):\n hl, style=hline\n st=hgridstyle.copy()\n st.update(style)\n else:\n hl=hline\n st=hgridstyle\n subplot.axhline(hl,**st)\n else:\n if hgridlines not in (None, False):\n subplot.yaxis.grid(True, **hgridstyle)\n\n if hasattr(vgridlines, '__iter__'):\n vlines=iter(vgridlines)\n vgridstyle.pop(\"minor\",None)\n for vline in vlines:\n if isinstance(vline, (list, tuple)):\n vl, style=vline\n st=vgridstyle.copy()\n st.update(style)\n else:\n vl=vline\n st=vgridstyle\n subplot.axvline(vl,**st)\n else:\n if vgridlines not in (None, False):\n subplot.xaxis.grid(True, **vgridstyle)\n\n\n\n if self._axes_labels is not None:\n label_options={}\n label_options['color']=self._axes_label_color\n label_options['size']=self._fontsize\n subplot.set_xlabel(self._axes_labels[0], **label_options)\n subplot.set_ylabel(self._axes_labels[1], **label_options)\n\n\n if axes is True and frame is False:\n # We set the label positions according to where we are\n # drawing the axes.\n if xaxis=='bottom':\n yaxis_labely=subplot.get_ylim()[1]\n yaxis_labeloffset=8\n yaxis_vert='bottom'\n xaxis_labely=0\n xaxis_vert='baseline'\n else:\n yaxis_labely=subplot.get_ylim()[0]\n yaxis_labeloffset=-8\n yaxis_vert='top'\n xaxis_labely=1\n xaxis_vert='top'\n\n if yaxis=='left':\n xaxis_labelx=subplot.get_xlim()[1]\n xaxis_labeloffset=8\n xaxis_horiz='left'\n yaxis_labelx=0\n else:\n xaxis_labelx=subplot.get_xlim()[0]\n xaxis_labeloffset=-8\n xaxis_horiz='right'\n yaxis_labelx=1\n\n from matplotlib.transforms import offset_copy\n xlabel=subplot.xaxis.get_label()\n xlabel.set_horizontalalignment(xaxis_horiz)\n xlabel.set_verticalalignment(xaxis_vert)\n trans=subplot.spines[xaxis].get_transform()\n labeltrans=offset_copy(trans, figure, x=xaxis_labeloffset,\n y=0, units='points')\n subplot.xaxis.set_label_coords(x=xaxis_labelx,\n y=xaxis_labely, transform=labeltrans)\n\n ylabel=subplot.yaxis.get_label()\n ylabel.set_horizontalalignment('center')\n ylabel.set_verticalalignment(yaxis_vert)\n ylabel.set_rotation('horizontal')\n trans=subplot.spines[yaxis].get_transform()\n labeltrans=offset_copy(trans, figure, x=0,\n y=yaxis_labeloffset, units='points')\n subplot.yaxis.set_label_coords(x=yaxis_labelx,\n y=yaxis_labely, transform=labeltrans)\n\n # This option makes the xlim and ylim limits not take effect\n # todo: figure out which limits were specified, and let the\n # free limits autoscale\n #subplot.autoscale_view(tight=True)\n if title is not None:\n if title_pos is not None:\n if ((not isinstance(title_pos, (list, tuple)))\n or (len(title_pos) != 2)):\n raise ValueError(\"'title_pos' must be a list or tuple \"\n \"of two real numbers.\")\n title_pos = (float(title_pos[0]), float(title_pos[1]))\n\n if (frame) or (axes_labels is None):\n if title_pos is not None:\n subplot.set_title(title, fontsize=fontsize,\n position=title_pos)\n else:\n subplot.set_title(title, fontsize=fontsize)\n else: # frame is false axes is not None, and neither is axes_labels\n # Then, the title is moved up to avoid overlap with axes labels\n if title_pos is None:\n title_pos = (0.5, 1.05)\n subplot.set_title(title, fontsize=fontsize, position=title_pos)\n\n for g in self._objects:\n g.set_options(old_opts[g])\n\n return figure\n\n def save_image(self, filename=None, *args, **kwds):\n r\"\"\"\n Save an image representation of self. The image type is\n determined by the extension of the filename. 
For example,\n this could be ``.png``, ``.jpg``, ``.gif``, ``.pdf``,\n ``.svg``. Currently this is implemented by calling the\n :meth:`save` method of self, passing along all arguments and\n keywords.\n\n .. Note::\n\n Not all image types are necessarily implemented for all\n graphics types. See :meth:`save` for more details.\n\n EXAMPLES::\n\n sage: c = circle((1,1), 1, color='red')\n sage: filename = os.path.join(SAGE_TMP, 'test.png')\n sage: c.save_image(filename, xmin=-1, xmax=3, ymin=-1, ymax=3)\n \"\"\"\n self.save(filename, *args, **kwds)\n\n\n # ALLOWED_EXTENSIONS is the list of recognized formats.\n # filename argument is written explicitly so that it can be used as a\n # positional one, which is a very likely usage for this function.\n @suboptions('legend',\n back_color=(0.9, 0.9, 0.9), borderpad=0.6,\n borderaxespad=None,\n columnspacing=None,\n fancybox=False, font_family='sans-serif',\n font_size='medium', font_style='normal',\n font_variant='normal', font_weight='medium',\n handlelength=0.05, handletextpad=0.5,\n labelspacing=0.02, loc='best',\n markerscale=0.6, ncol=1, numpoints=2,\n shadow=False, title=None)\n def save(self, filename=None, **kwds):\n r\"\"\"\n Save the graphics to an image file.\n\n INPUT:\n\n - ``filename`` -- a string (default: autogenerated), the filename and\n the image format given by the extension, which can be one of the\n following:\n\n * ``.eps``,\n\n * ``.pdf``,\n\n * ``.png``,\n\n * ``.ps``,\n\n * ``.sobj`` (for a Sage object you can load later),\n\n * ``.svg``,\n\n * empty extension will be treated as ``.sobj``.\n\n All other keyword arguments will be passed to the plotter.\n\n OUTPUT:\n\n - none.\n\n EXAMPLES::\n\n sage: c = circle((1,1), 1, color='red')\n sage: filename = os.path.join(SAGE_TMP, 'test.png')\n sage: c.save(filename, xmin=-1, xmax=3, ymin=-1, ymax=3)\n\n To make a figure bigger or smaller, use ``figsize``::\n\n sage: c.save(filename, figsize=5, xmin=-1, xmax=3, ymin=-1, ymax=3)\n\n By default, the figure grows to include all of the graphics and text,\n so the final image may not be exactly the figure size you specified.\n If you want a figure to be exactly a certain size, specify the keyword\n ``fig_tight=False``::\n\n sage: c.save(filename, figsize=[8,4], fig_tight=False,\n ....: xmin=-1, xmax=3, ymin=-1, ymax=3)\n\n You can also pass extra options to the plot command instead of this\n method, e.g. 
::\n\n sage: plot(x^2 - 5, (x, 0, 5), ymin=0).save(tmp_filename(ext='.png'))\n\n will save the same plot as the one shown by this command::\n\n sage: plot(x^2 - 5, (x, 0, 5), ymin=0)\n\n (This test verifies that :trac:`8632` is fixed.)\n\n TESTS:\n\n Legend labels should save correctly::\n\n sage: P = plot(x,(x,0,1),legend_label='$xyz$')\n sage: P.set_legend_options(back_color=(1,0,0))\n sage: P.set_legend_options(loc=7)\n sage: filename=os.path.join(SAGE_TMP, 'test.png')\n sage: P.save(filename)\n\n This plot should save with the frame shown, showing :trac:`7524`\n is fixed (same issue as :trac:`7981` and :trac:`8632`)::\n\n sage: var('x,y')\n (x, y)\n sage: a = plot_vector_field((x,-y),(x,-1,1),(y,-1,1))\n sage: filename=os.path.join(SAGE_TMP, 'test2.png')\n sage: a.save(filename)\n \"\"\"\n options = dict()\n options.update(self.SHOW_OPTIONS)\n options.update(self._extra_kwds)\n options.update(kwds)\n dpi = options.pop('dpi')\n transparent = options.pop('transparent')\n fig_tight = options.pop('fig_tight')\n\n if filename is None:\n filename = options.pop('filename')\n if filename is None:\n filename = graphics_filename()\n ext = os.path.splitext(filename)[1].lower()\n\n if ext not in ALLOWED_EXTENSIONS:\n raise ValueError(\"allowed file extensions for images are '\"\n + \"', '\".join(ALLOWED_EXTENSIONS) + \"'!\")\n elif ext in ['', '.sobj']:\n SageObject.save(self, filename)\n else:\n from matplotlib import rcParams\n rc_backup = (rcParams['ps.useafm'], rcParams['pdf.use14corefonts'],\n rcParams['text.usetex']) # save the rcParams\n figure = self.matplotlib(**options)\n # You can output in PNG, PS, EPS, PDF, or SVG format, depending\n # on the file extension.\n # matplotlib looks at the file extension to see what the renderer should be.\n # The default is FigureCanvasAgg for PNG's because this is by far the most\n # common type of files rendered, like in the notebook, for example.\n # if the file extension is not '.png', then matplotlib will handle it.\n from matplotlib.backends.backend_agg import FigureCanvasAgg\n figure.set_canvas(FigureCanvasAgg(figure))\n # this messes up the aspect ratio!\n #figure.canvas.mpl_connect('draw_event', pad_for_tick_labels)\n\n # tight_layout adjusts the *subplot* parameters so ticks aren't cut off, etc.\n figure.tight_layout()\n\n if fig_tight is True:\n figure.savefig(filename, dpi=dpi, bbox_inches='tight',\n bbox_extra_artists=self._bbox_extra_artists,\n transparent=transparent)\n else:\n figure.savefig(filename, dpi=dpi,\n transparent=transparent)\n\n # Restore the rcParams to the original, possibly user-set values\n (rcParams['ps.useafm'], rcParams['pdf.use14corefonts'],\n rcParams['text.usetex']) = rc_backup\n\n def description(self):\n r\"\"\"\n Print a textual description to stdout.\n\n This method is mostly used for doctests.\n\n EXAMPLES::\n\n sage: print polytopes.n_cube(2).plot().description()\n Polygon defined by 4 points: [(1.0, 1.0), (-1.0, 1.0), (-1.0, -1.0), (1.0, -1.0)]\n Line defined by 2 points: [(-1.0, -1.0), (-1.0, 1.0)]\n Line defined by 2 points: [(-1.0, -1.0), (1.0, -1.0)]\n Line defined by 2 points: [(-1.0, 1.0), (1.0, 1.0)]\n Line defined by 2 points: [(1.0, -1.0), (1.0, 1.0)]\n Point set defined by 4 point(s): [(-1.0, -1.0), (-1.0, 1.0), (1.0, -1.0), (1.0, 1.0)]\n \"\"\"\n data = []\n for g in self:\n g_zorder = g.options().get('zorder', 0)\n if hasattr(g, 'xdata'):\n g_str = '{0}:\\t{1}'.format(g, zip(g.xdata, g.ydata))\n else:\n g_str = repr(g)\n data.append([g_zorder, g_str, g])\n data.sort()\n return '\\n'.join(g[1] for g 
in data)\n\nclass GraphicsArray(SageObject):\n \"\"\"\n GraphicsArray takes a (`m` x `n`) list of lists of\n graphics objects and plots them all on one canvas.\n \"\"\"\n def __init__(self, array):\n \"\"\"\n Constructor for ``GraphicsArray`` class. Normally used only\n via :func:`graphics_array` function.\n\n INPUT: a list or list of lists/tuples, all of which are graphics objects\n\n EXAMPLES::\n\n sage: L = [plot(sin(k*x),(x,-pi,pi)) for k in range(10)]\n sage: G = graphics_array(L)\n sage: G.ncols()\n 10\n sage: M = [[plot(x^2)],[plot(x^3)]]\n sage: H = graphics_array(M)\n sage: str(H[1])\n 'Graphics object consisting of 1 graphics primitive'\n\n TESTS::\n\n sage: L = [[plot(sin),plot(cos)],[plot(tan)]]\n sage: graphics_array(L)\n Traceback (most recent call last):\n ...\n TypeError: array (=[[Graphics object consisting of 1 graphics primitive, Graphics object consisting of 1 graphics primitive], [Graphics object consisting of 1 graphics primitive]]) must be a list of lists of Graphics objects\n sage: G = plot(x,(x,0,1))\n sage: graphics_array(G)\n Traceback (most recent call last):\n ...\n TypeError: array (=Graphics object consisting of 1 graphics primitive) must be a list of lists of Graphics objects\n sage: G = [[plot(x,(x,0,1)),x]]\n sage: graphics_array(G)\n Traceback (most recent call last):\n ...\n TypeError: every element of array must be a Graphics object\n \"\"\"\n if not isinstance(array, (list, tuple)):\n raise TypeError(\"array (=%s) must be a list of lists of Graphics objects\"%(array))\n array = list(array)\n self._glist = []\n self._rows = len(array)\n if self._rows > 0:\n if not isinstance(array[0], (list, tuple)):\n array = [array]\n self._rows = 1\n self._cols = len(array[0])\n else:\n self._cols = 0\n self._dims = self._rows*self._cols\n for row in array: #basically flatten the list\n if not isinstance(row, (list, tuple)) or len(row) != self._cols:\n raise TypeError(\"array (=%s) must be a list of lists of Graphics objects\"%(array))\n for g in row:\n if not isinstance(g, Graphics):\n raise TypeError(\"every element of array must be a Graphics object\")\n self._glist.append(g)\n self._figsize = None\n\n def _repr_(self):\n \"\"\"\n Representation of the graphics array.\n\n EXAMPLES::\n\n sage: R = rainbow(6)\n sage: L = [plot(x^n,(x,0,1),color=R[n]) for n in range(6)]\n sage: graphics_array(L,2,3)\n \"\"\"\n return self.__str__()\n\n def _graphics_(self):\n \"\"\"\n Show graphics.\n\n The presence of this method is used by the displayhook to\n decide that we want to see a graphical output by default.\n\n OUTPUT:\n\n Return ``True`` if graphical output was generated (might not\n be shown in doctest mode), otherwise ``False``.\n\n EXAMPLES::\n\n sage: from sage.plot.graphics import GraphicsArray\n sage: g = GraphicsArray([])\n sage: g._graphics_()\n True\n \"\"\"\n self.show()\n return True\n\n def __str__(self):\n \"\"\"\n String representation of the graphics array.\n\n EXAMPLES::\n\n sage: R = rainbow(6)\n sage: L = [plot(x^n,(x,0,1),color=R[n]) for n in range(6)]\n sage: G = graphics_array(L,2,3)\n sage: G.__str__()\n 'Graphics Array of size 2 x 3'\n sage: str(G)\n 'Graphics Array of size 2 x 3'\n \"\"\"\n return \"Graphics Array of size %s x %s\"%(self._rows, self._cols)\n\n def nrows(self):\n \"\"\"\n Number of rows of the graphics array.\n\n EXAMPLES::\n\n sage: R = rainbow(6)\n sage: L = [plot(x^n,(x,0,1),color=R[n]) for n in range(6)]\n sage: G = graphics_array(L,2,3)\n sage: G.nrows()\n 2\n sage: graphics_array(L).nrows()\n 1\n \"\"\"\n return 
self._rows\n\n def ncols(self):\n \"\"\"\n Number of columns of the graphics array.\n\n EXAMPLES::\n\n sage: R = rainbow(6)\n sage: L = [plot(x^n,(x,0,1),color=R[n]) for n in range(6)]\n sage: G = graphics_array(L,2,3)\n sage: G.ncols()\n 3\n sage: graphics_array(L).ncols()\n 6\n \"\"\"\n return self._cols\n\n def __getitem__(self, i):\n \"\"\"\n Return the ``i``th element of the list of graphics\n in the (flattened) array.\n\n EXAMPLES:\n\n We can access and view individual plots::\n\n sage: M = [[plot(x^2)],[plot(x^3)]]\n sage: H = graphics_array(M)\n sage: H[1]\n\n They can also be represented::\n\n sage: str(H[1])\n 'Graphics object consisting of 1 graphics primitive'\n\n Another example::\n\n sage: L = [plot(sin(k*x),(x,-pi,pi))+circle((k,k),1,color='red') for k in range(10)]\n sage: G = graphics_array(L,5,2)\n sage: str(G[3])\n 'Graphics object consisting of 2 graphics primitives'\n sage: G[3]\n \"\"\"\n i = int(i)\n return self._glist[i]\n\n def __setitem__(self, i, g):\n \"\"\"\n Set the ``i``th element of the list of graphics\n in the (flattened) array.\n\n EXAMPLES::\n\n sage: M = [[plot(x^2)],[plot(x^3)]]\n sage: H = graphics_array(M)\n sage: str(H[1])\n 'Graphics object consisting of 1 graphics primitive'\n\n We can check this is one primitive::\n\n sage: H[1] # the plot of x^3\n\n Now we change it::\n\n sage: H[1] = circle((1,1),2)+points([(1,2),(3,2),(5,5)],color='purple')\n sage: str(H[1])\n 'Graphics object consisting of 2 graphics primitives'\n\n And we visually check that it's different::\n\n sage: H[1] # a circle and some purple points\n \"\"\"\n i = int(i)\n self._glist[i] = g\n\n def _set_figsize_(self, ls):\n \"\"\"\n Set the figsize of all plots in the array.\n\n This is normally only used via the ``figsize`` keyword in\n :meth:`save` or :meth:`show`.\n\n EXAMPLES::\n\n sage: L = [plot(sin(k*x),(x,-pi,pi)) for k in [1..3]]\n sage: G = graphics_array(L)\n sage: G.show(figsize=[5,3]) # smallish and compact\n\n ::\n\n sage: G.show(figsize=[10,20]) # bigger and tall and thin; long time (2s on sage.math, 2012)\n\n ::\n\n sage: G.show(figsize=8) # figure as a whole is a square\n \"\"\"\n # if just one number is passed in for figsize, as documented\n if not isinstance(ls,list):\n ls = [ls,ls]\n # now ls is a two-element list\n m = int(ls[0])\n n = int(ls[1])\n self._figsize = [m,n]\n\n def __len__(self):\n \"\"\"\n Total number of elements of the graphics array.\n\n EXAMPLES::\n\n sage: R = rainbow(6)\n sage: L = [plot(x^n,(x,0,1),color=R[n]) for n in range(6)]\n sage: G = graphics_array(L,2,3)\n sage: len(G)\n 6\n \"\"\"\n return len(self._glist)\n\n# This does not work, and can never have worked!\n# To make this work, one would also change the\n# dimensions of the array, but it's not clear there\n# is a canonical way to do this.\n#\n# def append(self, g):\n# \"\"\"\n# Appends a graphic to the array.\n# \"\"\"\n# self._glist.append(g)\n\n def append(self, g):\n \"\"\"\n Appends a graphic to the array. 
Currently\n not implemented.\n\n TESTS::\n\n sage: from sage.plot.graphics import GraphicsArray\n sage: G = GraphicsArray([plot(sin),plot(cos)])\n sage: G.append(plot(tan))\n Traceback (most recent call last):\n ...\n NotImplementedError: Appending to a graphics array is not yet implemented\n \"\"\"\n raise NotImplementedError('Appending to a graphics array is not yet implemented')\n\n\n def _render(self, filename, dpi=None, figsize=None, axes=None, **args):\n r\"\"\"\n ``_render`` loops over all graphics objects in the array\n and adds them to the subplot. This is only used internally\n when the plot is actually saved or shown.\n\n EXAMPLES::\n\n sage: graphics_array([[plot(sin), plot(cos)], [plot(tan), plot(sec)]])\n\n TESTS::\n\n sage: graphics_array([])\n \"\"\"\n #glist is a list of Graphics objects:\n glist = self._glist\n rows = self._rows\n cols = self._cols\n dims = self._dims\n if rows == 0 or cols == 0:\n glist = [Graphics()]\n rows = cols = dims = 1\n #make a blank matplotlib Figure:\n from matplotlib.figure import Figure\n figure = Figure(figsize)\n global do_verify\n do_verify = True\n for i,g in zip(range(1, dims+1), glist):\n subplot = figure.add_subplot(rows, cols, i)\n g.matplotlib(filename, figure=figure, sub=subplot,\n verify=do_verify, axes = axes, **args)\n g.save(filename, dpi=dpi, figure=figure, sub=subplot,\n verify=do_verify, axes = axes, **args)\n\n def save_image(self, filename=None, *args, **kwds):\n r\"\"\"\n Save an image representation of self. The image type is\n determined by the extension of the filename. For example,\n this could be ``.png``, ``.jpg``, ``.gif``, ``.pdf``,\n ``.svg``. Currently this is implemented by calling the\n :meth:`save` method of self, passing along all arguments and\n keywords.\n\n .. Note::\n\n Not all image types are necessarily implemented for all\n graphics types. 
See :meth:`save` for more details.\n\n EXAMPLES::\n\n sage: plots = [[plot(m*cos(x + n*pi/4), (x,0, 2*pi)) for n in range(3)] for m in range(1,3)]\n sage: G = graphics_array(plots)\n sage: G.save_image(tmp_filename()+'.png')\n \"\"\"\n self.save(filename, *args, **kwds)\n\n def save(self, filename=None, dpi=DEFAULT_DPI, figsize=None,\n axes = None, **args):\n \"\"\"\n Save the ``graphics_array`` to (for now) a png called\n 'filename'.\n\n OPTIONAL INPUT:\n\n - ``filename`` - (default: None) string\n\n - ``dpi`` - dots per inch\n\n - ``figsize`` - width or [width, height]\n\n - ``axes`` - (default: True)\n\n EXAMPLES::\n\n sage: F = tmp_filename(ext='.png')\n sage: L = [plot(sin(k*x),(x,-pi,pi)) for k in [1..3]]\n sage: G = graphics_array(L)\n sage: G.save(F,500,axes=False) # long time (6s on sage.math, 2012)\n \"\"\"\n if (figsize is not None): self._set_figsize_(figsize)\n self._render(filename, dpi=dpi, figsize=self._figsize, axes = axes, **args)\n\n def show(self, filename=None, dpi=DEFAULT_DPI, figsize=None,\n axes = None, **args):\n r\"\"\"\n Show this graphics array using the default viewer.\n\n OPTIONAL INPUT:\n\n - ``filename`` - (default: None) string\n\n - ``dpi`` - dots per inch\n\n - ``figsize`` - width or [width, height]\n\n - ``axes`` - (default: True)\n\n - ``fontsize`` - positive integer\n\n - ``frame`` - (default: False) draw a frame around the\n image\n\n EXAMPLES: This draws a graphics array with four trig plots and no\n axes in any of the plots.\n\n ::\n\n sage: G = graphics_array([[plot(sin), plot(cos)], [plot(tan), plot(sec)]])\n sage: G.show(axes=False)\n \"\"\"\n if (figsize is not None): self._set_figsize_(figsize)\n if sage.doctest.DOCTEST_MODE:\n self.save(DOCTEST_MODE_FILE,\n dpi=dpi, figsize=self._figsize, axes = axes, **args)\n return\n if sage.plot.plot.EMBEDDED_MODE:\n self.save(filename, dpi=dpi, figsize=self._figsize, axes = axes, **args)\n return\n if filename is None:\n filename = tmp_filename(ext='.png')\n self._render(filename, dpi=dpi, figsize=self._figsize, axes = axes, **args)\n os.system('%s %s 2>/dev/null 1>/dev/null &'%(\n sage.misc.viewer.png_viewer(), filename))\n\n\n"
]
| [
[
"matplotlib.ticker.FixedFormatter",
"matplotlib.ticker.MaxNLocator",
"matplotlib.ticker.AutoMinorLocator",
"matplotlib.ticker.FixedLocator",
"matplotlib.ticker.LogLocator",
"matplotlib.ticker.OldScalarFormatter",
"matplotlib.transforms.offset_copy",
"matplotlib.ticker.LogFormatterMathtext",
"matplotlib.figure.Figure",
"matplotlib.ticker.NullLocator",
"matplotlib.ticker.NullFormatter",
"matplotlib.backends.backend_agg.FigureCanvasAgg"
]
]
|
luoshengyue/License-plate-recognition | [
"ea1a3e48bf7f4d77bcc32029f4808ae27494167e"
]
| [
"debug.py"
]
| [
"# -*- coding: utf-8 -*-\n__author__ = '樱花落舞'\nimport cv2\nimport numpy as np\n\n#用于中间环节对处理图像的输出\n\ndef img_show(filename):\n if filename.dtype == \"float32\":\n filename = filename.astype(np.uint8)\n cv2.imshow(\"img_show\", filename)\n cv2.waitKey(0)\n\n\ndef img_contours(oldimg, box):\n box = np.int0(box)\n oldimg = cv2.drawContours(oldimg, [box], 0, (0, 0, 255), 2)\n cv2.imshow(\"img_contours\", oldimg)\n cv2.waitKey(0)\n\n\ndef img_car(img_contours):\n pic_hight, pic_width = img_contours.shape[:2]\n return pic_hight, pic_width\n"
]
| [
[
"numpy.int0"
]
]
|
sunggg/tvm | [
"636463d16c8f1713a3d93793b60d21dde9b6a6f7"
]
| [
"python/tvm/relay/frontend/onnx.py"
]
| [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=invalid-name, import-self, len-as-condition, unused-argument, too-many-lines\n# pylint: disable=import-outside-toplevel\n\"\"\"ONNX: Open Neural Network Exchange frontend for Relay.\"\"\"\nimport copy\nimport math\nimport warnings\nfrom typing import Optional\n\nimport numpy as np\nimport tvm\nfrom tvm import relay\nfrom tvm.ir import IRModule\nfrom tvm.topi.utils import get_const_tuple\n\nfrom ... import nd as _nd\nfrom .. import analysis\nfrom .. import expr as _expr\nfrom .. import function as _function\nfrom .. import loops as _loops\nfrom .. import op as _op\nfrom .. import qnn as _qnn\nfrom .. import random as _random\nfrom .. import ty as _ty\nfrom .. import vision as _vision\nfrom .common import (\n AttrCvt,\n Renamer,\n autopad,\n ensure_scalar_shape,\n fold_constant,\n get_name,\n get_relay_op,\n gru_cell,\n infer_channels,\n infer_shape,\n infer_type,\n infer_value,\n lstm_cell,\n new_var,\n shape_of,\n try_resolve_var_to_const,\n unbind,\n)\n\n__all__ = [\"from_onnx\"]\n\n# The default configurations of Relay ONNX frontend.\nONNX_DEFAULT_CONFIGS = {\n # By default, TVM converts qualified onnx `matmul` to `transpose(weight) + nn.batch_matmul_NT`.\n # Change this flag to False to directly convert to `nn.batch_matmul`.\n # Note that `nn.batch_matmul` with format other than NT is in experimental, it may have some\n # performance issues.\n \"use_nt_batch_matmul\": True,\n}\n\n\nclass onnx_input(list):\n \"\"\"A helper extension to list that returns None for out of bound indices.\"\"\"\n\n def __getitem__(self, item):\n if isinstance(item, slice):\n if item.stop is None:\n stop = len(self)\n else:\n stop = item.stop\n indices = list(range(stop)[item])\n return [self[i] for i in indices]\n if isinstance(item, int):\n return list(self)[item] if item < len(self) else None\n raise TypeError(\"list indices must be integers or slices, not %s\" % type(item).__name__)\n\n\ndef get_numpy(tensor_proto):\n \"\"\"Grab data in TensorProto and convert to numpy array.\"\"\"\n try:\n from onnx.numpy_helper import to_array\n except ImportError as e:\n raise ImportError(\"Unable to import onnx which is required {}\".format(e))\n return to_array(tensor_proto)\n\n\ndef get_type(elem_type):\n \"\"\"Converts onnx integer datatype to numpy datatype\"\"\"\n try:\n from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE\n except ImportError as e:\n raise ImportError(\"Unable to import onnx which is required {}\".format(e))\n\n return str(TENSOR_TYPE_TO_NP_TYPE[elem_type])\n\n\ndef get_info(info_proto):\n \"\"\"Extract the shape from a ValueInfoProto.\"\"\"\n shape = []\n shape_name = []\n for dim in info_proto.type.tensor_type.shape.dim:\n name = dim.dim_param\n value = dim.dim_value\n if value is None or value == 
0:\n value = _ty.Any()\n shape_name.append(name)\n else:\n shape_name.append(value)\n shape.append(value)\n\n name = info_proto.name\n if info_proto.type.tensor_type.elem_type:\n dtype = get_type(info_proto.type.tensor_type.elem_type)\n else:\n dtype = None\n return name, shape, dtype, shape_name\n\n\ndef dimension_picker(prefix, suffix=\"\"):\n \"\"\"Check that dimensions are supported.\"\"\"\n\n def _impl(attr):\n kernel = attr[\"kernel_shape\"]\n if len(kernel) == 1:\n return prefix + \"1d\" + suffix\n if len(kernel) == 2:\n return prefix + \"2d\" + suffix\n if len(kernel) == 3:\n return prefix + \"3d\" + suffix\n msg = \"Only 1D, 2D, and 3D kernels are supported for operator {}.\"\n op_name = prefix + \"1d/2d/3d\"\n raise tvm.error.OpAttributeInvalid(msg.format(op_name))\n\n return _impl\n\n\ndef revert_caffe2_pad(pads):\n \"\"\"Caffe2 requires two times the normal padding.\"\"\"\n if len(pads) == 4:\n pads = pads[:2]\n elif len(pads) == 2:\n pass\n else:\n raise tvm.error.OpAttributeInvalid(\"Number of pads must be either 2 or 4.\")\n return pads\n\n\ndef get_pad_pair(input1d, kernel1d, stride1d, mode):\n \"\"\"infer pad size\"\"\"\n if input1d % stride1d == 0:\n pad = max(kernel1d - stride1d, 0)\n else:\n pad = max(kernel1d - (input1d % stride1d), 0)\n pad_before = pad // 2\n pad_after = pad - pad_before\n if \"LOWER\" in mode:\n return [pad_after, pad_before]\n return [pad_before, pad_after]\n\n\ndef onnx_default_layout(dims, op_name):\n if dims == 1:\n return \"NCW\"\n if dims == 2:\n return \"NCHW\"\n if dims == 3:\n return \"NCDHW\"\n\n msg = \"Only 1D, 2D and 3D layouts are currently supported for operator {}.\"\n raise tvm.error.OpAttributeInvalid(msg.format(op_name))\n\n\ndef onnx_storage_order2layout(storage_order, dims, op_name):\n \"\"\"converter of onnx storage order parameter to tvm storage order format\"\"\"\n if storage_order not in (0, 1):\n raise tvm.error.OpAttributeInvalid(\"Mode of storage_order must be either 0 or 1\")\n\n if dims == 1:\n return \"NCW\" if storage_order == 0 else \"NWC\"\n if dims == 2:\n return \"NCHW\" if storage_order == 0 else \"NHWC\"\n if dims == 3:\n return \"NCDHW\" if storage_order == 0 else \"NDHWC\"\n\n msg = \"Only 1D, 2D and 3D layouts are currently supported for operator {}.\"\n raise tvm.error.OpAttributeInvalid(msg.format(op_name))\n\n\ndef dimension_constraint():\n def _dim_check(attrs):\n if len(attrs[\"kernel_shape\"]) in [1, 2, 3]:\n return True\n return False\n\n return _dim_check, \"Only 1d, 2d and 3d kernel supported.\"\n\n\ndef get_scalar(x, params, dtype=\"float32\"):\n \"\"\"Helper to get a scalar value for Quantized operators.\"\"\"\n if isinstance(x, _expr.Var) and x.name_hint in params:\n return _op.const(params[x.name_hint].numpy(), dtype)\n rank = len(infer_shape(x))\n assert rank <= 1, \"scale and zero_point input must be scalars\"\n if rank == 1:\n x = _op.squeeze(x, [0])\n return _op.cast(x, dtype)\n\n\ndef get_scalar_or_1d_tensor(x, params, dtype=\"float32\"):\n \"\"\"Helper to get a scalar value or 1D tensor for Quantized operators.\"\"\"\n if isinstance(x, _expr.Var) and x.name_hint in params:\n return _op.const(params[x.name_hint].numpy(), dtype)\n rank = len(infer_shape(x))\n assert rank <= 1, \"scale and zero_point input must be scalars or 1D tensors\"\n return _op.cast(x, dtype)\n\n\ndef matmul_out_dtype(inputs, out_dtype):\n \"\"\"Common function to handle MatMul and MatMulInteger16\"\"\"\n a_shape = shape_of(inputs[0])\n a_rank = infer_shape(a_shape)[0]\n b_shape = shape_of(inputs[1])\n b_rank = 
infer_shape(b_shape)[0]\n if a_rank > 2 or b_rank > 2:\n\n def flatten_to_nd(x, x_shape, nd=3):\n ndims = infer_shape(x_shape)[0]\n if ndims == nd:\n return x\n newshape = _op.concatenate(\n [\n _expr.const([-1], dtype=infer_type(x_shape).checked_type.dtype),\n _op.strided_slice(x_shape, [ndims - nd + 1], [ndims]),\n ],\n 0,\n )\n out = _op.reshape(x, fold_constant(newshape))\n return out\n\n # Determine the output batch dimension.\n if a_rank > b_rank:\n out_batch = _op.strided_slice(a_shape, [0], [a_rank - 2])\n elif a_rank < b_rank:\n out_batch = _op.strided_slice(b_shape, [0], [b_rank - 2])\n # If its unclear how broadcasting should be applied, the output\n # shape is determined by choosing the maximum value from each input.\n else:\n out_batch = _op.concatenate(\n [\n _op.maximum(\n _op.strided_slice(a_shape, [i], [i + 1]),\n _op.strided_slice(b_shape, [i], [i + 1]),\n )\n for i in range(a_rank - 2)\n ],\n 0,\n )\n\n b_type = infer_type(inputs[1])\n # Convert to dense if the second matrix is 2d and non-dynamic\n if b_rank == 2 and not _ty.is_dynamic(b_type.checked_type):\n a = flatten_to_nd(inputs[0], a_shape, 2)\n b = _op.transpose(inputs[1])\n output = _op.nn.dense(a, b, out_dtype=out_dtype)\n else:\n a = inputs[0]\n b = inputs[1]\n # broadcast a and b\n a_broadcasted_shape = fold_constant(\n _op.concatenate(\n [\n out_batch,\n _op.strided_slice(a_shape, [a_rank - 2], [a_rank]),\n ],\n 0,\n )\n )\n b_broadcasted_shape = fold_constant(\n _op.concatenate(\n [\n out_batch,\n _op.strided_slice(b_shape, [b_rank - 2], [b_rank]),\n ],\n 0,\n )\n )\n if not tvm.ir.structural_equal(a_shape, a_broadcasted_shape):\n a = _op.transform.broadcast_to(a, a_broadcasted_shape)\n if not tvm.ir.structural_equal(b_shape, b_broadcasted_shape):\n b = _op.transform.broadcast_to(b, b_broadcasted_shape)\n # Convert a and b into 3 dimensional tensors.\n a = flatten_to_nd(a, shape_of(a), 3)\n b = flatten_to_nd(b, shape_of(b), 3)\n if ONNX_DEFAULT_CONFIGS[\"use_nt_batch_matmul\"]:\n # Transpose matrix dimensions of b.\n bt = _op.transpose(b, [0, 2, 1])\n # Perform a NT batch matmul.\n output = _op.nn.batch_matmul(a, bt, out_dtype=out_dtype)\n else:\n # Perform a NN batch matmul.\n output = _op.nn.batch_matmul(a, b, out_dtype=out_dtype, transpose_b=False)\n # Reshape output to original dimensions.\n final_shape = _op.concatenate(\n [\n out_batch,\n _op.strided_slice(\n a_shape, [infer_shape(a_shape)[0] - 2], [infer_shape(a_shape)[0] - 1]\n ),\n _op.strided_slice(\n b_shape, [infer_shape(b_shape)[0] - 1], [infer_shape(b_shape)[0]]\n ),\n ],\n 0,\n )\n return _op.reshape(output, fold_constant(final_shape))\n\n if a_rank == 1:\n return _op.squeeze(_op.nn.matmul(_op.expand_dims(inputs[0], axis=0), inputs[1]), axis=[0])\n\n # Otherwise a simple dense op will get the job done.\n input_1_t = _op.transpose(inputs[1], axes=(1, 0))\n return _op.nn.dense(inputs[0], input_1_t, out_dtype=out_dtype)\n\n\ndef layer_norm(x, eps, gamma, beta):\n \"\"\"Common function to handle layer norm\"\"\"\n eps_dtype = infer_type(x).checked_type.dtype\n\n u, s = _op.mean_variance(x, axis=-1, keepdims=True)\n output = _op.divide(\n _op.subtract(x, u),\n _op.sqrt(_op.add(s, _op.const(eps, dtype=eps_dtype))),\n )\n output = _op.multiply(output, gamma)\n if beta is not None:\n output = _op.add(output, beta)\n\n return output\n\n\nclass OnnxOpConverter(object):\n \"\"\"A helper class for holding onnx op converters.\"\"\"\n\n @classmethod\n def get_converter(cls, opset):\n \"\"\"Get converter matches given opset.\n\n Parameters\n ----------\n 
opset: int\n opset from model.\n\n Returns\n -------\n converter, which should be `_impl_vx`. Number x is the biggest\n number smaller than or equal to opset belongs to all support versions.\n \"\"\"\n versions = [int(d.replace(\"_impl_v\", \"\")) for d in dir(cls) if \"_impl_v\" in d]\n versions = sorted(versions + [opset])\n version = versions[max([i for i, v in enumerate(versions) if v == opset]) - 1]\n if hasattr(cls, \"_impl_v{}\".format(version)):\n return getattr(cls, \"_impl_v{}\".format(version))\n raise NotImplementedError(\n \"opset version {} of {} not implemented\".format(version, cls.__name__)\n )\n\n\nclass Unary(OnnxOpConverter):\n \"\"\"A helper class for unary op converters.\"\"\"\n\n name = \"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n assert len(inputs) == 1, \"Unary math op {} takes 1 input, {} given\".format(\n cls.name, len(inputs)\n )\n op_name = cls.name\n return get_relay_op(op_name)(*inputs)\n\n\nclass Elemwise(OnnxOpConverter):\n \"\"\"A helper class for elemwise op converters.\"\"\"\n\n name = \"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n assert len(inputs) == 2, \"Math op {} take 2 inputs, {} given\".format(cls.name, len(inputs))\n op_name = cls.name\n conv_ops = [\"conv2d\", \"conv2d_transpose\"]\n if attr.get(\"broadcast\", 0) and any(x in str(inputs[0]) for x in conv_ops):\n # TODO(zhreshold): remove hard coded infershape\n axis = int(attr.get(\"axis\", 0))\n inputs[1] = _op.expand_dims(inputs[1], axis=axis, num_newaxis=2)\n return get_relay_op(op_name)(*inputs)\n\n\nclass Pool(OnnxOpConverter):\n \"\"\"A helper class for pool op converters.\"\"\"\n\n name = \"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n data = inputs[0]\n input_shape = infer_shape(data)\n ndim = len(input_shape)\n\n attr_cvt, data = cls._run_calculation(inputs, attr, params)\n out = attr_cvt([data], attr, params)\n\n if ndim - len(attr[\"kernel_shape\"]) == 1:\n out = _op.squeeze(out, axis=[0])\n return out\n\n @classmethod\n def _run_calculation(cls, inputs, attr, params):\n \"\"\"Helper method to return the processed input data and AttrCvt object\"\"\"\n\n data = inputs[0]\n input_shape = infer_shape(data)\n input_dtype = infer_type(data).checked_type.dtype\n ndim = len(input_shape)\n if \"auto_pad\" in attr:\n attr[\"auto_pad\"] = attr[\"auto_pad\"].decode(\"utf-8\")\n if attr[\"auto_pad\"] in (\"SAME_UPPER\", \"SAME_LOWER\"):\n if cls.name == \"avg_pool\":\n pad_tuple = []\n for axis in range(len(input_shape) - 2):\n axis_shape = input_shape[2 + axis]\n stride = attr.get(\"strides\", [1] * ndim)[axis]\n kernel = attr[\"kernel_shape\"][axis]\n pad = get_pad_pair(axis_shape, kernel, stride, attr[\"auto_pad\"])\n pad_tuple.append(pad)\n pad_tuple = tuple([val for pair in zip(*pad_tuple) for val in pair])\n attr[\"pads\"] = pad_tuple\n else:\n # Warning: Pool does not yet support dynamic shapes,\n # one will need to run dynamic_to_static on this model after import\n if \"int\" in input_dtype:\n pad_val = np.iinfo(np.dtype(input_dtype)).min\n else:\n pad_val = np.finfo(np.dtype(input_dtype)).min\n data = autopad(\n data,\n attr.get(\"strides\", [1] * (ndim - 2)),\n attr[\"kernel_shape\"],\n [1] * ndim,\n pad_value=pad_val,\n mode=attr[\"auto_pad\"],\n )\n elif attr[\"auto_pad\"] == \"VALID\":\n attr[\"pads\"] = tuple([0 for i in range(ndim - 2)])\n elif attr[\"auto_pad\"] == \"NOTSET\":\n pass\n else:\n msg = 'Value {} in attribute \"auto_pad\" of operator {} is invalid.'\n raise 
tvm.error.OpAttributeInvalid(msg.format(attr[\"auto_pad\"], cls.name))\n attr.pop(\"auto_pad\")\n\n if \"storage_order\" in attr:\n attr[\"layout\"] = onnx_storage_order2layout(\n attr[\"storage_order\"], dims=(len(input_shape) - 2), op_name=cls.name\n )\n else:\n if ndim - len(attr[\"kernel_shape\"]) == 1:\n data = _op.expand_dims(data, axis=0)\n input_shape = [1] + list(input_shape)\n\n attr[\"layout\"] = onnx_default_layout(dims=(len(input_shape) - 2), op_name=cls.name)\n\n return (\n AttrCvt(\n op_name=dimension_picker(cls.name),\n transforms={\n \"kernel_shape\": \"pool_size\",\n \"pads\": (\"padding\", 0),\n \"dilations\": (\"dilation\", 1),\n },\n ignores=[\"storage_order\"],\n custom_check=dimension_constraint(),\n ),\n data,\n )\n\n\nclass Absolute(Unary):\n \"\"\"Operator converter for Absolute.\"\"\"\n\n name = \"abs\"\n\n\nclass Add(Elemwise):\n \"\"\"Operator converter for Add.\"\"\"\n\n name = \"add\"\n\n\nclass AveragePool(Pool):\n \"\"\"Operator converter for AveragePool.\"\"\"\n\n name = \"avg_pool\"\n\n\nclass QLinearAveragePool(Pool):\n \"\"\"Operator converter for QLinearAveragePool from Microsoft onnxruntime contrib opset.\"\"\"\n\n name = \"avg_pool\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n x_scale = get_scalar(inputs[1], params)\n x_zero_point = get_scalar(inputs[2], params, dtype=\"int32\")\n y_scale = fold_constant(get_scalar(inputs[3], params))\n y_zero_point = get_scalar(inputs[4], params, dtype=\"int32\")\n\n attr_cvt, data = cls._run_calculation(inputs, attr, params)\n\n input_dtype = infer_type(data).checked_type.dtype\n # Onnxruntime doesn't actually do this op in integer, they dequantize to fp32\n # and then requantize afer (according to documentation below)\n # https://github.com/microsoft/onnxruntime/blob/master/docs/ContribOperators.md#com.microsoft.QLinearAveragePool\n float_node = _qnn.op.dequantize(data, x_scale, x_zero_point)\n out = attr_cvt([float_node], attr, params)\n return _qnn.op.quantize(out, y_scale, y_zero_point, out_dtype=input_dtype)\n\n\nclass BatchNorm(OnnxOpConverter):\n \"\"\"Operator converter for BatchNorm.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n # TODO(zhreshold): 'spatial' is not properly handled here.\n # TODO(vvchernov): 'training_mode' (onnx tag) is not correctly handled, ignore for now\n out = AttrCvt(\n op_name=\"batch_norm\",\n ignores=[\"spatial\", \"is_test\", \"consumed_inputs\", \"momentum\", \"training_mode\"],\n )(inputs, attr, params)\n # We only support test mode, so we return data, moving_mean, moving_var,\n # and then moving_mean and moving_var again as placeholders for\n # the expected \"saved_mean\", \"saved_var\".\n return _expr.TupleWrapper(_expr.Tuple((*out, out[1], out[2])), 5)\n\n\nclass InstanceNorm(OnnxOpConverter):\n \"\"\"Operator converter for BatchNorm.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n return AttrCvt(op_name=\"instance_norm\")(inputs, attr, params)\n\n\nclass Conv(OnnxOpConverter):\n \"\"\"Operator converter for Conv.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n # Use shape of input to determine convolution type.\n data = inputs[0]\n kernel = inputs[1]\n input_shape = infer_shape(data)\n ndim = len(input_shape)\n\n kernel_type = infer_type(inputs[1])\n kernel_shapes = [get_const_tuple(kernel_type.checked_type.shape)]\n\n if \"kernel_shape\" not in attr:\n attr[\"kernel_shape\"] = kernel_shapes[0][2:]\n\n if \"auto_pad\" in attr:\n attr[\"auto_pad\"] = attr[\"auto_pad\"].decode(\"utf-8\")\n if 
attr[\"auto_pad\"] in (\"SAME_UPPER\", \"SAME_LOWER\"):\n # Warning: Convolution does not yet support dynamic shapes,\n # one will need to run dynamic_to_static on this model after import\n data = autopad(\n data,\n attr.get(\"strides\", [1] * (ndim - 2)),\n attr[\"kernel_shape\"],\n attr.get(\"dilations\", [1] * (ndim - 2)),\n mode=attr[\"auto_pad\"],\n )\n elif attr[\"auto_pad\"] == \"VALID\":\n attr[\"pads\"] = [0 for i in range(ndim - 2)]\n elif attr[\"auto_pad\"] == \"NOTSET\":\n pass\n else:\n msg = 'Value {} in attribute \"auto_pad\" of operator Conv is invalid.'\n raise tvm.error.OpAttributeInvalid(msg.format(attr[\"auto_pad\"]))\n attr.pop(\"auto_pad\")\n\n attr[\"channels\"] = kernel_shapes[0][0]\n out = AttrCvt(\n op_name=dimension_picker(\"conv\"),\n transforms={\n \"kernel_shape\": \"kernel_size\",\n \"dilations\": (\"dilation\", 1),\n \"pads\": (\"padding\", 0),\n \"group\": (\"groups\", 1),\n },\n custom_check=dimension_constraint(),\n )([data, kernel], attr, params)\n\n use_bias = len(inputs) == 3\n if use_bias:\n out = _op.nn.bias_add(out, inputs[2])\n return out\n\n\nclass ConvTranspose(OnnxOpConverter):\n \"\"\"Operator converter for ConvTranspose.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n # get number of channels\n out_type = infer_type(inputs[1])\n kernel_shape = [get_const_tuple(out_type.checked_type.shape)]\n out_channels = kernel_shape[0][1] * attr.get(\"group\", 1)\n attr[\"channels\"] = out_channels\n groups = attr.get(\"group\", 1)\n\n if \"kernel_shape\" not in attr:\n attr[\"kernel_shape\"] = kernel_shape[0][2:]\n\n attr[\"groups\"] = groups\n # infer pads for auto_pad\n data = inputs[0]\n input_shape = infer_shape(data)\n ndim = len(input_shape)\n if \"auto_pad\" in attr:\n attr[\"auto_pad\"] = attr[\"auto_pad\"].decode(\"utf-8\")\n if attr[\"auto_pad\"] in (\"SAME_UPPER\", \"SAME_LOWER\"):\n # Warning: Convolution does not yet support dynamic shapes,\n # one will need to run dynamic_to_static on this model after import\n data = autopad(\n data,\n attr.get(\"strides\", [1] * (ndim - 2)),\n attr[\"kernel_shape\"],\n attr.get(\"dilations\", [1] * (ndim - 2)),\n deconv=True,\n mode=attr[\"auto_pad\"],\n )\n elif attr[\"auto_pad\"] == \"VALID\":\n attr[\"pads\"] = tuple([0 for i in range(ndim - 2)])\n elif attr[\"auto_pad\"] == \"NOTSET\":\n pass\n else:\n msg = 'Value {} in attribute \"auto_pad\" of operator Conv is invalid.'\n raise tvm.error.OpAttributeInvalid(msg.format(attr[\"auto_pad\"]))\n attr.pop(\"auto_pad\")\n\n out = AttrCvt(\n op_name=dimension_picker(\"conv\", \"_transpose\"),\n transforms={\n \"kernel_shape\": \"kernel_size\",\n \"dilations\": (\"dilation\", 1),\n \"pads\": (\"padding\", 0),\n \"group\": (\"groups\", 1),\n },\n disables=[\"output_shape\"],\n custom_check=dimension_constraint(),\n )([data, inputs[1]], attr, params)\n use_bias = len(inputs) == 3\n if use_bias:\n out = _op.nn.bias_add(out, inputs[2])\n return out\n\n @classmethod\n def _impl_v11(cls, inputs, attr, params):\n # get number of channels\n out_type = infer_type(inputs[1])\n kernel_shape = [get_const_tuple(out_type.checked_type.shape)]\n out_channels = kernel_shape[0][1] * attr.get(\"group\", 1)\n attr[\"channels\"] = out_channels\n groups = attr.get(\"group\", 1)\n\n if \"kernel_shape\" not in attr:\n attr[\"kernel_shape\"] = kernel_shape[0][2:]\n\n attr[\"groups\"] = groups\n # infer pads for auto_pad\n data = inputs[0]\n input_shape = infer_shape(data)\n ndim = len(input_shape)\n if \"auto_pad\" in attr:\n attr[\"auto_pad\"] = 
attr[\"auto_pad\"].decode(\"utf-8\")\n if attr[\"auto_pad\"] in (\"SAME_UPPER\", \"SAME_LOWER\"):\n # Warning: Convolution does not yet support dynamic shapes,\n # one will need to run dynamic_to_static on this model after import\n kernel_shape = attr[\"kernel_shape\"]\n kndim = len(kernel_shape)\n dilations = attr.get(\"dilations\", [1] * kndim)\n output_padding = attr.get(\"output_padding\", [0] * kndim)\n strides = attr[\"strides\"]\n total_pad = [0] * kndim\n for i in range(kndim):\n total_pad[i] = (\n output_padding[i] + ((kernel_shape[i] - 1) * dilations[i] + 1) - strides[i]\n )\n left = [p // 2 for p in total_pad]\n right = [total_pad[i] - left[i] for i in range(kndim)]\n if \"LOWER\" in attr[\"auto_pad\"]:\n pad = left + right\n else:\n pad = right + left\n attr[\"pads\"] = pad\n elif attr[\"auto_pad\"] == \"VALID\":\n attr[\"pads\"] = tuple([0 for i in range(ndim - 2)])\n elif attr[\"auto_pad\"] == \"NOTSET\":\n pass\n else:\n msg = 'Value {} in attribute \"auto_pad\" of operator Conv is invalid.'\n raise tvm.error.OpAttributeInvalid(msg.format(attr[\"auto_pad\"]))\n attr.pop(\"auto_pad\")\n\n out = AttrCvt(\n op_name=dimension_picker(\"conv\", \"_transpose\"),\n transforms={\n \"kernel_shape\": \"kernel_size\",\n \"dilations\": (\"dilation\", 1),\n \"pads\": (\"padding\", 0),\n \"group\": (\"groups\", 1),\n },\n disables=[\"output_shape\"],\n custom_check=dimension_constraint(),\n )([data, inputs[1]], attr, params)\n use_bias = len(inputs) == 3\n if use_bias:\n out = _op.nn.bias_add(out, inputs[2])\n return out\n\n\nclass GlobalAveragePool(OnnxOpConverter):\n \"\"\"Operator converter for GlobalAveragePool\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n rank = len(infer_shape(inputs[0]))\n if rank == 3:\n return _op.nn.global_avg_pool1d(inputs[0])\n if rank == 4:\n return _op.nn.global_avg_pool2d(inputs[0])\n if rank == 5:\n return _op.nn.global_avg_pool3d(inputs[0])\n raise NotImplementedError(\n \"Global average pooling is only implemented for 1D, 2D, and 3D kernels, got %dD.\"\n % (rank - 2),\n )\n\n\nclass QLinearGlobalAveragePool(OnnxOpConverter):\n \"Operator converter for QLinearGlobalAveragePool from Microsoft onnxruntime contrib opset.\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n rank = len(infer_shape(inputs[0]))\n\n x_scale = get_scalar(inputs[1], params)\n x_zero_point = get_scalar(inputs[2], params, dtype=\"int32\")\n y_scale = fold_constant(get_scalar(inputs[3], params))\n y_zero_point = get_scalar(inputs[4], params, dtype=\"int32\")\n\n input_dtype = infer_type(inputs[0]).checked_type.dtype\n\n # Onnxruntime documentation does not mention that this global avg_pool should follow the\n # sequence dequantize -> float op -> quantize, but that is how QLinearAveragePool is done.\n #\n # This op also follows the same pattern since qnn op is not available right now.\n # TODO: Generate QNN op to perform quantized operation instead of dequant -> op -> quant\n x = _qnn.op.dequantize(inputs[0], x_scale, x_zero_point)\n if rank == 3:\n out = _op.nn.global_avg_pool1d(x)\n elif rank == 4:\n out = _op.nn.global_avg_pool2d(x)\n elif rank == 5:\n out = _op.nn.global_avg_pool3d(x)\n else:\n raise NotImplementedError(\n \"Global average pooling is only implemented for 1D, 2D, and 3D kernels, got %dD.\"\n % (rank - 2),\n )\n return _qnn.op.quantize(out, y_scale, y_zero_point, out_dtype=input_dtype)\n\n\nclass GlobalMaxPool(OnnxOpConverter):\n \"\"\"Operator converter for GlobalMaxPool\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, 
params):\n rank = len(infer_shape(inputs[0]))\n if rank == 3:\n return _op.nn.global_max_pool1d(inputs[0])\n if rank == 4:\n return _op.nn.global_max_pool2d(inputs[0])\n if rank == 5:\n return _op.nn.global_max_pool3d(inputs[0])\n raise NotImplementedError(\n \"Global max pooling is only implemented for 1D, 2D, and 3D kernels, got %dD.\"\n % (rank - 2),\n )\n\n\nclass Div(Elemwise):\n \"\"\"Operator converter for Divide.\"\"\"\n\n name = \"divide\"\n\n\nclass Elu(OnnxOpConverter):\n \"\"\"Operator converter for Elu.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n alpha = float(attr.get(\"alpha\", 1.0))\n return _expr.const(-alpha) * _op.nn.relu(\n _expr.const(1.0) - _op.exp(inputs[0])\n ) + _op.nn.relu(inputs[0])\n\n\nclass Gelu(OnnxOpConverter):\n \"\"\"Operator converter for Gelu from Microsoft onnxruntime contrib opset.\n\n gelu(x) = 0.5x(1 + erf(x/sqrt(2)))\n \"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n x = inputs[0]\n\n # Declare consts\n const_dtype = infer_type(x).checked_type.dtype\n half = _expr.const(0.5, dtype=const_dtype)\n one = _expr.const(1.0, dtype=const_dtype)\n sqrt2 = _expr.const(math.sqrt(2), dtype=const_dtype)\n\n # Compute gelu\n term1 = _op.multiply(half, x)\n erf = _op.erf(_op.divide(x, sqrt2))\n term2 = _op.add(one, erf)\n return _op.multiply(term1, term2)\n\n\nclass BiasGelu(OnnxOpConverter):\n \"\"\"Operator converter for BiasGelu from Microsoft onnxruntime contrib opset.\n\n bias_gelu(x, b) = 0.5(x, b)(1 + erf((x + b)/sqrt(2)))\n \"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n x = inputs[0]\n b = inputs[1]\n\n b_shape = infer_shape(b)\n assert len(b_shape) == 1, \"BiasGelu bias term must be a 1D tensor\"\n\n inp = _op.add(x, b)\n return Gelu._impl_v1([inp], attr, params)\n\n\nclass EmbedLayerNormalization(OnnxOpConverter):\n \"\"\"Operator converter for EmbedLayerNormalization from Microsoft onnxruntime contrib opset.\n\n This layer embeds the input tokens, sums them, and applies layer normalization.\n \"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n input_ids = inputs[0]\n segment_ids = inputs[1]\n word_emb = inputs[2]\n pos_emb = inputs[3]\n segment_emb = inputs[4]\n gamma = inputs[5]\n beta = inputs[6]\n\n mask = inputs[7]\n pos_ids = inputs[8]\n\n eps = attr.get(\"epsilon\", 1e-12)\n\n (batch_size, seq_len) = infer_shape(input_ids)\n\n if segment_ids:\n assert segment_emb\n\n if pos_ids is None:\n pos_ids = _op.const([list(range(seq_len))] * batch_size, dtype=\"int32\")\n\n word_vec = _op.take(word_emb, input_ids, axis=0)\n segment_vec = _op.take(segment_emb, segment_ids, axis=0)\n pos_vec = _op.take(pos_emb, pos_ids, axis=0)\n\n vec_sum = _op.add(word_vec, pos_vec)\n if segment_ids:\n vec_sum = _op.add(vec_sum, segment_vec)\n\n ln = layer_norm(vec_sum, eps, gamma, beta)\n\n mask_index = _op.const(np.zeros((batch_size,), dtype=\"int32\"))\n if mask:\n # calculate number of words per sentence\n mask_index = _op.sum(mask, axis=1)\n\n # TODO(@anwang2009): onnxruntime v1.10.0 requires a third output of vec_sum\n return _expr.TupleWrapper(_expr.Tuple([ln, mask_index]), 2)\n\n\nclass SkipLayerNormalization(OnnxOpConverter):\n \"\"\"Operator converter for SkipLayerNormalization from Microsoft onnxruntime contrib opset.\n\n This layer sums the two input tensors (along with optional bias), and applies layer\n normalization.\n \"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n data = inputs[0]\n skip = inputs[1]\n gamma = inputs[2]\n beta = inputs[3]\n bias = 
inputs[4]\n\n assert (\n beta is not None and bias is not None\n ), \"SkipLayerNormalization import currently only supports required beta and bias\"\n\n eps = attr.get(\"epsilon\", 1e-12)\n\n x = _op.add(data, skip)\n if bias is not None:\n x = _op.add(x, bias)\n\n output = layer_norm(x, eps, gamma, beta)\n\n # onnxruntime doesn't compute the other outputs, despite the documentation\n placeholder = _op.const(0, dtype=\"float32\")\n\n return _expr.TupleWrapper(_expr.Tuple([output, placeholder, placeholder]), 3)\n\n\nclass Attention(OnnxOpConverter):\n \"\"\"Operator converter for Attention from Microsoft onnxruntime contrib opset.\n\n This is the self-attention mechanism used in transformer models.\n \"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n num_heads = attr[\"num_heads\"]\n assert (\n \"qkv_hidden_sizes\" not in attr\n ), \"different hidden sizes for Q, K, V are not currently supported\"\n assert \"unidirectional\" not in attr, \"unidirectional attention not current supported\"\n\n # (batch, seq, in_hidden)\n input_emb = inputs[0]\n\n # (in_hidden, 3 * out_hidden), where out_hidden = num_heads * head_size\n weight = inputs[1]\n\n # (3 * out_hidden,)\n bias = inputs[2]\n\n # 1. ( batch, 1, max_seq, max_seq)\n # 2. ( batch, past_seq + seq,)\n # 3. ( batch, seq, past_seq + seq,)\n # 4. ( batch,)\n # 5. (2 * batch,)\n # For now, we only support case 2.\n mask_index = inputs[3]\n\n # (2, batch, num_heads, past_seq, head_size)\n past = inputs[4]\n\n # (batch, num_heads, seq, seq)\n extra_add = inputs[5]\n\n (batch_size, seq_len, _) = infer_shape(input_emb)\n (out_hidden_x3,) = infer_shape(bias)\n assert out_hidden_x3 % 3 == 0, \"bias shape should be divisible by 3\"\n out_hidden = out_hidden_x3 // 3\n assert (\n out_hidden % num_heads == 0\n ), \"output hidden size should be divisible by number of attention heads\"\n head_size = out_hidden // num_heads\n\n assert (\n mask_index is not None\n ), \"Attention import currently only supports required mask_index\"\n mask_index_shape = infer_shape(mask_index)\n assert (\n len(mask_index_shape) == 2\n and mask_index_shape[0] == batch_size\n and mask_index_shape[1] == seq_len\n ), \"currently only support (batch_size, sequence_length) mask index\"\n\n assert past is None, \"past K, V state is not currently supported\"\n assert extra_add is None, \"extra add to QxK not currently supported\"\n\n # split weight and biases and do the matmuls\n w_Q, w_K, w_V = _op.split(weight, 3, axis=1)\n b_Q, b_K, b_V = _op.split(bias, 3, axis=0)\n # need to merge batch dimensions since TVM matmul is 2D\n input_emb = _op.reverse_reshape(input_emb, (-1, 0))\n Q = _op.add(_op.nn.matmul(input_emb, w_Q), b_Q)\n K = _op.add(_op.nn.matmul(input_emb, w_K), b_K)\n V = _op.add(_op.nn.matmul(input_emb, w_V), b_V)\n\n # massage tensors in preparation for batched matmul\n def massage(tensor):\n tensor = _op.reshape(tensor, (batch_size, seq_len, num_heads, head_size))\n\n # (batch_size, num_heads, seq_len, head_size)\n tensor = _op.transpose(tensor, axes=[0, 2, 1, 3])\n\n # (batch_size * num_heads, seq_len, head_size)\n return _op.reverse_reshape(tensor, (-1, 0, 0))\n\n Q = massage(Q)\n K = massage(K)\n V = massage(V)\n\n K_present = _op.reshape(K, (batch_size, num_heads, seq_len, head_size))\n V_present = _op.reshape(V, (batch_size, num_heads, seq_len, head_size))\n present = _op.stack([K_present, V_present], axis=0)\n\n att_scores = _op.nn.batch_matmul(Q, K, transpose_a=False, transpose_b=True)\n score_dtype = infer_type(att_scores).checked_type.dtype\n 
att_scores = _op.divide(\n att_scores,\n _op.const(np.sqrt(head_size), dtype=infer_type(att_scores).checked_type.dtype),\n )\n att_scores = _op.reshape(att_scores, (batch_size, num_heads, seq_len, seq_len))\n\n # build the attention mask\n att_mask = _op.cast(mask_index, score_dtype)\n att_mask = _op.expand_dims(att_mask, 1, num_newaxis=2)\n att_mask = _op.subtract(_op.const(1, dtype=score_dtype), att_mask)\n att_mask = _op.multiply(att_mask, _op.const(-10000, dtype=score_dtype))\n\n # apply the mask\n att_scores = _op.add(att_scores, att_mask)\n att_scores = _op.reshape(att_scores, (batch_size * num_heads, seq_len, seq_len))\n\n att_probs = _op.nn.softmax(att_scores, axis=-1)\n\n output = _op.nn.batch_matmul(att_probs, V, transpose_a=False, transpose_b=False)\n output = _op.reverse_reshape(output, (-1, num_heads, 0, 0))\n output = _op.transpose(output, axes=[0, 2, 1, 3])\n output = _op.reshape(output, (0, 0, out_hidden))\n\n return _expr.TupleWrapper(_expr.Tuple([output, present]), 2)\n\n\nclass Gemm(OnnxOpConverter):\n \"\"\"Operator converter for Gemm.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n assert len(inputs) == 3 or len(inputs) == 2, \"Gemm op take 2 or 3 inputs, {} given\".format(\n len(inputs)\n )\n input0_state = infer_type(inputs[0])\n dtype = input0_state.checked_type.dtype\n # Y = alpha * A * B + beta * C\n alpha = float(attr.get(\"alpha\", 1.0))\n beta = float(attr.get(\"beta\", 1.0))\n transA = int(attr.get(\"transA\", 0))\n transB = int(attr.get(\"transB\", 0))\n # get number of channels\n channels = infer_channels(inputs[1], not transB)\n if transA:\n inputs[0] = _op.transpose(inputs[0], axes=(1, 0))\n if not transB:\n inputs[1] = _op.transpose(inputs[1], axes=(1, 0))\n if len(input0_state.checked_type.shape) != 2:\n inputs[0] = _op.nn.batch_flatten(inputs[0])\n if alpha != 1.0:\n inputs[0] *= _expr.const(alpha, dtype=dtype)\n out = _op.nn.dense(inputs[0], inputs[1], units=channels)\n if len(inputs) == 3:\n out = out + _expr.const(beta, dtype=dtype) * inputs[2]\n return out\n\n\nclass MatMul(OnnxOpConverter):\n \"\"\"Operator converter for MatMul.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n assert len(inputs) == 2, \"MatMul op take 2 inputs, {} given\".format(len(inputs))\n # Need to check input shape as batch matmul must be supported.\n return matmul_out_dtype(inputs, out_dtype=infer_type(inputs[0]).checked_type.dtype)\n\n\nclass MatMulInteger16(OnnxOpConverter):\n \"\"\"Operator converter for MatMulInteger16 from Microsoft onnxruntime contrib opset.\"\"\"\n\n @classmethod\n def _impl_v10(cls, inputs, attr, params):\n assert len(inputs) == 2, \"MatMulInteger16 op take 2 inputs, {} given\".format(len(inputs))\n a_dtype = infer_type(inputs[0]).checked_type.dtype\n b_dtype = infer_type(inputs[1]).checked_type.dtype\n # Check input data types\n assert a_dtype in (\"int16\", \"uint16\"), \"MatMulInteger16: invalid dtype for first input\"\n assert b_dtype in (\"int16\", \"uint16\"), \"MatMulInteger16: invalid dtype for second input\"\n out_dtype = \"int32\"\n if a_dtype == \"uint16\" and b_dtype == \"uint16\":\n out_dtype = \"uint32\"\n return matmul_out_dtype(inputs, out_dtype)\n\n\nclass Mod(OnnxOpConverter):\n \"\"\"Operator converter for Mod.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n assert len(inputs) == 2, \"Mod op take 2 inputs, {} given\".format(len(inputs))\n\n # Note: attr['fmod'] determines whether the operator should behave like np.fmod or np.mod.\n # attr['fmod'] == 0 will behave as np.mod 
and attr['fmod'] == 1 will force fmod treatment.\n # The relay equivalent of np.fmod is relay.mod and np.mod is relay.floor_mod\n if attr.get(\"fmod\", 0) == 0:\n op_name = \"floor_mod\"\n else:\n op_name = \"mod\"\n\n return AttrCvt(op_name)(inputs, {}, params)\n\n\nclass MaxPool(Pool):\n \"\"\"Operator converter for MaxPool\"\"\"\n\n name = \"max_pool\"\n\n\nclass MaxUnpool(OnnxOpConverter):\n \"\"\"Operator converter for MaxUnpool\"\"\"\n\n @classmethod\n def _impl_v11(cls, inputs, attr, params):\n # Unpack inputs and attributes\n data = inputs[0]\n data_type = infer_type(data).checked_type.dtype\n indices = inputs[1]\n output_shape = inputs[2]\n kernel_shape = attr.get(\"kernel_shape\")\n pads = attr.get(\"pads\", None)\n strides = attr.get(\"strides\", [1] * len(kernel_shape))\n\n # Compute the proper output shape before padding.\n multiplier = _op.concatenate(\n [_expr.const([1, 1], dtype=\"int64\"), _expr.const(list(strides), dtype=\"int64\")], axis=0\n )\n total_output_shape = multiplier * shape_of(data, dtype=\"int64\")\n # Add extra dimensions from kernel size and stride mismatch\n total_output_shape += _op.concatenate(\n [_expr.const([0, 0], \"int64\"), _expr.const(list(kernel_shape), \"int64\")], axis=0\n ) - _op.concatenate(\n [_expr.const([0, 0], \"int64\"), _expr.const(list(strides), \"int64\")], axis=0\n )\n\n # Compute padding amount if output shape is specified.\n if output_shape is not None:\n total_output_shape = output_shape\n\n elif pads is not None:\n # Get pads in the proper format for relay.\n pads = _op.concatenate(\n [_expr.const([0, 0, 0, 0], \"int64\"), _expr.const(list(pads), \"int64\")], axis=0\n )\n pads = _op.reshape(pads, [-1, 2])\n # Compute the total padding per axis.\n total_pad = _op.sum(pads, axis=-1)\n # Reversing maxpool means that padding actually makes our output smaller.\n total_output_shape = total_output_shape - total_pad\n\n # Create a tensor of zeros then scatter our data through it.\n zeros_tensor = _op.zeros(total_output_shape, data_type)\n # We need to flatten all our tensors before scattering.\n flat_tensor = _op.scatter(\n _op.reshape(zeros_tensor, [-1]),\n _op.reshape(indices, [-1]),\n _op.reshape(data, [-1]),\n axis=0,\n )\n # Now reshape back to prepadded shape.\n output_tensor = _op.reshape(flat_tensor, total_output_shape)\n\n return output_tensor\n\n\nclass LpPool(OnnxOpConverter):\n \"\"\"A helper class for lppool op converters.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n dtype = infer_type(inputs[0]).checked_type.dtype\n data = inputs[0]\n input_shape = infer_shape(data)\n ndim = len(input_shape)\n if \"auto_pad\" in attr:\n attr[\"auto_pad\"] = attr[\"auto_pad\"].decode(\"utf-8\")\n if attr[\"auto_pad\"] in (\"SAME_UPPER\", \"SAME_LOWER\"):\n # Warning: LpPool does not yet support dynamic shapes,\n # one will need to run dynamic_to_static on this model after import\n data = autopad(\n data,\n attr[\"strides\"],\n attr[\"kernel_shape\"],\n [1] * ndim,\n mode=attr[\"auto_pad\"],\n )\n elif attr[\"auto_pad\"] == \"VALID\":\n attr[\"pads\"] = tuple([0 for i in range(ndim - 2)])\n elif attr[\"auto_pad\"] == \"NOTSET\":\n pass\n else:\n msg = 'Value {} in attribute \"auto_pad\" of operator {} is invalid.'\n raise tvm.error.OpAttributeInvalid(msg.format(attr[\"auto_pad\"], \"LpPool\"))\n attr.pop(\"auto_pad\")\n\n if \"storage_order\" in attr:\n attr[\"layout\"] = onnx_storage_order2layout(\n attr[\"storage_order\"], dims=(len(input_shape) - 2), op_name=\"LpPool\"\n )\n else:\n attr[\"layout\"] = 
onnx_default_layout(dims=(len(input_shape) - 2), op_name=\"LpPool\")\n\n p_value = attr.get(\"p\", 2)\n p = _expr.const(p_value, dtype)\n reci_p = _expr.const(1.0 / p_value, dtype)\n data = _op.power(data, p)\n\n out = AttrCvt(\n op_name=dimension_picker(\"avg_pool\"),\n transforms={\"kernel_shape\": \"pool_size\", \"pads\": (\"padding\", 0)},\n extras={\"count_include_pad\": True},\n ignores=[\"p\"],\n custom_check=dimension_constraint(),\n )([data], attr, params)\n kernels = attr[\"kernel_shape\"]\n out = _op.abs(out) * _expr.const(np.prod(kernels).astype(dtype))\n return _op.power(out, reci_p)\n\n\nclass GlobalLpPool(OnnxOpConverter):\n \"\"\"Operator converter for GlobalLpPool.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n # TODO: GlobalLpPool does not yet support dynamic shapes\n in_shape = infer_shape(inputs[0])\n attr[\"kernel_shape\"] = in_shape[2:]\n\n return LpPool._impl_v1(inputs, attr, params)\n\n\nclass Mul(Elemwise):\n \"\"\"Operator converter for Multiply.\"\"\"\n\n name = \"multiply\"\n\n\nclass Pad(OnnxOpConverter):\n \"\"\"Operator converter for Pad.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n pad_width = []\n pads = attr.pop(\"paddings\")\n dims = int(len(pads) / 2)\n for i in range(dims):\n pad_width.append((pads[i], pads[i + dims]))\n attr[\"pad_width\"] = pad_width\n pad_mode = attr.get(\"mode\", b\"constant\").decode(\"utf-8\")\n if pad_mode in [\"constant\", \"edge\", \"reflect\"]:\n attr[\"pad_mode\"] = pad_mode\n attr.pop(\"mode\", None)\n else:\n raise tvm.error.OpAttributeInvalid(\n \"Value \" + pad_mode + ' in attribute \"mode\" is invalid for operator Pad.'\n )\n\n return AttrCvt(\n _op.nn.pad,\n transforms={\n \"value\": \"pad_value\",\n },\n )(inputs, attr, params)\n\n @classmethod\n def _impl_v2(cls, inputs, attr, params):\n pad_width = []\n pads = attr.pop(\"pads\")\n dims = int(len(pads) / 2)\n for i in range(dims):\n pad_width.append((pads[i], pads[i + dims]))\n attr[\"pad_width\"] = pad_width\n pad_mode = attr.get(\"mode\", b\"constant\").decode(\"utf-8\")\n if pad_mode in [\"constant\", \"edge\", \"reflect\"]:\n attr[\"pad_mode\"] = pad_mode\n attr.pop(\"mode\", None)\n else:\n raise tvm.error.OpAttributeInvalid(\n \"Value \" + pad_mode + ' in attribute \"mode\" is invalid for operator Pad.'\n )\n\n return AttrCvt(\n \"pad\",\n transforms={\n \"value\": \"pad_value\",\n },\n )(inputs, attr, params)\n\n @classmethod\n def _impl_v11(cls, inputs, attr, params):\n pads = inputs[1]\n if len(inputs) == 3:\n value = fold_constant(_op.take(inputs[2], _op.const(0)))\n else:\n value = 0.0\n\n pad_width_expr = fold_constant(_op.transpose(_op.reshape(pads, (2, -1))))\n pad_mode = attr.get(\"mode\", b\"constant\").decode(\"utf-8\")\n if not pad_mode in [\"constant\", \"edge\", \"reflect\"]:\n raise tvm.error.OpAttributeInvalid(\n \"Value \" + pad_mode + ' in attribute \"mode\" is invalid for operator Pad.'\n )\n\n return _op.nn.pad(inputs[0], pad_width_expr, value, pad_mode=pad_mode)\n\n\nclass ParametricSoftPlus(OnnxOpConverter):\n \"\"\"Operator converter for ParametricSoftPlus.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n alpha = _expr.const(float(attr.get(\"alpha\", 1.0)))\n beta = _expr.const(float(attr.get(\"beta\", 1.0)))\n return _op.log(_op.exp(beta * inputs[0]) + _expr.const(1.0)) * alpha\n\n\nclass Pow(OnnxOpConverter):\n \"\"\"Operator converter for Pow.\"\"\"\n\n @classmethod\n def _impl_v13(cls, inputs, attr, params):\n x = inputs[0]\n y = inputs[1]\n\n x_type = 
infer_type(x).checked_type.dtype\n output_type = x_type\n y_type = infer_type(y).checked_type.dtype\n\n if not x_type.startswith(\"float\"):\n x_type = \"float32\"\n x = _op.cast(x, x_type)\n\n if x_type != y_type:\n y = _op.cast(y, x_type)\n\n # TODO: come up with good default integer pow() func for common backends\n result = _op.power(x, y)\n if x_type != output_type:\n return _op.cast(result, output_type)\n return result\n\n\nclass Prelu(OnnxOpConverter):\n \"\"\"Operator converter for Prelu.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n assert len(inputs) == 2, \"Prelu need 2 inputs, {} given\".format(len(inputs))\n input_shape = shape_of(inputs[0])\n alpha = _op.broadcast_to_like(inputs[1], inputs[0])\n alpha = _op.reshape(alpha, [-1])\n output = _op.nn.prelu(_op.reshape(inputs[0], [-1]), alpha, axis=0)\n return _op.reshape(output, input_shape)\n\n\nclass Reciprocal(OnnxOpConverter):\n \"\"\"Operator converter for Reciprocal.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n dtype = infer_type(inputs[0]).checked_type.dtype\n return _expr.const(1.0, dtype=dtype) / inputs[0]\n\n\nclass Flatten(OnnxOpConverter):\n \"\"\"Operator converter for Flatten.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n axis = attr.get(\"axis\", 1)\n ishape = shape_of(inputs[0])\n ndim = infer_shape(ishape)[0]\n if axis < 0:\n axis = axis + ndim\n\n if axis == 1:\n out = _op.nn.batch_flatten(inputs[0])\n else:\n pre_shape = _op.prod(_op.strided_slice(ishape, [0], [axis], [1]), keepdims=True)\n post_shape = _op.prod(_op.strided_slice(ishape, [axis], [ndim], [1]), keepdims=True)\n newshape = fold_constant(_op.concatenate([pre_shape, post_shape], axis=0))\n out = _op.reshape(inputs[0], newshape)\n return out\n\n\nclass Reshape(OnnxOpConverter):\n \"\"\"Operator converter for Reshape.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n return _op.reshape(inputs[0], attr[\"shape\"])\n\n @classmethod\n def _impl_v5(cls, inputs, attr, params):\n allowzero = attr.get(\"allowzero\", False)\n if get_name(inputs[1]) in params:\n shape = tuple(params[inputs[1].name_hint].numpy().astype(\"int32\"))\n out = _op.reshape(inputs[0], shape, allowzero=allowzero)\n else:\n out = _op.reshape(*inputs, allowzero=allowzero)\n return out\n\n\nclass DepthToSpace(OnnxOpConverter):\n \"\"\"Operator converter for DepthToSpace.\"\"\"\n\n @classmethod\n def _impl_v11(cls, inputs, attr, params):\n block_size = int(attr[\"blocksize\"])\n mode = attr.get(\"mode\", b\"DCR\").decode(\"utf-8\")\n return _op.nn.depth_to_space(inputs[0], block_size, mode=mode)\n\n\nclass SpaceToDepth(OnnxOpConverter):\n \"\"\"Operator converter for SpaceToDepth.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n\n block_size = int(attr[\"blocksize\"])\n return _op.nn.space_to_depth(inputs[0], block_size)\n\n\nclass Concat(OnnxOpConverter):\n \"\"\"Operator converter for Concat.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, args, params):\n return AttrCvt(op_name=\"concatenate\")((inputs,), args)\n\n\nclass Scale(OnnxOpConverter):\n \"\"\"Operator converter for Scale.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n scale = float(attr.get(\"scale\", 1.0))\n return inputs[0] * _expr.const(scale)\n\n\nclass Selu(OnnxOpConverter):\n \"\"\"Operator converter for Selu.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n alpha = float(attr.get(\"alpha\", 1.67326319217681884765625))\n gamma = float(attr.get(\"gamma\", 
1.05070102214813232421875))\n return _expr.const(gamma) * (\n _expr.const(-alpha) * _op.nn.relu(_expr.const(1.0) - _op.exp(inputs[0]))\n + _op.nn.relu(inputs[0])\n )\n\n\nclass ScaledTanh(OnnxOpConverter):\n \"\"\"Operator converter for ScaledTanh.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n alpha = float(attr.get(\"alpha\", 1.0))\n beta = float(attr.get(\"beta\", 1.0))\n return _op.tanh(_expr.const(beta) * inputs[0]) * _expr.const(alpha)\n\n\nclass Shrink(OnnxOpConverter):\n \"\"\"Operator converter for Shrink.\"\"\"\n\n @classmethod\n def _impl_v9(cls, inputs, attr, params):\n x = inputs[0]\n dtype = infer_type(x).checked_type.dtype\n lambd = _op.const(attr.get(\"lambd\", 0.5), dtype=dtype)\n bias = _op.const(attr.get(\"bias\", 0.0), dtype=dtype)\n\n zeros = _op.zeros_like(x)\n return _op.where(x < -lambd, x + bias, zeros) + _op.where(x > lambd, x - bias, zeros)\n\n\nclass Softsign(OnnxOpConverter):\n \"\"\"Operator converter for Softsign.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n return inputs[0] / (_expr.const(1.0) + Absolute.get_converter(1)(inputs, attr, params))\n\n\nclass Sub(Elemwise):\n \"\"\"Operator converter for Subtract.\"\"\"\n\n name = \"subtract\"\n\n\nclass Sum(OnnxOpConverter):\n \"\"\"Operator converter for Sum.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n # Onnx Sum Operator\n for in_index in range(len(inputs) - 1):\n inputs[in_index + 1] = _op.add(inputs[in_index], inputs[in_index + 1])\n\n return inputs[len(inputs) - 1]\n\n\nclass Affine(OnnxOpConverter):\n \"\"\"Operator converter for Affine transformation.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n alpha = _expr.const(attr.get(\"alpha\", 1.0))\n beta = _expr.const(attr.get(\"beta\", 0.0))\n return (alpha * inputs[0]) + beta\n\n\nclass ThresholdedRelu(OnnxOpConverter):\n \"\"\"Operator converter for ThresholdedRelu.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n alpha = float(attr.get(\"alpha\", 1.0))\n alpha_tensor = _op.full_like(inputs[0], fill_value=_expr.const(alpha))\n mask = _op.greater(inputs[0], alpha_tensor).astype(\"float32\")\n return inputs[0] * mask\n\n\ndef _broadcast_constraint():\n def _broadcast_check(attrs):\n if attrs.get(\"axis\", None):\n return False\n return True\n\n return _broadcast_check, \"Specifying broadcast axis not allowed.\"\n\n\ndef _fully_connected(opset):\n def _impl(inputs, attr, params):\n # get number of channels\n channels = infer_channels(inputs[1], params)\n attr[\"units\"] = channels\n return AttrCvt(\"dense\", ignores=[\"axis\", \"axis_w\"])(inputs, attr)\n\n return _impl\n\n\nclass Upsample(OnnxOpConverter):\n \"\"\"Operator converter for Upsample (nearest mode).\"\"\"\n\n @classmethod\n def _impl_v9(cls, inputs, attr, params):\n scales = attr.get(\"scales\")\n\n input_shape = infer_shape(inputs[0])\n dims = len(input_shape)\n\n if not scales:\n # Here we are going to higher OPSET version.\n assert len(inputs) == 2, \"Upsample op takes 2 inputs, {} given\".format(len(inputs))\n\n if get_name(inputs[1]) in params:\n scales = params[inputs[1].name_hint].numpy()\n else:\n scales = inputs[1]\n if isinstance(scales, _expr.Constant):\n scales = list(scales.data.numpy())\n if not isinstance(scales, _expr.Expr):\n assert scales[0] == 1.0 and scales[1] == 1.0\n\n mode = attr.get(\"mode\")\n if mode == b\"nearest\":\n method = \"nearest_neighbor\"\n elif mode == b\"linear\":\n method = \"trilinear\" if dims == 5 else \"bilinear\"\n else:\n raise 
tvm.error.OpAttributeInvalid(\n 'Value {} in attribute \"mode\" of operator Upsample is not valid.'.format(mode)\n )\n\n # in 3d case, we use the purely static op\n if dims == 5:\n if isinstance(scales, _expr.Expr):\n scale_h = _op.take(scales, _op.const(3))\n scale_w = _op.take(scales, _op.const(4))\n scale_d = _op.take(scales, _op.const(1))\n else:\n assert len(scales) == 5\n scale_h = scales[-2]\n scale_w = scales[-1]\n scale_d = scales[-3]\n\n layout = \"NCDHW\"\n out = _op.nn.upsampling3d(\n inputs[0],\n scale_d,\n scale_h,\n scale_w,\n layout=layout,\n method=method,\n coordinate_transformation_mode=\"asymmetric\",\n )\n # in 2d case, use dynamic op\n else:\n if isinstance(scales, _expr.Expr):\n scale_h = _op.take(scales, _op.const(3))\n scale_w = _op.take(scales, _op.const(4))\n else:\n assert len(scales) == 4\n scale_h = scales[-2]\n scale_w = scales[-1]\n layout = \"NCHW\"\n\n out = _op.nn.upsampling(\n inputs[0],\n scale_h,\n scale_w,\n layout=layout,\n method=method,\n align_corners=False,\n )\n return out\n\n\nclass Shape(OnnxOpConverter):\n \"\"\"Operator converter for Shape.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n return shape_of(inputs[0], \"int64\")\n\n @classmethod\n def _impl_v15(cls, inputs, attr, params):\n start = attr.get(\"start\")\n end = attr.get(\"end\")\n return shape_of(inputs[0], dtype=\"int64\", start=start, end=end)\n\n\nclass CumSum(OnnxOpConverter):\n \"\"\"Operator converter for CumSum.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n data = inputs[0]\n dim = inputs[1]\n\n if dim is not None:\n dim = int(infer_value(dim, params).numpy())\n\n exclusive = attr.get(\"exclusive\", 0)\n reverse = attr.get(\"reverse\", 0)\n\n if reverse != 0:\n out = _op.reverse(data, axis=dim)\n out = _op.cumsum(out, axis=dim, exclusive=exclusive)\n return _op.reverse(out, axis=dim)\n\n return _op.cumsum(data, axis=dim, exclusive=exclusive)\n\n\nclass Cast(OnnxOpConverter):\n \"\"\"Operator converter for Cast.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n return AttrCvt(op_name=\"cast\", transforms={\"to\": \"dtype\"})(inputs, attr)\n\n @classmethod\n def _impl_v5(cls, inputs, attr, params):\n try:\n from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE\n\n attr[\"to\"] = str(TENSOR_TYPE_TO_NP_TYPE[attr[\"to\"]])\n except ImportError as e:\n raise ImportError(\"Unable to import onnx.mapping which is required {}\".format(e))\n return AttrCvt(op_name=\"cast\", transforms={\"to\": \"dtype\"})(inputs, attr)\n\n\nclass Unsqueeze(OnnxOpConverter):\n \"\"\"Operator converter for Unsqueeze.\"\"\"\n\n @classmethod\n def run_calculation(cls, tensor, axes):\n axes = sorted(axes)\n for axis in axes:\n tensor = _op.expand_dims(tensor, axis=axis, num_newaxis=1)\n return tensor\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n return cls.run_calculation(inputs[0], attr[\"axes\"])\n\n @classmethod\n def _impl_v13(cls, inputs, attr, params):\n if isinstance(inputs[1], _expr.Constant):\n constant_axes = list(inputs[1].data.numpy())\n constant_axes = list(map(int, constant_axes))\n return cls.run_calculation(inputs[0], constant_axes)\n\n rank_input = len(infer_type(inputs[0]).checked_type.shape)\n num_new_axis = int(infer_type(inputs[1]).checked_type.shape[0])\n axes = relay.sort(inputs[1])\n axes = relay.split(axes, num_new_axis).astuple()\n result = inputs[0]\n\n # TODO (AndrewZhaoLuo): investigate performance issues with consecutive\n # dynamic expand_dims on non-llvm targets.\n for i in range(num_new_axis):\n axis = 
relay.TupleGetItem(axes, i)\n # Unpack scalar\n axis = relay.reshape(axis, [])\n axis = relay.where(\n axis >= relay.const(0, \"int64\"), axis, axis + relay.const(rank_input, \"int64\")\n )\n result = _op.expand_dims(result, axis)\n return result\n\n\nclass Squeeze(OnnxOpConverter):\n \"\"\"Operator converter for Squeeze.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n axis = attr.get(\"axes\", None)\n return _op.squeeze(inputs[0], axis)\n\n @classmethod\n def _impl_v13(cls, inputs, attr, params):\n ishape = infer_shape(inputs[0])\n axis = inputs[1]\n\n if axis is None:\n # If axes is not provided, all the single dimensions will be removed from the shape.\n if not ishape: # scalar\n return inputs[0]\n\n axis = [i for i in range(len(ishape)) if ishape[i] == 1]\n axis = _op.const(axis)\n\n dtype = infer_type(axis).checked_type.dtype\n\n if isinstance(axis, _expr.Constant):\n constant_axes = list(axis.data.numpy())\n constant_axes = list(map(int, constant_axes))\n return _op.squeeze(inputs[0], constant_axes)\n\n rank = _op.shape_of(_op.shape_of(inputs[0], dtype), dtype)\n axis = _op.where(axis < _op.const(0, dtype), axis + rank, axis)\n return _op.squeeze(inputs[0], fold_constant(axis))\n\n\nclass Split(OnnxOpConverter):\n \"\"\"Operator converter for Split.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n splits = attr.get(\"split\", None)\n if splits is not None and len(splits) > 1:\n indices = []\n index = 0\n for i in splits[:-1]:\n index += i\n indices.append(index)\n # When splits isn't specified, divide evenly over the axis.\n else:\n indices = attr[\"tvm_custom\"][\"num_outputs\"]\n output = _op.split(inputs[0], indices, attr.get(\"axis\", 0))\n # If the output of split is a single value, unpack it from the TupleWrapper\n if len(output) == 1:\n output = output[0]\n return output\n\n @classmethod\n def _impl_v13(cls, inputs, attr, params):\n splits = inputs[1]\n splits_rank = None\n if splits is not None:\n splits_rank = len(infer_shape(splits))\n if splits is not None and splits_rank > 0:\n if isinstance(splits, _expr.Constant):\n splits = splits.data.asnumpy()\n indices = []\n index = 0\n for i in splits[:-1]:\n index += i\n indices.append(index)\n else:\n raise ValueError(\"Dynamic Split not yet supported\")\n # When splits isn't specified, divide evenly over the axis.\n else:\n indices = attr[\"tvm_custom\"][\"num_outputs\"]\n output = _op.split(inputs[0], indices, attr.get(\"axis\", 0))\n # If the output of split is a single value, unpack it from the TupleWrapper\n if len(output) == 1:\n output = output[0]\n return output\n\n\nclass Slice(OnnxOpConverter):\n \"\"\"Operator converter for Slice.\"\"\"\n\n @classmethod\n def _common(cls, starts, ends, axes):\n N = max(axes) + 1\n new_axes = list(range(N))\n new_starts = [0] * N\n new_ends = [np.iinfo(np.int32).max] * N\n for i, axis in enumerate(axes):\n new_starts[axis] = starts[i]\n new_ends[axis] = ends[i]\n return new_starts, new_ends, new_axes\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n if isinstance(attr[\"starts\"], int):\n attr[\"starts\"] = (attr[\"starts\"],)\n attr[\"ends\"] = (attr[\"ends\"],)\n\n try:\n # Update the starts and ends according to axes if required.\n if isinstance(attr[\"axes\"], int):\n attr[\"axes\"] = (attr[\"axes\"],)\n new_starts, new_ends, new_axes = cls._common(attr[\"starts\"], attr[\"ends\"], attr[\"axes\"])\n attr[\"axes\"] = new_axes\n attr[\"starts\"] = new_starts\n attr[\"ends\"] = new_ends\n except KeyError:\n pass\n begin = list(attr[\"starts\"])\n 
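# --- Illustrative sketch (not part of the importer) -----------------------
# Both Split implementations above turn ONNX per-piece sizes into the
# cumulative cut points that relay's split (like numpy.split) expects:
# running sums of every size except the last. The helper name is hypothetical.
import numpy as np


def sizes_to_indices(split_sizes):
    indices, index = [], 0
    for size in split_sizes[:-1]:
        index += size
        indices.append(index)
    return indices  # e.g. sizes [2, 3, 5] -> cut points [2, 5]


_pieces = np.split(np.arange(10), sizes_to_indices([2, 3, 5]))
assert [len(p) for p in _pieces] == [2, 3, 5]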
end = list(attr[\"ends\"])\n\n return _op.strided_slice(inputs[0], begin=begin, end=end)\n\n @classmethod\n def _impl_v10(cls, inputs, attr, params):\n starts = inputs[1]\n ends = inputs[2]\n axes = inputs[3]\n steps = inputs[4]\n\n ishape = infer_shape(inputs[0])\n data_rank = len(ishape)\n\n if axes is not None:\n # Normalize for negative axes\n axes_dtype = infer_type(axes).checked_type.dtype\n axes = fold_constant(\n _op.where(\n axes < _op.const(0, axes_dtype), axes + _op.const(data_rank, axes_dtype), axes\n )\n )\n\n def has_static_axes():\n return (\n isinstance(axes, _expr.Constant)\n and isinstance(starts, _expr.Constant)\n and isinstance(ends, _expr.Constant)\n and (steps is None or isinstance(steps, _expr.Constant))\n )\n\n if axes is not None and has_static_axes():\n axes_np = axes.data.numpy().astype(\"int64\")\n begin_np = starts.data.numpy().astype(\"int64\")\n end_np = ends.data.numpy().astype(\"int64\")\n if steps is None:\n strides_np = np.ones_like(begin_np).astype(\"int64\")\n else:\n strides_np = steps.data.numpy().astype(\"int64\")\n if all([isinstance(ishape[i], int) for i in axes_np]):\n return _op.strided_slice(\n inputs[0], list(begin_np), list(end_np), list(strides_np), axes=list(axes_np)\n )\n\n # Update the starts and ends according to axes if required.\n if axes is not None:\n data_shape = shape_of(inputs[0], dtype=infer_type(ends).checked_type.dtype)\n starts = _op.scatter(\n _op.const([0] * data_rank, dtype=infer_type(starts).checked_type.dtype),\n axes,\n starts,\n axis=0,\n )\n ends = _op.scatter(data_shape, axes, ends, axis=0)\n if steps is not None:\n steps = _op.scatter(\n _op.const([1] * data_rank, dtype=infer_type(steps).checked_type.dtype),\n axes,\n steps,\n axis=0,\n )\n\n if steps is None:\n steps = _op.const([1] * data_rank, dtype=infer_type(starts).checked_type.dtype)\n\n return _op.strided_slice(\n inputs[0], fold_constant(starts), fold_constant(ends), fold_constant(steps)\n )\n\n\ndef normalize_gather_indices(data, indices, axis):\n \"\"\"Make sure gather indicies aren't negative\"\"\"\n ind_dtype = infer_type(indices).checked_type.dtype\n # Normalize the indices to a positive range\n s = _op.take(_op.shape_of(data, dtype=ind_dtype), _op.const(axis, dtype=\"int64\"))\n cond = fold_constant(indices < _op.const(0, ind_dtype))\n if isinstance(cond, _expr.Constant):\n val = cond.data.numpy()\n if val.size == 1:\n cond = val.item()\n if cond:\n indices = indices + s\n return indices\n indices = _op.where(cond, indices + s, indices)\n return indices\n\n\nclass Gather(OnnxOpConverter):\n \"\"\"Operator converter for Gather.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n axis = attr.get(\"axis\", 0)\n data = inputs[0]\n indices = inputs[1]\n indices = normalize_gather_indices(data, indices, axis)\n return _op.take(data, indices, axis)\n\n\nclass GatherElements(OnnxOpConverter):\n \"\"\"Operator converter for GatherElements.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n data = inputs[0]\n indices = inputs[1]\n axis = attr.get(\"axis\", 0)\n indices = normalize_gather_indices(data, indices, axis)\n return _op.gather(data, axis, indices)\n\n\nclass GatherND(OnnxOpConverter):\n \"\"\"Operator converter for GatherND.\"\"\"\n\n @classmethod\n def _impl_common(cls, data, indices, batch_dims=0):\n indices_dims = len(infer_shape(indices))\n indices_shape = infer_shape(indices)\n indices = _op.transpose(indices, axes=[-1] + list(range(indices_dims - 1)))\n index_rank = indices_shape[-1]\n return _op.gather_nd(\n 
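# --- Illustrative sketch (not part of the importer) -----------------------
# Slice._common above densifies per-axis (starts, ends, axes) into full
# begin/end lists over axes 0..max(axes), with 0 and int32 max acting as
# "slice everything" defaults. A NumPy sketch checked against Python slicing.
import numpy as np


def pad_slice_args(starts, ends, axes):
    n = max(axes) + 1
    new_starts = [0] * n
    new_ends = [np.iinfo(np.int32).max] * n
    for i, axis in enumerate(axes):
        new_starts[axis] = starts[i]
        new_ends[axis] = ends[i]
    return new_starts, new_ends, list(range(n))


_x = np.arange(24).reshape(2, 3, 4)
_begin, _end, _ = pad_slice_args(starts=[1], ends=[3], axes=[2])
assert _x[tuple(slice(b, e) for b, e in zip(_begin, _end))].shape == (2, 3, 2)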
data,\n indices,\n batch_dims=batch_dims,\n index_rank=index_rank,\n )\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n return cls._impl_common(inputs[0], inputs[1])\n\n @classmethod\n def _impl_v12(cls, inputs, attr, params):\n batch_dims = attr.get(\"batch_dims\", 0)\n return cls._impl_common(inputs[0], inputs[1], batch_dims)\n\n\nclass Compress(OnnxOpConverter):\n \"\"\"Operator converter for compress\"\"\"\n\n @classmethod\n def _impl_v11(cls, inputs, attr, params):\n input_tensor, condition_tensor = inputs\n\n axis = attr.get(\"axis\", None)\n\n # Change one hot tensor to indices e.g. [0, 1, 1, 0, 1] -> [1, 2, 4]\n condition_tensor = _op.reshape(_op.argwhere(condition_tensor), (-1,))\n\n if axis is not None:\n return _op.take(input_tensor, condition_tensor, axis=axis)\n\n # if axis is None, flatten input tensor before selection\n input_tensor = _op.reshape(input_tensor, (-1,))\n return _op.take(input_tensor, condition_tensor, axis=0)\n\n\nclass Scatter(OnnxOpConverter):\n \"\"\"Operator converter for Scatter.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n axis = attr.get(\"axis\", 0)\n return _op.scatter(inputs[0], inputs[1], inputs[2], axis)\n\n\nclass ScatterND(OnnxOpConverter):\n \"\"\"Operator converter for ScatterND.\"\"\"\n\n @classmethod\n def _impl_v11(cls, inputs, attr, params):\n indices_dim = len(infer_shape(inputs[1]))\n axes = list(range(indices_dim))\n return _op.scatter_nd(\n inputs[0], _op.transpose(inputs[1], axes[-1:] + axes[:-1]), inputs[2], \"update\"\n )\n\n\nclass EyeLike(OnnxOpConverter):\n \"\"\"Operator converter for EyeLike.\"\"\"\n\n @classmethod\n def _impl_v9(cls, inputs, attr, params):\n in_checked_type = infer_type(inputs[0]).checked_type\n in_dtype = in_checked_type.dtype\n in_shape = list(get_const_tuple(in_checked_type.shape))\n dtype = attr.get(\"dtype\", None)\n if dtype is None:\n dtype = in_dtype\n else:\n dtype = get_type(dtype)\n zeros = _op.zeros(in_shape, dtype)\n dim = in_shape[0]\n indices = _op.arange(_op.const(0), _op.const(dim), dtype=\"int32\")\n ones = _op.full(_op.const(1), (dim,), dtype=dtype)\n k = _op.const(attr.get(\"k\", 0), dtype=\"int32\")\n return _op.scatter_nd(zeros, _op.stack([indices, indices + k], axis=0), ones, \"update\")\n\n\nclass LRN(OnnxOpConverter):\n \"\"\"Operator converter for Local Response Normalization.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n \"\"\"LRN support only NCHW format\n https://github.com/onnx/onnx/blob/main/docs/Operators.md#LRN\n \"\"\"\n axis = 1\n alpha = attr.get(\"alpha\", 0.0001)\n beta = attr.get(\"beta\", 0.75)\n bias = attr.get(\"bias\", 1.0)\n nsize = attr.get(\"size\")\n attr = {\"size\": nsize, \"axis\": axis, \"alpha\": alpha, \"beta\": beta, \"bias\": bias}\n return AttrCvt(\"lrn\")(inputs, attr)\n\n\nclass Maximum(OnnxOpConverter):\n \"\"\"Operator converter for Maximum.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n if len(inputs) == 1:\n return inputs[0]\n _max = inputs[0]\n for i in range(1, len(inputs)):\n _max = AttrCvt(\"maximum\")([_max, inputs[i]], {})\n return _max\n\n\nclass Minimum(OnnxOpConverter):\n \"\"\"Operator converter for Minimum.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n if len(inputs) == 1:\n return inputs[0]\n _min = inputs[0]\n for i in range(1, len(inputs)):\n _min = AttrCvt(\"minimum\")([_min, inputs[i]], {})\n return _min\n\n\nclass Mean(OnnxOpConverter):\n \"\"\"Operator converter for Mean.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, 
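# --- Illustrative sketch (not part of the importer) -----------------------
# EyeLike above materializes an identity-like matrix by scattering ones at
# coordinates (i, i + k) into a zero tensor. A NumPy reference compared with
# numpy.eye; the bounds guard is an addition for this sketch only.
import numpy as np


def eyelike_ref(dim, k=0, dtype="float32"):
    out = np.zeros((dim, dim), dtype=dtype)
    rows = np.arange(dim)
    cols = rows + k
    valid = (cols >= 0) & (cols < dim)
    out[rows[valid], cols[valid]] = 1
    return out


assert (eyelike_ref(4) == np.eye(4, dtype="float32")).all()
assert (eyelike_ref(4, k=1) == np.eye(4, k=1, dtype="float32")).all()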
params):\n if len(inputs) == 1:\n return inputs[0]\n # avoid overflow\n concat = _op.concatenate([_op.expand_dims(x, axis=0) for x in inputs], axis=0)\n return _op.mean(concat, axis=0, keepdims=False)\n\n\nclass HardSigmoid(OnnxOpConverter):\n \"\"\"Operator converter for HardSigmoid.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n alpha = attr.get(\"alpha\", 0.2)\n beta = attr.get(\"beta\", 0.5)\n transformX = (inputs[0] * _expr.const(alpha)) + _expr.const(beta)\n attr = {\"a_min\": 0, \"a_max\": 1}\n return AttrCvt(\"clip\")([transformX], attr)\n\n\nclass HardSwish(OnnxOpConverter):\n \"\"\"Operator converter for HardSwish.\"\"\"\n\n @classmethod\n def _impl_v14(cls, inputs, attr, params):\n alpha = attr.get(\"alpha\", 1 / 6)\n beta = attr.get(\"beta\", 0.5)\n transformX = inputs[0] * _expr.const(alpha) + _expr.const(beta)\n attr = {\"a_min\": 0, \"a_max\": 1}\n return inputs[0] * AttrCvt(\"clip\")([transformX], attr)\n\n\nclass Reduce(OnnxOpConverter):\n \"\"\"Operator converter for reduce ops.\"\"\"\n\n name = \"\"\n\n @classmethod\n def run_calculation(cls, inputs, axis, keepdims):\n attr = {\"axis\": axis, \"keepdims\": keepdims}\n return AttrCvt(cls.name)(inputs, attr)\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n if not infer_shape(inputs[0]): # promote scalar to 1-D tensor\n inputs[0] = _op.expand_dims(inputs[0], axis=0)\n\n if \"axes\" in attr:\n axis = attr.get(\"axes\", 0)\n else:\n axis_len = len(infer_shape(inputs[0]))\n axis = list(range(axis_len))\n\n return cls.run_calculation(inputs, axis, attr.get(\"keepdims\", True))\n\n @classmethod\n def _impl_v12(cls, inputs, attr, params):\n if not infer_shape(inputs[0]): # promote scalar to 1-D tensor\n inputs[0] = _op.expand_dims(inputs[0], axis=0)\n\n if len(inputs) == 2:\n if isinstance(inputs[1], _expr.Constant):\n # Get axis and unpack scalar\n constant_axis = int(inputs[1].data.numpy()[0])\n return cls.run_calculation([inputs[0]], constant_axis, attr.get(\"keepdims\", True))\n\n raise ValueError(\"Dynamic Reduce is not supported yet!\")\n\n return cls._impl_v1(inputs, attr, params)\n\n\nclass ReduceMax(Reduce):\n \"\"\"Operator converter for ReduceMax.\"\"\"\n\n name = \"max\"\n\n\nclass ReduceMin(Reduce):\n \"\"\"Operator converter for ReduceMin.\"\"\"\n\n name = \"min\"\n\n\nclass ReduceSum(Reduce):\n \"\"\"Operator converter for ReduceSum.\"\"\"\n\n name = \"sum\"\n\n\nclass ReduceMean(Reduce):\n \"\"\"Operator converter for ReduceMean.\"\"\"\n\n name = \"mean\"\n\n\nclass ReduceProd(Reduce):\n \"\"\"Operator converter for ReduceProd.\"\"\"\n\n name = \"prod\"\n\n\nclass ReduceLogSumExp(Reduce):\n \"\"\"Operator converter for ReduceLogSumExp.\"\"\"\n\n name = \"logsumexp\"\n\n\nclass ReduceSumSquare(OnnxOpConverter):\n \"\"\"Operator converter for ReduceSumSquare.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n if not infer_shape(inputs[0]): # promote scalar to 1-D tensor\n inputs[0] = _op.expand_dims(inputs[0], axis=0)\n\n if \"axes\" in attr:\n axis = attr.get(\"axes\", 0)\n else:\n axis_len = len(infer_shape(inputs[0]))\n axis = list(range(axis_len))\n attr = {\"axis\": axis, \"keepdims\": attr.get(\"keepdims\", True)}\n inputs[0] = inputs[0] * inputs[0]\n\n return AttrCvt(\"sum\")(inputs, attr)\n\n\nclass ReduceL1(OnnxOpConverter):\n \"\"\"Operator converter for ReduceL1.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n if not infer_shape(inputs[0]): # promote scalar to 1-D tensor\n inputs[0] = _op.expand_dims(inputs[0], axis=0)\n\n if \"axes\" 
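# --- Illustrative sketch (not part of the importer) -----------------------
# The composite reducers above all bottom out in a plain "sum" over a
# transformed input: x * x for ReduceSumSquare and ReduceL2 (with a final
# sqrt for L2), abs(x) for ReduceL1. A NumPy cross-check of those identities.
import numpy as np

_x = np.array([[3.0, -4.0], [1.0, 2.0]])
assert np.allclose((_x * _x).sum(axis=1), [25.0, 5.0])  # ReduceSumSquare
assert np.allclose(np.abs(_x).sum(axis=1), np.linalg.norm(_x, 1, axis=1))  # ReduceL1
assert np.allclose(np.sqrt((_x * _x).sum(axis=1)), np.linalg.norm(_x, axis=1))  # ReduceL2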
in attr:\n axis = attr.get(\"axes\", 0)\n else:\n axis_len = len(infer_shape(inputs[0]))\n axis = list(range(axis_len))\n attr = {\"axis\": axis, \"keepdims\": attr.get(\"keepdims\", True)}\n inputs[0] = _op.abs(inputs[0])\n\n return AttrCvt(\"sum\")(inputs, attr)\n\n\nclass ReduceL2(OnnxOpConverter):\n \"\"\"Operator converter for ReduceL2.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n if not infer_shape(inputs[0]): # promote scalar to 1-D tensor\n inputs[0] = _op.expand_dims(inputs[0], axis=0)\n\n if \"axes\" in attr:\n axis = attr.get(\"axes\", 0)\n else:\n axis_len = len(infer_shape(inputs[0]))\n axis = list(range(axis_len))\n attr = {\"axis\": axis, \"keepdims\": attr.get(\"keepdims\", True)}\n inputs[0] = inputs[0] * inputs[0]\n out = AttrCvt(\"sum\")(inputs, attr)\n\n return _op.sqrt(out)\n\n\nclass ReduceLogSum(OnnxOpConverter):\n \"\"\"Operator converter for ReduceLogSum.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n if not infer_shape(inputs[0]): # promote scalar to 1-D tensor\n inputs[0] = _op.expand_dims(inputs[0], axis=0)\n\n if \"axes\" in attr:\n axis = attr.get(\"axes\", 0)\n else:\n axis_len = len(infer_shape(inputs[0]))\n axis = list(range(axis_len))\n attr = {\"axis\": axis, \"keepdims\": attr.get(\"keepdims\", True)}\n out = AttrCvt(\"sum\")(inputs, attr)\n\n return _op.log(out)\n\n\nclass ArgMax(OnnxOpConverter):\n \"\"\"Operator converter for ArgMax.\"\"\"\n\n @classmethod\n def _impl_v13(cls, inputs, attr, params):\n axis = attr.get(\"axis\", 0)\n keepdims = attr.get(\"keepdims\", True)\n select_last_index = attr.get(\"select_last_index\", False)\n attr = {\"axis\": axis, \"keepdims\": keepdims, \"select_last_index\": select_last_index}\n return _op.cast(AttrCvt(\"argmax\")(inputs, attr), \"int64\")\n\n\nclass ArgMin(OnnxOpConverter):\n \"\"\"Operator converter for ArgMin.\"\"\"\n\n @classmethod\n def _impl_v13(cls, inputs, attr, params):\n axis = attr.get(\"axis\", 0)\n keepdims = attr.get(\"keepdims\", True)\n select_last_index = attr.get(\"select_last_index\", False)\n attr = {\"axis\": axis, \"keepdims\": keepdims, \"select_last_index\": select_last_index}\n return _op.cast(AttrCvt(\"argmin\")(inputs, attr), \"int64\")\n\n\nclass Softmax(OnnxOpConverter):\n \"\"\"Operator converter for Softmax.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n axis = attr.get(\"axis\", 1)\n in_shape = infer_shape(inputs[0])\n ndim = len(in_shape)\n if axis < 0:\n axis += ndim\n if axis == 0:\n reshape_shape = [-1]\n else:\n axis_val = [in_shape[i] for i in range(axis)]\n reshape_shape = [np.prod(axis_val)] + [-1]\n data_reshape = _op.reshape(inputs[0], newshape=reshape_shape)\n out = _op.nn.softmax(data_reshape, axis=-1)\n out = _op.reshape(out, newshape=in_shape)\n return out\n\n @classmethod\n def _impl_v13(cls, inputs, attr, _):\n axis = attr.get(\"axis\", -1)\n ndim = len(infer_shape(inputs[0]))\n if axis < 0:\n axis += ndim\n return _op.nn.softmax(inputs[0], axis=axis)\n\n\nclass LogSoftmax(OnnxOpConverter):\n \"\"\"Operator converter for Softmax.\"\"\"\n\n @classmethod\n def run_calculation(cls, x, axes):\n \"\"\"Run the calculation for Log Softmax calculation.\"\"\"\n m = _op.max(x, axes, keepdims=True)\n e = _op.exp(x - m)\n s = _op.sum(e, axes, keepdims=True)\n return x - m - _op.log(s)\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n axis = attr.get(\"axis\", 1)\n ndim = len(infer_shape(inputs[0]))\n if axis < 0:\n axis += ndim\n axes = list(range(axis, ndim))\n return 
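# --- Illustrative sketch (not part of the importer) -----------------------
# LogSoftmax.run_calculation above uses the max-shifted identity
# x - m - log(sum(exp(x - m))), which stays finite where a naive
# log(softmax(x)) would overflow in exp. A NumPy version of the same formula.
import numpy as np


def log_softmax_ref(x, axis=-1):
    m = x.max(axis=axis, keepdims=True)
    return x - m - np.log(np.exp(x - m).sum(axis=axis, keepdims=True))


_x = np.array([1000.0, 1001.0, 1002.0])  # exp(_x) alone would overflow
_out = log_softmax_ref(_x)
assert np.isfinite(_out).all()
assert np.allclose(np.exp(_out).sum(), 1.0)  # still a valid distribution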
cls.run_calculation(inputs[0], axes)\n\n @classmethod\n def _impl_v13(cls, inputs, attr, params):\n axis = attr.get(\"axis\", -1)\n ndim = len(infer_shape(inputs[0]))\n if axis < 0:\n axis += ndim\n axes = [axis]\n return cls.run_calculation(inputs[0], axes)\n\n\nclass Hardmax(OnnxOpConverter):\n \"\"\"Operator converter for Hardmax.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n axis = attr.get(\"axis\", 1)\n ndim = len(infer_shape(inputs[0]))\n if axis < 0:\n axis += ndim\n dtype = infer_type(inputs[0]).checked_type.dtype\n\n if axis == 0:\n pre = _op.const([1], \"int64\")\n else:\n pre = _op.prod(\n _op.strided_slice(shape_of(inputs[0]), [0], [axis], [1]), axis=0, keepdims=True\n )\n post = _op.prod(\n _op.strided_slice(shape_of(inputs[0]), [axis], [2147483647], [1]), axis=0, keepdims=True\n )\n newshape = _op.concatenate([pre, post], axis=0)\n x = _op.reshape(inputs[0], fold_constant(newshape))\n argmax = _op.argmax(x, axis=1)\n onehot = _op.one_hot(\n argmax,\n _op.const(1.0, dtype),\n _op.const(0.0, dtype),\n fold_constant(_op.take(shape_of(x), _op.const([1], \"int64\"))),\n 1,\n dtype,\n )\n return _op.reshape(onehot, shape_of(inputs[0]))\n\n @classmethod\n def _impl_v13(cls, inputs, attr, params) -> relay.Expr:\n inferred_type = infer_type(inputs[0])\n dtype = inferred_type.checked_type.dtype\n ndim = len(inferred_type.checked_type.shape)\n axis = attr.get(\"axis\", -1) % ndim\n\n argmax = _op.argmax(inputs[0], axis=axis)\n return _op.one_hot(\n argmax,\n _op.const(1.0, dtype),\n _op.const(0.0, dtype),\n fold_constant(_op.take(shape_of(inputs[0]), _op.const([axis], \"int64\"))),\n axis,\n dtype,\n )\n\n\nclass OneHot(OnnxOpConverter):\n \"\"\"Operator converter for OneHot.\"\"\"\n\n @classmethod\n def _impl_v9(cls, inputs, attr, params):\n # Extract relay one_hot inputs.\n indices, depth, values = inputs\n ndim = len(infer_shape(indices))\n # Split onnx on off values into two separate expressions.\n off_value, on_value = _op.take(values, _op.const(0)), _op.take(values, _op.const(1))\n # Extract the datatype of the output from on_value.\n dtype = infer_type(on_value).checked_type.dtype\n ind_dtype = infer_type(indices).checked_type.dtype\n # Normalize the indices to a positive range\n indices = _op.where(\n indices < _op.const(0, ind_dtype), indices + _op.cast(depth, ind_dtype), indices\n )\n # set default value when axis is not set in the model\n if \"axis\" not in attr:\n attr[\"axis\"] = -1\n axis = attr[\"axis\"]\n if axis < 0:\n axis += ndim + 1\n\n return _op.one_hot(indices, on_value, off_value, depth, axis, dtype=dtype)\n\n\nclass ConstantOfShape(OnnxOpConverter):\n \"\"\"Operator converter for ConstantOfShape.\"\"\"\n\n @classmethod\n def _impl_v9(cls, inputs, attr, params):\n if \"value\" in attr:\n np_value = get_numpy(attr.pop(\"value\"))[0]\n value = _expr.const(np_value)\n dtype = np_value.dtype.name\n else:\n value = _expr.const(0)\n dtype = \"float32\"\n output = _op.full(value, inputs[0], dtype=dtype)\n return output\n\n\nclass Constant(OnnxOpConverter):\n \"\"\"Operator converter for ConstantOfShape.\"\"\"\n\n @classmethod\n def _impl_v9(cls, inputs, attr, params):\n if \"value\" not in attr:\n raise tvm.errors.OpAttributeRequired(\"no value in Constant\")\n value = attr.pop(\"value\")\n # Constants may rarely have string types. These are likely exported\n # from other frameworks and not actually used in TVM. 
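# --- Illustrative sketch (not part of the importer) -----------------------
# Hardmax above amounts to "one-hot of the argmax along an axis". A compact
# NumPy reference of that semantics; the helper name is hypothetical.
import numpy as np


def hardmax_ref(x, axis=-1):
    out = np.zeros_like(x)
    idx = np.expand_dims(np.argmax(x, axis=axis), axis)
    np.put_along_axis(out, idx, 1.0, axis)
    return out


_x = np.array([[0.1, 0.9, 0.3], [2.0, 1.0, 0.5]])
assert (hardmax_ref(_x) == [[0.0, 1.0, 0.0], [1.0, 0.0, 0.0]]).all()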
We'll just use\n # a zero valued constant for compatibility.\n if isinstance(value, bytes):\n np_value = np.asarray([0]).astype(\"int64\")\n else:\n np_value = get_numpy(value)\n dtype = np_value.dtype.name\n value = _expr.const(np_value, dtype)\n return value\n\n\nclass Sign(OnnxOpConverter):\n \"\"\"Operator converter for Sign.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n return _op.sign(inputs[0])\n\n\nclass Equal(Elemwise):\n \"\"\"Operator converter for Equal.\"\"\"\n\n name = \"equal\"\n\n\nclass Not(Elemwise):\n \"\"\"Operator converter for Not.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n return _op.logical_not(inputs[0])\n\n\nclass And(Elemwise):\n \"\"\"Operator converter for And.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n return _op.logical_and(inputs[0], inputs[1])\n\n\nclass Tile(Elemwise):\n \"\"\"Operator converter for Tile\"\"\"\n\n @classmethod\n def _impl_v6(cls, inputs, attr, params):\n return _op.tile(inputs[0], inputs[1])\n\n\nclass Erf(OnnxOpConverter):\n \"\"\"Operator converter for Erf\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n return _op.erf(inputs[0])\n\n\nclass Where(OnnxOpConverter):\n \"\"\"Operator converter for Where\"\"\"\n\n @classmethod\n def _impl_v9(cls, inputs, attr, params):\n return _op.where(*inputs)\n\n\nclass Or(Elemwise):\n \"\"\"Operator converter for Or.\"\"\"\n\n @classmethod\n def _impl_v7(cls, inputs, attr, params):\n return _op.logical_or(inputs[0], inputs[1])\n\n\nclass Expand(OnnxOpConverter):\n \"\"\"Operator converter for Expand.\"\"\"\n\n @classmethod\n def _impl_v8(cls, inputs, attr, params):\n dtype = infer_type(inputs[1]).checked_type.dtype\n in_shape = shape_of(inputs[0], dtype=dtype)\n shape = inputs[1]\n\n # Currently 'op.broadcast_to' expects that the rank of the given 'shape'\n # (the 2nd input) is always higher than that of the given 'input' (the 1st input).\n # However, ONNX Expand supports multidirectional broadcasting, which allows the\n # above pattern, and an extent of 'shape' may also be smaller than the corresponding\n # extent of 'input'; in that case, the extent of 'shape' must be 1.\n # https://github.com/onnx/onnx/blob/main/docs/Broadcasting.md\n # In these cases we cannot directly apply 'op.broadcast_to' instead of 'expand',\n # so we solve the problem by expanding the given 'shape' itself.\n def expand_shape(in_shape, shape):\n \"\"\"A function that expands the shape when its rank is lower than that of the given\n input. 
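# --- Illustrative sketch (not part of the importer) -----------------------
# expand_shape here implements ONNX Expand's multidirectional broadcast by
# left-padding the shorter shape with 1s and taking the elementwise maximum,
# after which a plain broadcast_to suffices. A NumPy sketch of that logic.
import numpy as np


def expand_target_shape(in_shape, shape):
    in_shape, shape = list(in_shape), list(shape)
    rank = max(len(in_shape), len(shape))
    in_shape = [1] * (rank - len(in_shape)) + in_shape
    shape = [1] * (rank - len(shape)) + shape
    return [max(a, b) for a, b in zip(in_shape, shape)]


_x = np.ones((3, 1))
_target = expand_target_shape(_x.shape, (2, 1, 4))  # -> [2, 3, 4]
assert np.broadcast_to(_x, _target).shape == (2, 3, 4)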
Also it replaces the extent of the shape with the corresponding extent\n of the intput when it is 1.\n \"\"\"\n in_dims = infer_shape(in_shape)[0]\n new_dims = infer_shape(shape)[0]\n\n if in_dims < new_dims:\n in_shape = _op.concatenate(\n [\n _expr.const(\n [\n 1,\n ]\n * (new_dims - in_dims),\n dtype=dtype,\n ),\n in_shape,\n ],\n axis=0,\n )\n elif new_dims < in_dims:\n shape = _op.concatenate(\n [\n _expr.const(\n [\n 1,\n ]\n * (in_dims - new_dims),\n dtype=dtype,\n ),\n shape,\n ],\n axis=0,\n )\n new_shape = _op.maximum(in_shape, shape)\n return new_shape\n\n shape = fold_constant(expand_shape(in_shape, shape))\n return _op.broadcast_to(inputs[0], shape=shape)\n\n\nclass RNN(OnnxOpConverter):\n \"\"\"Operator converter for RNNs such as LSTM and GRU.\"\"\"\n\n @classmethod\n def _activation_helper(cls, activation, alpha, beta):\n convert_map = _get_convert_map(1)\n attrs = {}\n if alpha is not None:\n attrs[\"alpha\"] = alpha\n if beta is not None:\n attrs[\"beta\"] = beta\n return lambda x: convert_map[activation.decode(\"utf-8\")]([x], attrs, {})\n\n @classmethod\n def _activation_needs_alpha(cls, activation):\n needs_alpha = [\n \"Affine\",\n \"LeakyRelu\",\n \"ThresholdedRelu\",\n \"ScaledTanh\",\n \"HardSigmoid\",\n \"Elu\",\n ]\n return activation.decode(\"utf-8\") in needs_alpha\n\n @classmethod\n def _activation_needs_beta(cls, activation):\n needs_beta = [\n \"Affine\",\n \"ScaledTanh\",\n \"HardSigmoid\",\n ]\n return activation.decode(\"utf-8\") in needs_beta\n\n\nclass LSTM(RNN):\n \"\"\"Operator converter for LSTM\"\"\"\n\n @classmethod\n def bidir_lstm_cell(\n cls,\n input_seqs,\n weight_dicts,\n acts,\n ):\n \"\"\"\n Bidirectional LSTM cell\n \"\"\"\n seq_len = len(input_seqs)\n forward_outputs, fw_H_t, fw_C_t = lstm_cell(\n input_seqs,\n **weight_dicts[0],\n f_act=acts[0],\n g_act=acts[1],\n h_act=acts[2],\n )\n\n reverse_outputs, rev_H_t, rev_C_t = lstm_cell(\n input_seqs,\n **weight_dicts[1],\n f_act=acts[3],\n g_act=acts[4],\n h_act=acts[5],\n backwards=True,\n )\n\n final_outputs = []\n for i in range(seq_len):\n final_outputs.append(\n _op.stack([forward_outputs[i], reverse_outputs[seq_len - 1 - i]], axis=0)\n )\n\n return (\n _op.stack(final_outputs, axis=0),\n _op.stack([fw_H_t, rev_H_t], axis=0),\n _op.stack([fw_C_t, rev_C_t], axis=0),\n )\n\n @classmethod\n def _impl_v7(cls, inputs, attr, params):\n # Unpack inputs, note that if optional and not provided then value will be None.\n X = inputs[0]\n Wp = inputs[1]\n Rp = inputs[2]\n Bp = inputs[3]\n # Sequence length currently unused as it can be inferred from shapes.\n # sequence_lens = inputs['sequence_lens']\n Hp_0 = inputs[5]\n Cp_0 = inputs[6]\n Pp = inputs[7]\n\n num_directions = infer_shape(Wp)[0]\n W_dtype = infer_type(Wp).checked_type.dtype\n\n if num_directions not in [1, 2]:\n raise ValueError(\"num_directions must be either 1 or 2!\")\n\n X_shape = infer_shape(X)\n hidden_size = infer_shape(Rp)[-1]\n batch_size = X_shape[1]\n\n # Initialize state if not provided.\n # Otherwise remove bidirectional axis.\n if Hp_0 is None:\n Hp_0 = _op.zeros((num_directions, batch_size, hidden_size), W_dtype)\n if Cp_0 is None:\n Cp_0 = _op.zeros((num_directions, batch_size, hidden_size), W_dtype)\n\n if \"activations\" in attr:\n activations = attr[\"activations\"]\n if len(activations) != 3 * num_directions:\n raise NotImplementedError(\n f\"LSTM assumes 3 * num_directions activation functions are provided\"\n )\n alpha_loc = 0\n alphas = attr.get(\"activation_alpha\", [])\n if isinstance(alphas, float):\n alphas = 
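# --- Illustrative sketch (not part of the importer) -----------------------
# bidir_lstm_cell above pairs the forward output at step i with the reverse
# output at step seq_len - 1 - i, so both halves of the stack describe the
# same input position (assuming the backwards pass emits outputs in the order
# it visits them, i.e. last step first). A list-based sketch of the alignment.
_seq = ["t0", "t1", "t2"]
_forward = ["fw(%s)" % t for t in _seq]
_reverse = ["bw(%s)" % t for t in reversed(_seq)]  # emitted back-to-front
_merged = [(_forward[i], _reverse[len(_seq) - 1 - i]) for i in range(len(_seq))]
assert _merged[0] == ("fw(t0)", "bw(t0)")
assert _merged[2] == ("fw(t2)", "bw(t2)")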
[alphas]\n beta_loc = 0\n betas = attr.get(\"activation_beta\", [])\n if isinstance(betas, float):\n betas = [betas]\n acts = []\n for i in range(3 * num_directions):\n alpha = None\n beta = None\n activation = activations[i]\n if cls._activation_needs_alpha(activation) and len(alphas) > alpha_loc:\n alpha = alphas[alpha_loc]\n alpha_loc += 1\n if cls._activation_needs_beta(activation) and len(betas) > beta_loc:\n beta = betas[beta_loc]\n beta_loc += 1\n acts.append(cls._activation_helper(activation, alpha, beta))\n else:\n acts = [_op.sigmoid, _op.tanh, _op.tanh] * num_directions\n\n # TODO (vvchernov): It can be replaced by _op.split if issue #8412 is resolved\n X_steps = unbind(X, axis=0)\n\n H_ts = _op.split(Hp_0, num_directions)\n C_ts = _op.split(Cp_0, num_directions)\n Ws = _op.split(Wp, num_directions)\n Rs = _op.split(Rp, num_directions)\n\n if Bp is not None:\n Bs = _op.split(Bp, num_directions)\n if Pp is not None:\n p_i, p_o, p_f = _op.split(Pp, 3, axis=1)\n\n p_is = _op.split(p_i, num_directions)\n p_fs = _op.split(p_f, num_directions)\n p_os = _op.split(p_o, num_directions)\n\n weights_dicts = []\n for i in range(num_directions):\n weights_dict = {}\n\n weights_dict[\"hidden_state\"] = _op.squeeze(H_ts[i], axis=[0])\n weights_dict[\"cell_state\"] = _op.squeeze(C_ts[i], axis=[0])\n\n # Weights permutation: onnx format i-o-f-c, lstm cell format i-f-c-o\n mati, mato, matf, matc = _op.split(_op.squeeze(Ws[i], axis=[0]), 4)\n weights_dict[\"w_inp\"] = _op.concatenate([mati, matf, matc, mato], axis=0)\n mati, mato, matf, matc = _op.split(_op.squeeze(Rs[i], axis=[0]), 4)\n weights_dict[\"w_hid\"] = _op.concatenate([mati, matf, matc, mato], axis=0)\n if Bp is not None:\n Bi, Bh = _op.split(Bs[i], 2, -1)\n mati, mato, matf, matc = _op.split(_op.squeeze(Bi, axis=[0]), 4)\n weights_dict[\"b_inp\"] = _op.concatenate([mati, matf, matc, mato], axis=0)\n mati, mato, matf, matc = _op.split(_op.squeeze(Bh, axis=[0]), 4)\n weights_dict[\"b_hid\"] = _op.concatenate([mati, matf, matc, mato], axis=0)\n if Pp is not None:\n weights_dict[\"p_i\"] = _op.squeeze(p_is[i], axis=[0])\n weights_dict[\"p_f\"] = _op.squeeze(p_fs[i], axis=[0])\n weights_dict[\"p_o\"] = _op.squeeze(p_os[i], axis=[0])\n weights_dicts.append(weights_dict)\n\n if num_directions == 2:\n output, H, C = LSTM.bidir_lstm_cell(\n input_seqs=X_steps,\n weight_dicts=weights_dicts,\n acts=acts,\n )\n else:\n # outputs shape = [seqs_num, (batch_size, hidden_size)]\n outputs, H, C = lstm_cell(\n input_seqs=X_steps,\n **weights_dicts[0],\n f_act=acts[0],\n g_act=acts[1],\n h_act=acts[2],\n )\n\n # output shape = (seqs_num, num_directions, batch_size, hidden_size)\n output = _op.expand_dims(_op.stack(outputs, axis=0), axis=1)\n H = _op.expand_dims(H, axis=0)\n C = _op.expand_dims(C, axis=0)\n\n return _expr.TupleWrapper(_expr.Tuple((output, H, C)), 3)\n\n\nclass GRU(RNN):\n \"\"\"Operator convert for GRU\"\"\"\n\n @classmethod\n def bidir_gru_cell(\n cls,\n input_seqs,\n weight_dicts,\n acts,\n ):\n \"\"\"\n Bidirectional GRU cell\n \"\"\"\n seq_len = len(input_seqs)\n forward_outputs, fw_H_t = gru_cell(\n input_seqs,\n **weight_dicts[0],\n rz_act=acts[0],\n n_act=acts[1],\n )\n\n reverse_outputs, rev_H_t = gru_cell(\n input_seqs,\n **weight_dicts[1],\n rz_act=acts[2],\n n_act=acts[3],\n backwards=True,\n )\n\n final_outputs = []\n for i in range(seq_len):\n final_outputs.append(\n _op.stack([forward_outputs[i], reverse_outputs[seq_len - 1 - i]], axis=0)\n )\n\n return (\n _op.stack(final_outputs, axis=0),\n _op.stack([fw_H_t, rev_H_t], 
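# --- Illustrative sketch (not part of the importer) -----------------------
# The weight shuffle above converts ONNX's stacked i-o-f-c gate blocks into
# the i-f-c-o layout lstm_cell expects: split the (4*hidden, feat) matrix
# into four equal blocks along axis 0 and re-concatenate. A NumPy sketch
# with labeled blocks.
import numpy as np

_hidden = 2
_w_onnx = np.concatenate([np.full((_hidden, 3), v) for v in (1, 2, 3, 4)])  # i, o, f, c
_i, _o, _f, _c = np.split(_w_onnx, 4)
_w_cell = np.concatenate([_i, _f, _c, _o])  # i-f-c-o, as built for weights_dict
assert (_w_cell[:, 0] == np.repeat([1, 3, 4, 2], _hidden)).all()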
axis=0),\n )\n\n @classmethod\n def _impl_v7(cls, inputs, attr, params):\n # Unpack inputs, note that if optional and not provided then value will be None.\n X = inputs[0]\n Wp = inputs[1]\n Rp = inputs[2]\n Bp = inputs[3]\n # Sequence length currently unused as it can be inferred from shapes.\n # sequence_lens = inputs['sequence_lens']\n Hp_0 = inputs[5]\n linear_before_reset = attr.get(\"linear_before_reset\", 0)\n\n num_directions = infer_shape(Wp)[0]\n W_dtype = infer_type(Wp).checked_type.dtype\n\n if num_directions not in [1, 2]:\n raise ValueError(\"num_directions must be either 1 or 2!\")\n\n X_shape = infer_shape(X)\n hidden_size = infer_shape(Rp)[-1]\n batch_size = X_shape[1]\n\n if Hp_0 is None:\n Hp_0 = _op.zeros((num_directions, batch_size, hidden_size), W_dtype)\n\n if \"activations\" in attr:\n activations = attr[\"activations\"]\n if len(activations) != 2 * num_directions:\n raise NotImplementedError(\n \"GRU assumes 2 * num_directions activation functions are provided\"\n )\n alpha_loc = 0\n alphas = attr.get(\"activation_alpha\", [])\n if isinstance(alphas, float):\n alphas = [alphas]\n beta_loc = 0\n betas = attr.get(\"activation_beta\", [])\n if isinstance(betas, float):\n betas = [betas]\n acts = []\n for i in range(2 * num_directions):\n alpha = None\n beta = None\n activation = activations[i]\n if cls._activation_needs_alpha(activation) and len(alphas) > alpha_loc:\n alpha = alphas[alpha_loc]\n alpha_loc += 1\n if cls._activation_needs_beta(activation) and len(betas) > beta_loc:\n beta = betas[beta_loc]\n beta_loc += 1\n acts.append(cls._activation_helper(activation, alpha, beta))\n else:\n acts = [_op.sigmoid, _op.tanh] * 2\n\n # TODO (vvchernov): It can be replaced by _op.split if issue #8412 is resolved\n X_steps = unbind(X, axis=0)\n\n H_ts = _op.split(Hp_0, num_directions)\n Ws = _op.split(Wp, num_directions)\n Rs = _op.split(Rp, num_directions)\n\n if Bp is not None:\n Bs = _op.split(Bp, num_directions)\n\n weights_dicts = []\n for i in range(num_directions):\n weights_dict = {}\n\n weights_dict[\"hidden_state\"] = _op.squeeze(H_ts[i], axis=[0])\n weights_dict[\"linear_before_reset\"] = linear_before_reset\n\n # Weights permutation: onnx format i-o-f-c, lstm cell format i-f-c-o\n matz, matr, matn = _op.split(_op.squeeze(Ws[i], axis=[0]), 3)\n weights_dict[\"w_inp\"] = _op.concatenate([matr, matz, matn], axis=0)\n matz, matr, matn = _op.split(_op.squeeze(Rs[i], axis=[0]), 3)\n weights_dict[\"w_hid\"] = _op.concatenate([matr, matz, matn], axis=0)\n if Bp is not None:\n Bi, Bh = _op.split(Bs[i], 2, -1)\n matz, matr, matn = _op.split(_op.squeeze(Bi, axis=[0]), 3)\n weights_dict[\"b_inp\"] = _op.concatenate([matr, matz, matn], axis=0)\n matz, matr, matn = _op.split(_op.squeeze(Bh, axis=[0]), 3)\n weights_dict[\"b_hid\"] = _op.concatenate([matr, matz, matn], axis=0)\n weights_dicts.append(weights_dict)\n\n if num_directions == 2:\n output, H = GRU.bidir_gru_cell(\n input_seqs=X_steps,\n weight_dicts=weights_dicts,\n acts=acts,\n )\n else:\n # outputs shape = [seqs_num, (batch_size, hidden_size)]\n outputs, H = gru_cell(\n input_seqs=X_steps,\n **weights_dicts[0],\n rz_act=acts[0],\n n_act=acts[1],\n )\n\n # output shape = (seqs_num, num_directions, batch_size, hidden_size)\n output = _op.expand_dims(_op.stack(outputs, axis=0), axis=1)\n H = _op.expand_dims(H, axis=0)\n\n return _expr.TupleWrapper(_expr.Tuple((output, H)), 2)\n\n\nclass Resize(OnnxOpConverter):\n \"\"\"Operator converter for Resize\"\"\"\n\n @classmethod\n def _impl_v10(cls, inputs, attr, params):\n 
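# --- Illustrative sketch (not part of the importer) -----------------------
# GRU weights get the analogous shuffle above: ONNX stacks its gates as
# z-r-n, while gru_cell expects r-z-n (note the ordering differs from LSTM's
# i-o-f-c). A NumPy sketch of the permutation with labeled blocks.
import numpy as np

_hidden = 2
_w_onnx = np.concatenate([np.full((_hidden, 3), v) for v in (1, 2, 3)])  # z, r, n
_z, _r, _n = np.split(_w_onnx, 3)
_w_cell = np.concatenate([_r, _z, _n])  # r-z-n, as built for weights_dict
assert (_w_cell[:, 0] == np.repeat([2, 1, 3], _hidden)).all()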
mode = attr.get(\"mode\").decode(\"ascii\")\n if mode == \"nearest\":\n method = \"nearest_neighbor\"\n elif mode == \"linear\":\n method = \"linear\"\n elif mode == \"cubic\":\n method = \"cubic\"\n else:\n raise tvm.error.OpAttributeInvalid(\n 'Value {} in attribute \"mode\" of operator Resize is not valid.'.format(mode)\n )\n\n scale = inputs[1]\n size = _op.cast(shape_of(inputs[0]), infer_type(scale).checked_type.dtype) * scale\n ndims = len(infer_shape(inputs[0]))\n out = None\n if ndims == 3:\n out_size = fold_constant(_op.strided_slice(size, [2], [3]))\n out = _op.image.resize1d(inputs[0], out_size, None, \"NCW\", method, \"asymmetric\")\n elif ndims == 4:\n out_size = fold_constant(_op.strided_slice(size, [2], [4]))\n out = _op.image.resize2d(inputs[0], out_size, None, \"NCHW\", method, \"asymmetric\")\n elif ndims == 5:\n out_size = fold_constant(_op.strided_slice(size, [2], [5]))\n out = _op.image.resize3d(inputs[0], out_size, None, \"NCDHW\", method, \"asymmetric\")\n else:\n raise NotImplementedError(\"Resize only supports 3, 4, or 5 dims\")\n return out\n\n @classmethod\n def _impl_v11(cls, inputs, attr, params):\n scale = inputs[2]\n scale_shape = infer_shape(scale)\n if len(inputs) == 4:\n assert (\n len(scale_shape) == 0 or scale_shape[0] == 0\n ), \"One of scale or size should be passed, not both.\"\n size = inputs[3]\n else:\n assert len(scale_shape) != 0, \"One of scale or size should be passed.\"\n size = _op.cast(shape_of(inputs[0]), infer_type(scale).checked_type.dtype) * scale\n return cls.v11_13_common(inputs, size, attr, params)\n\n @classmethod\n def _impl_v13(cls, inputs, attr, params):\n scale = inputs[2]\n size = inputs[3]\n\n # Some versions of onnx exporters produce an opset 13 model with the opset 11\n # resize op, handle that edge case\n if scale is not None and size is not None:\n return cls._impl_v11(inputs, attr, params)\n\n if size is not None:\n assert scale is None, \"One of scale or size should be passed, not both.\"\n else:\n scale_type = infer_type(scale)\n scale_shape = scale_type.checked_type.shape\n scale_dtype = scale_type.checked_type.dtype\n assert len(scale_shape) != 0, \"One of scale or size should be passed.\"\n size = _op.cast(shape_of(inputs[0]), scale_dtype) * scale\n\n return cls.v11_13_common(inputs, size, attr, params)\n\n @classmethod\n def v11_13_common(cls, inputs, size, attr, params):\n \"\"\"\n Resize v11 and Resize v13 are identical except in how\n they handle the passing of scale and size. 
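# --- Illustrative sketch (not part of the importer) -----------------------
# Resize above converts a `scales` input into an output size by multiplying
# the input shape elementwise, then slicing away the batch and channel dims
# (axes 0-1) before calling the image resize ops. A NumPy sketch for NCHW.
import numpy as np

_input_shape = np.array([1, 3, 32, 48], dtype="float32")  # N, C, H, W
_scales = np.array([1.0, 1.0, 2.0, 0.5], dtype="float32")
_out_size = (_input_shape * _scales)[2:].astype("int64")
assert (_out_size == [64, 24]).all()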
This utility\n provides the implementation for both\n \"\"\"\n roi = inputs[1]\n if roi is not None and infer_shape(roi)[0] == 0:\n roi = None\n ndims = len(infer_shape(inputs[0]))\n mode = attr.get(\"mode\").decode(\"ascii\")\n if mode == \"nearest\":\n method = \"nearest_neighbor\"\n elif mode == \"linear\":\n method = \"linear\"\n elif mode == \"cubic\":\n method = \"cubic\"\n else:\n raise tvm.error.OpAttributeInvalid(\n 'Value {} in attribute \"mode\" of operator Resize is not valid.'.format(mode)\n )\n\n coord_trans = attr.get(\"coordinate_transformation_mode\", b\"half_pixel\").decode(\"ascii\")\n nearest_mode = attr.get(\"nearest_mode\", b\"round_prefer_floor\").decode(\"ascii\")\n alpha = attr.get(\"cubic_coeff_a\", -0.75)\n exclude = attr.get(\"exclude_outside\", 0)\n extrapolation_value = attr.get(\"extrapolation_value\", 0.0)\n\n if roi is not None:\n roi = fold_constant(\n _op.concatenate(\n [\n _op.strided_slice(roi, [2], [ndims]),\n _op.strided_slice(roi, [ndims + 2], [2 * ndims]),\n ],\n axis=0,\n )\n )\n\n out_size = fold_constant(_op.strided_slice(size, [2], [ndims]))\n\n out = None\n if ndims == 3:\n out = _op.image.resize1d(\n inputs[0],\n out_size,\n roi,\n \"NCW\",\n method,\n coord_trans,\n nearest_mode,\n alpha,\n exclude,\n extrapolation_value,\n )\n elif ndims == 4:\n out = _op.image.resize2d(\n inputs[0],\n out_size,\n roi,\n \"NCHW\",\n method,\n coord_trans,\n nearest_mode,\n alpha,\n exclude,\n extrapolation_value,\n )\n elif ndims == 5:\n out = _op.image.resize3d(\n inputs[0],\n out_size,\n roi,\n \"NCDHW\",\n method,\n coord_trans,\n nearest_mode,\n alpha,\n exclude,\n extrapolation_value,\n )\n else:\n raise NotImplementedError(\"Resize only supports 3, 4, or 5 dims\")\n\n return out\n\n\nclass NonZero(OnnxOpConverter):\n \"\"\"Operator converter for NonZero\"\"\"\n\n @classmethod\n def _impl_v9(cls, inputs, attr, params):\n if len(inputs) > 1:\n raise ValueError(\"Expect 1 input only\")\n\n output = AttrCvt(op_name=\"argwhere\")(inputs, attr, params)\n # ONNX NonZero always outputs int64\n output = _op.cast(output, \"int64\")\n return _op.transpose(output, axes=(1, 0))\n\n\nclass ReverseSequence(OnnxOpConverter):\n \"\"\"Operator converter for ReverseSequence\"\"\"\n\n @classmethod\n def _impl_v10(cls, inputs, attr, params):\n\n return _op.reverse_sequence(inputs[0], inputs[1], attr[\"time_axis\"], attr[\"batch_axis\"])\n\n\nclass TopK(OnnxOpConverter):\n \"\"\"Operator converter for TopK\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n if len(inputs) != 2:\n raise ValueError(\"Expect 2 input only\")\n axis = attr.get(\"axis\", -1)\n largest = attr.get(\"largest\", 1)\n\n if largest == 0:\n # TODO(mbrookhart): optimize this by adding a smallest attribute to topi if this\n # ever becomes a bottleneck\n ndim = len(infer_shape(inputs[0]))\n if axis < 0:\n axis += ndim\n sort = _op.sort(inputs[0], axis=axis)\n argsort = _op.argsort(inputs[0], axis=axis, dtype=\"int64\")\n begin = [0] * ndim\n stride = [1] * ndim\n end = _op.concatenate(\n [\n _op.const([np.iinfo(np.int64).max] * axis, dtype=\"int64\"),\n inputs[1],\n _op.const([np.iinfo(np.int64).max] * (ndim - axis - 1), dtype=\"int64\"),\n ],\n axis=0,\n )\n return _expr.TupleWrapper(\n _expr.Tuple(\n [\n _op.strided_slice(sort, begin, end, stride),\n _op.strided_slice(argsort, begin, end, stride),\n ]\n ),\n 2,\n )\n\n return _op.topk(inputs[0], inputs[1], axis=axis, dtype=\"int64\")\n\n\nclass Range(OnnxOpConverter):\n \"\"\"Operator converter for Range\"\"\"\n\n @classmethod\n def 
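# --- Illustrative sketch (not part of the importer) -----------------------
# The largest == 0 branch above emulates "smallest k" with an ascending sort
# and argsort followed by a strided slice of the first k entries along the
# axis. A NumPy sketch of the same idea on one axis.
import numpy as np

_x = np.array([5.0, 1.0, 4.0, 2.0])
_k = 2
assert (np.sort(_x)[:_k] == [1.0, 2.0]).all()  # values
assert (np.argsort(_x)[:_k] == [1, 3]).all()  # indices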
_impl_v1(cls, inputs, attr, params):\n if len(inputs) != 3:\n raise ValueError(\"Expect 3 inputs only\")\n\n return _op.arange(\n inputs[0], inputs[1], inputs[2], dtype=infer_type(inputs[0]).checked_type.dtype\n )\n\n\nclass IsInf(OnnxOpConverter):\n \"\"\"Operator converter for IsInf\"\"\"\n\n @classmethod\n def _impl_v10(cls, inputs, attr, params):\n detect_negative = attr.get(\"detect_negative\", 1)\n detect_positive = attr.get(\"detect_positive\", 1)\n dtype = infer_type(inputs[0]).checked_type.dtype\n isinf = _op.isinf(inputs[0])\n if not detect_negative:\n isinf = isinf * (inputs[0] > _op.const(0, dtype))\n if not detect_positive:\n isinf = isinf * (inputs[0] < _op.const(0, dtype))\n return isinf\n\n\nclass Celu(OnnxOpConverter):\n \"\"\"Operator converter for Celu.\"\"\"\n\n @classmethod\n def _impl_v12(cls, inputs, attr, params):\n x = inputs[0]\n dtype = infer_type(x).checked_type.dtype\n alpha = _op.const(attr.get(\"alpha\", 1.0), dtype)\n zero = _op.const(0, dtype)\n one = _op.const(1, dtype)\n out = _op.maximum(zero, x) + _op.minimum(zero, alpha * (_op.exp(x / alpha) - one))\n return out\n\n\nclass MaxRoiPool(OnnxOpConverter):\n \"\"\"Operator converter for MaxRoiPool.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n assert len(inputs) == 2, \"MaxRoiPool op takes 2 inputs, {} given\".format(len(inputs))\n\n data = inputs[0]\n rois = inputs[1]\n pooled_shape = attr.get(\"pooled_shape\")\n spatial_scale = attr.get(\"spatial_scale\", 1.0)\n\n return _vision.roi_pool(data, rois, pooled_shape, spatial_scale)\n\n\nclass RoiAlign(OnnxOpConverter):\n \"\"\"Operator converter for RoiAlign.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n if len(inputs) != 3:\n raise ValueError(\"Expect 3 inputs only\")\n x = inputs[0]\n rois = inputs[1]\n batch_indices = inputs[2]\n mode = attr.get(\"mode\", b\"avg\")\n if mode not in (b\"avg\", b\"max\"):\n raise NotImplementedError(\"RoiAlign in Relay only uses avg and max modes\")\n output_height = attr.get(\"output_height\", 1)\n output_width = attr.get(\"output_width\", 1)\n\n sampling_ratio = attr.get(\"sampling_ratio\", 0)\n spatial_scale = attr.get(\"spatial_scale\", 1.0)\n\n batch_indices = _op.expand_dims(batch_indices, axis=1, num_newaxis=1)\n batch_indices = _op.cast(batch_indices, infer_type(rois).checked_type.dtype)\n rois = _op.concatenate([batch_indices, rois], 1)\n\n return _vision.roi_align(\n x, rois, [output_height, output_width], spatial_scale, sampling_ratio, mode=mode\n )\n\n\nclass Clip(OnnxOpConverter):\n \"\"\"Operator converter for Clip.\"\"\"\n\n @staticmethod\n def convert_attributes(inputs, attr, params):\n convert = AttrCvt(\"clip\", transforms={\"min\": \"a_min\", \"max\": \"a_max\"})\n return convert(inputs, attr, params)\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n if \"min\" not in attr:\n attr[\"min\"] = -np.inf\n if \"max\" not in attr:\n attr[\"max\"] = np.inf\n return Clip.convert_attributes(inputs, attr, params)\n\n @classmethod\n def _impl_v11(cls, inputs, attr, params):\n if len(inputs) == 3 and isinstance(inputs[2], _expr.Constant):\n attr[\"max\"] = inputs[2].data.numpy().item()\n inputs = inputs[0:2]\n if len(inputs) >= 2 and isinstance(inputs[1], _expr.Constant):\n attr[\"min\"] = inputs[1].data.numpy().item()\n inputs = inputs[0:1]\n if \"min\" in attr and \"max\" in attr:\n return Clip.convert_attributes(inputs, attr, params)\n\n assert len(inputs) <= 3, \"Clip-11 takes up to 3 inputs, input, min, max\"\n result = inputs[0]\n for i, op in 
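# --- Illustrative sketch (not part of the importer) -----------------------
# Celu above lowers to max(0, x) + min(0, alpha * (exp(x / alpha) - 1)),
# which is the ONNX definition of the op. A NumPy cross-check.
import numpy as np


def celu_ref(x, alpha=1.0):
    return np.maximum(0.0, x) + np.minimum(0.0, alpha * (np.exp(x / alpha) - 1.0))


_x = np.array([-1.0, 0.0, 2.0])
assert np.allclose(celu_ref(_x), [np.exp(-1.0) - 1.0, 0.0, 2.0])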
enumerate([_op.tensor.maximum, _op.tensor.minimum]):\n if i < len(inputs) - 1:\n if inputs[i + 1] is not None:\n result = op(result, inputs[i + 1])\n return result\n\n\nclass Softplus(OnnxOpConverter):\n \"\"\"Operator converter for Softplus.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n data = inputs[0]\n data_dtype = infer_type(data).checked_type.dtype\n data = _op.exp(data) + _expr.const(1, dtype=data_dtype)\n return _op.log(data)\n\n\nclass Loop(OnnxOpConverter):\n \"\"\"Operator converter for Loop\"\"\"\n\n @classmethod\n def _impl_v11(cls, inputs, attr, params):\n max_loop_count = inputs[0]\n cond = inputs[1]\n loop_deps = inputs[2:]\n num_deps = len(loop_deps)\n # Create a copy of the body function to prevent the original\n # from being modified.\n body = copy.copy(attr[\"body\"])\n iter_dtype = infer_type(max_loop_count).checked_type.dtype\n\n # Determine what condition mode we're in.\n assert cond is not None or max_loop_count is not None\n is_for_loop = max_loop_count is not None and cond is None\n is_condition_for_loop = cond is not None and max_loop_count is not None\n\n # Loop inputs will be packed as\n # [iter_count, max_count, condition, loop_deps, scan_outputs]\n def cond_fn(*loop_inputs):\n i = loop_inputs[0]\n max_count = loop_inputs[1]\n w = loop_inputs[2]\n\n if cond is not None:\n out_while = _op.equal(w, _expr.const(True, \"bool\"))\n if max_loop_count is not None:\n out_loop = _op.less(i, max_count)\n\n if is_condition_for_loop:\n return _op.logical_and(out_while, out_loop)\n if is_for_loop:\n return out_loop\n return out_while\n\n # Get the current graph proto and create a clone for the subgraph\n graph_scope = GraphProto.current\n subgraph_scope = GraphProto(\n graph_scope._shape, graph_scope._dtype, graph_scope._freeze_params\n )\n # Load nodes from outer graph into inner graph.\n subgraph_scope._nodes = graph_scope._nodes.copy()\n\n # Create a list of variables for each value updated in the loop.\n def get_var(name, val, scan=False):\n checked_type = infer_type(val)\n if hasattr(checked_type, \"type_annotation\"):\n checked_type = checked_type.type_annotation\n if hasattr(checked_type, \"checked_type\"):\n checked_type = checked_type.checked_type\n shape = get_const_tuple(checked_type.shape)\n actual_shape = []\n for dim in shape:\n if isinstance(dim, int) and dim == 0:\n actual_shape.append(_ty.Any())\n else:\n actual_shape.append(dim)\n if scan:\n return _expr.var(name, shape=[_ty.Any()] + actual_shape, dtype=checked_type.dtype)\n\n return _expr.var(name, shape=actual_shape, dtype=checked_type.dtype)\n\n loop_vars = [\n _expr.var(body.input[0].name, shape=(), dtype=iter_dtype), # iteration count\n _expr.var(\"max_count\", shape=(), dtype=iter_dtype), # iteration count\n get_var(body.input[1].name, cond), # exit condition\n ]\n loop_vars += [get_var(body.input[i + 2].name, v) for i, v in enumerate(loop_deps)]\n loop_var_names = [v.name_hint for v in loop_vars]\n\n num_scan_outputs = len(body.output) - (1 + num_deps)\n\n # Construct variables and initial empty tensors for any scan outputs.\n # To do this, we'll figure out the output shapes of the body subgraph by importing\n # it and doing type inference.\n scan_output_vars = []\n scan_output_init = []\n if num_scan_outputs > 0:\n with subgraph_scope:\n loop_outputs = subgraph_scope.from_onnx(\n body, graph_scope.opset, get_output_expr=True\n )\n loop_outputs = _expr.TupleWrapper(loop_outputs, len(body.output))\n\n for i in range(num_scan_outputs):\n name, _, _, _ = get_info(body.output[i + 
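# --- Illustrative sketch (not part of the importer) -----------------------
# cond_fn above merges ONNX Loop's two termination modes: an optional trip
# count (i < max_count) and an optional boolean condition, either of which
# may be absent. A pure-Python sketch of the three behaviours; the helper
# name is hypothetical.
def keep_going(i, max_count, cond):
    out_loop = max_count is None or i < max_count
    out_while = cond is None or cond
    return out_loop and out_while


assert keep_going(4, 5, None) and not keep_going(5, 5, None)  # for-loop
assert keep_going(99, None, True) and not keep_going(0, None, False)  # while-loop
assert not keep_going(5, 5, True) and not keep_going(0, 5, False)  # combined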
1 + num_deps])\n output_node = infer_type(loop_outputs[i + 1 + num_deps])\n shape = get_const_tuple(output_node.checked_type.shape)\n dtype = output_node.checked_type.dtype\n scan_output_vars.append(\n _expr.var(name, shape=([_ty.Any()] * (len(shape) + 1)), dtype=dtype)\n )\n scan_output_init.append(\n _op.reshape(_expr.const(np.array([]).astype(dtype)), [0] + [1] * len(shape))\n )\n\n # Now we can remove loop iter variables from our inner loop's inputs.\n # This is kind of a hack since we have graph inputs that we don't\n # want to treat as actual inputs.\n while len(body.input) != 0:\n body.input.pop(0)\n\n # Define the loop body, in this function we need to unpack loop inputs,\n # convert the loop subgraph, and pack outputs for the next iteration.\n def body_fn(*loop_inputs):\n # Unpack inputs\n loop_count = loop_inputs[0]\n max_count = loop_inputs[1]\n cond = loop_inputs[2]\n current_vars = list(loop_inputs[3 : (3 + num_deps)])\n scan_outputs = loop_inputs[(3 + num_deps) :]\n\n # Prepare body inputs by adding them to node dictionary.\n new_inputs = [loop_count, max_count, cond] + current_vars\n for i, inp in enumerate(new_inputs):\n subgraph_scope._nodes[loop_var_names[i]] = inp\n\n # Get the output of the current loop using the updated inputs.\n with subgraph_scope:\n loop_outputs = subgraph_scope.from_onnx(\n body, graph_scope.opset, get_output_expr=True\n )\n # Unpack the body outputs and prepare variables for next iteration.\n new_cond = loop_outputs[0]\n new_loop_vars = [loop_outputs[i] for i in range(1, 1 + num_deps)]\n new_scan_outputs = [loop_outputs[i] for i in range(1 + num_deps, len(loop_outputs))]\n\n # Add new scan outputs to tracking\n combined_scan_outputs = []\n for i, scan in enumerate(scan_outputs):\n rank = len(infer_shape(scan)) - 1\n new_scan = new_scan_outputs[i]\n expand_scan = _op.expand_dims(new_scan, axis=0)\n # For non scalar outputs we need to broadcast the initial value.\n if rank > 0:\n new_scan_shape = shape_of(new_scan, dtype=iter_dtype)\n scan_broadcast = _op.concatenate(\n [_op.reshape(loop_count, [1]), new_scan_shape], axis=0\n )\n scan = _op.broadcast_to(scan, scan_broadcast)\n combined_scan = _op.concatenate([scan, expand_scan], axis=0)\n combined_scan_outputs.append(combined_scan)\n\n # Increment counter.\n if max_loop_count is not None:\n incr = _expr.const(1, dtype=iter_dtype)\n loop_count = loop_count + incr\n\n # Pack loop outputs for next iteration\n # [iter_count, cond, loop_deps, loop_scans]\n return [loop_count, max_count, new_cond] + new_loop_vars + combined_scan_outputs\n\n # Create the loop function.\n loop = fold_constant(_loops.while_loop(cond_fn, loop_vars + scan_output_vars, body_fn))\n\n # Now need to run initial values through the graph.\n init_count = _expr.const(0, dtype=iter_dtype)\n loop_vals = loop(init_count, max_loop_count, cond, *loop_deps, *scan_output_init)\n\n # Extract final iteration outputs.\n if num_deps + num_scan_outputs == 1:\n outputs = _expr.TupleGetItem(loop_vals, 3)\n else:\n outputs = _expr.TupleWrapper(\n _expr.Tuple(\n [\n _expr.TupleGetItem(loop_vals, i + 3)\n for i in range(num_deps + num_scan_outputs)\n ]\n ),\n num_deps + num_scan_outputs,\n )\n\n # Update outer graph with constants found in the subgraph.\n free_vars = analysis.free_vars(loop)\n graph_scope._params.update(subgraph_scope._params)\n graph_scope._nodes.update(subgraph_scope._nodes)\n for var in free_vars:\n graph_scope._nodes.update({var.name_hint: var})\n return outputs\n\n\nclass If(OnnxOpConverter):\n \"\"\"Operator converter for 
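# --- Illustrative sketch (not part of the importer) -----------------------
# Each scan output above starts as an empty (0, ...) tensor and grows by
# concatenating one expanded step per iteration along axis 0. A NumPy sketch
# of that accumulation.
import numpy as np

_scan = np.zeros((0, 2))  # empty initial scan output
for _step in range(3):
    _scan = np.concatenate([_scan, np.full((1, 2), float(_step))], axis=0)
assert _scan.shape == (3, 2) and (_scan[:, 0] == [0.0, 1.0, 2.0]).all()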
If\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n cond = inputs[0]\n # Convert array to bool if needed.\n if len(infer_shape(cond)) > 0:\n cond = _op.take(cond, _expr.const(0, dtype=\"int64\"))\n then_branch = attr.get(\"then_branch\", None)\n else_branch = attr.get(\"else_branch\", None)\n assert then_branch is not None and else_branch is not None\n\n # Create graph converters for both branches.\n graph_scope = GraphProto.current\n then_graph = GraphProto(graph_scope._shape, graph_scope._dtype, graph_scope._freeze_params)\n then_graph._nodes = graph_scope._nodes.copy()\n else_graph = GraphProto(graph_scope._shape, graph_scope._dtype, graph_scope._freeze_params)\n else_graph._nodes = graph_scope._nodes.copy()\n\n # Convert each branch to a relay expression.\n with then_graph:\n then_expr = then_graph.from_onnx(then_branch, graph_scope.opset, get_output_expr=True)\n with else_graph:\n else_expr = else_graph.from_onnx(else_branch, graph_scope.opset, get_output_expr=True)\n\n # Add constants from both branches to parent graph.\n graph_scope._params.update(then_graph._params)\n graph_scope._nodes.update(then_graph._nodes)\n then_free_vars = analysis.free_vars(then_expr)\n for var in then_free_vars:\n graph_scope._nodes.update({var.name_hint: var})\n graph_scope._params.update(else_graph._params)\n graph_scope._nodes.update(else_graph._nodes)\n else_free_vars = analysis.free_vars(else_expr)\n for var in else_free_vars:\n graph_scope._nodes.update({var.name_hint: var})\n\n # Now we can construct the relay if statement and return.\n ret = _expr.If(cond, then_expr, else_expr)\n if len(then_branch.output) > 1:\n ret = _expr.TupleWrapper(ret, len(then_branch.output))\n return ret\n\n\nclass Scan(OnnxOpConverter):\n \"\"\"Operator converter for Scan\"\"\"\n\n @classmethod\n def _impl_v8(cls, inputs, attr, params):\n new_inputs = inputs[1:]\n batch_num = infer_shape(inputs[1])[0]\n out = []\n for i in range(batch_num):\n v9_inputs = [\n _op.take(new_inputs[j], _expr.const(i), axis=0) for j in range(len(new_inputs))\n ]\n results = cls._impl_v9(v9_inputs, attr, params)\n results = [_op.expand_dims(results[j], axis=0) for j in range(len(results))]\n if i == 0:\n out = results\n else:\n out = [_op.concatenate([out[j], results[j]], axis=0) for j in range(len(results))]\n\n out = _expr.TupleWrapper(_expr.Tuple(out), len(out))\n return out\n\n @classmethod\n def _impl_v9(cls, inputs, attr, params):\n body = attr.get(\"body\")\n num_scan_inputs = attr.get(\"num_scan_inputs\")\n num_all_inputs = len(inputs)\n num_state_inputs = len(body.input) - num_scan_inputs\n num_state_outputs = num_state_inputs\n num_all_outputs = len(body.output)\n num_scan_outputs = num_all_outputs - num_state_outputs\n scan_input_axes = attr.get(\"scan_input_axes\", [0] * num_scan_inputs)\n scan_input_directions = attr.get(\"scan_input_directions\", [0] * num_scan_inputs)\n scan_output_axes = list(attr.get(\"scan_output_axes\", [0] * num_scan_outputs))\n scan_output_directions = attr.get(\"scan_output_directions\", [0] * num_scan_outputs)\n # loop count are the same for all scan inputs, so get loop count by first input scan\n # strided_slice not support dynamic axes, so assume input shape are static\n max_loop_count = infer_shape(inputs[num_state_inputs])[scan_input_axes[0]]\n\n # Create a copy of the body function to prevent the original\n # from being modified.\n body = copy.copy(attr[\"body\"])\n\n # Loop inputs will be packed as\n # [iter_count, loop_deps, scan_outputs]\n def cond_fn(*loop_inputs):\n i = 
loop_inputs[0]\n return _op.less(i, relay.const(max_loop_count, \"int32\"))\n\n # Get the current graph proto and create a clone for the subgraph\n graph_scope = GraphProto.current\n subgraph_scope = GraphProto(\n graph_scope._shape, graph_scope._dtype, graph_scope._freeze_params\n )\n # Load nodes from outer graph into inner graph.\n subgraph_scope._nodes = graph_scope._nodes.copy()\n\n # Create a list of variables for each value updated in the loop.\n def get_var(name, val, scan=False):\n checked_type = infer_type(val)\n if hasattr(checked_type, \"type_annotation\"):\n checked_type = checked_type.type_annotation\n if hasattr(checked_type, \"checked_type\"):\n checked_type = checked_type.checked_type\n shape = get_const_tuple(checked_type.shape)\n actual_shape = []\n for dim in shape:\n if isinstance(dim, int) and dim == 0:\n actual_shape.append(_ty.Any())\n else:\n actual_shape.append(dim)\n if scan:\n return _expr.var(name, shape=[_ty.Any()] + actual_shape, dtype=checked_type.dtype)\n\n return _expr.var(name, shape=actual_shape, dtype=checked_type.dtype)\n\n # Construct variables and initial empty tensors for any scan outputs.\n # To do this, we'll figure out the output shapes of the body subgraph by importing\n # it and doing type inference.\n scan_output_vars = []\n scan_output_init = []\n if num_scan_outputs > 0:\n with subgraph_scope:\n loop_outputs = subgraph_scope.from_onnx(\n body, graph_scope.opset, get_output_expr=True\n )\n loop_outputs = _expr.TupleWrapper(loop_outputs, len(body.output))\n\n for i in range(num_scan_outputs):\n name, _, _, _ = get_info(body.output[i + num_state_outputs])\n output_node = infer_type(loop_outputs[i + num_state_outputs])\n shape = list(get_const_tuple(output_node.checked_type.shape))\n if scan_output_axes[i] < 0:\n scan_output_axes[i] = len(shape) + scan_output_axes[i] + 1\n shape.insert(scan_output_axes[i], max_loop_count)\n dtype = output_node.checked_type.dtype\n scan_output_vars.append(_expr.var(name, shape=shape, dtype=dtype))\n scan_output_init.append(_op.zeros(shape, dtype))\n\n # loop vars = [iter_count, scan_state, scan_out]\n loop_vars = [\n _expr.var(\"iter\", shape=(), dtype=\"int32\"), # iteration count\n ]\n loop_vars += [\n get_var(body.input[i].name, v) for i, v in enumerate(inputs) if i < num_state_inputs\n ]\n loop_vars += scan_output_vars\n body_input_var_names = [\"iter\"] + [body.input[i].name for i in range(len(body.input))]\n\n # # Now we can remove loop iter variables from our inner loop's inputs.\n # # This is kind of a hack since we have graph inputs that we don't\n # # want to treat as actual inputs.\n while len(body.input) != 0:\n body.input.pop(0)\n\n # Define the loop body, in this function we need to unpack loop inputs,\n # convert the loop subgraph, and pack outputs for the next iteration.\n def body_fn(*loop_inputs):\n # Unpack inputs\n loop_count = loop_inputs[0]\n state_vars = list(loop_inputs[1 : 1 + num_state_inputs])\n scan_vars = list(loop_inputs[1 + num_state_inputs :])\n # body take scan graph scan inputs as original input\n input_scan_exprs = []\n for i in range(num_state_inputs, num_all_inputs):\n if scan_input_directions[i - num_state_inputs] != 0:\n input_scan_exprs.append(\n relay.take(\n inputs[i],\n relay.const(max_loop_count - 1, \"int32\") - loop_count,\n axis=scan_input_axes[i - num_state_inputs],\n )\n )\n else:\n input_scan_exprs.append(\n relay.take(\n inputs[i],\n loop_count,\n axis=scan_input_axes[i - num_state_inputs],\n )\n )\n\n # Prepare body inputs by adding them to node dictionary.\n 
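# --- Illustrative sketch (not part of the importer) -----------------------
# Because the scan outputs here are preallocated at their final length
# (zeros of shape [max_loop_count, ...]), each iteration appends the new
# step and slices one stale element off the other end, keeping the buffer
# length fixed. A NumPy sketch of that shift for the forward direction.
import numpy as np

_buf = np.zeros(3)  # max_loop_count == 3
for _step in [10.0, 20.0, 30.0]:
    _buf = np.concatenate([_buf, [_step]])[1:]  # append, then pop the head
assert (_buf == [10.0, 20.0, 30.0]).all()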
body_inputs = [loop_count] + state_vars + input_scan_exprs\n for i, inp in enumerate(body_inputs):\n subgraph_scope._nodes[body_input_var_names[i]] = inp\n\n # Get the output of the current loop using the updated inputs.\n with subgraph_scope:\n loop_outputs = subgraph_scope.from_onnx(\n body, graph_scope.opset, get_output_expr=True\n )\n # Unpack the body outputs and prepare variables for next iteration.\n new_state_vars = [loop_outputs[i] for i in range(num_state_outputs)]\n new_scan_vars = [loop_outputs[i] for i in range(num_state_outputs, num_all_outputs)]\n\n # Add new scan outputs to tracking\n combined_scan_outputs = []\n for i in range(num_scan_outputs):\n if scan_output_directions[i] == 0:\n # append new scan output\n combined_scan = _op.concatenate(\n [scan_vars[i], _op.expand_dims(new_scan_vars[i], axis=scan_output_axes[i])],\n axis=scan_output_axes[i],\n )\n # pop head scan output\n combined_scan = _op.strided_slice(\n combined_scan,\n begin=[1],\n end=[max_loop_count + 1],\n strides=[1],\n axes=[scan_output_axes[i]],\n )\n else:\n # prepend new scan output\n combined_scan = _op.concatenate(\n [_op.expand_dims(new_scan_vars[i], axis=scan_output_axes[i]), scan_vars[i]],\n axis=scan_output_axes[i],\n )\n # pop tail scan output\n combined_scan = _op.strided_slice(\n combined_scan,\n begin=[0],\n end=[max_loop_count],\n strides=[1],\n axes=[scan_output_axes[i]],\n )\n combined_scan_outputs.append(combined_scan)\n\n incr = _expr.const(1, dtype=\"int32\")\n loop_count = loop_count + incr\n\n # Pack loop outputs for next iteration\n # [iter_count, state_var, scan_var]\n return [loop_count] + new_state_vars + combined_scan_outputs\n\n # Create the loop function.\n loop = fold_constant(_loops.while_loop(cond_fn, loop_vars, body_fn))\n\n # Now need to run initial values through the graph.\n init_count = _expr.const(0, dtype=\"int32\")\n\n input_states = [inputs[i] for i in range(num_state_inputs)]\n loop_vals = loop(init_count, *input_states, *scan_output_init)\n\n outputs = _expr.TupleWrapper(\n _expr.Tuple([_expr.TupleGetItem(loop_vals, i + 1) for i in range(num_all_outputs)]),\n num_all_outputs,\n )\n\n # Update outer graph with constants found in the subgraph.\n free_vars = analysis.free_vars(loop)\n graph_scope._params.update(subgraph_scope._params)\n graph_scope._nodes.update(subgraph_scope._nodes)\n for var in free_vars:\n graph_scope._nodes.update({var.name_hint: var})\n return outputs\n\n\nclass LinearRegressor(OnnxOpConverter):\n \"\"\"Operator converter for LinearRegressor.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n data = inputs[0]\n coefficients = attr.get(\"coefficients\", 0)\n data_shape = infer_shape(data)\n targets = attr.get(\"targets\", 1)\n coefficients = _expr.const(list(coefficients), dtype=\"float32\")\n coefficients_shape = infer_shape(coefficients)\n\n coefficients = _op.reshape(coefficients, (targets, coefficients_shape[0] // targets))\n if coefficients_shape[0] // targets < data_shape[-1]:\n data = _op.split(data, [coefficients_shape[0] // targets], -1)[0]\n\n mm_out = _op.nn.dense(data, coefficients)\n\n if \"intercepts\" in attr:\n intercepts = attr.get(\"intercepts\", 0)\n intercepts = _expr.const(list(intercepts), dtype=\"float32\")\n\n if targets == 1:\n return _op.nn.bias_add(mm_out, intercepts, axis=-1)\n return get_relay_op(\"add\")(mm_out, intercepts)\n\n return mm_out\n\n\nclass NonMaxSuppression(OnnxOpConverter):\n \"\"\"Operator converter for NonMaxSuppression.\"\"\"\n\n @classmethod\n def _impl_v10(cls, inputs, attr, 
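\n# A NumPy sketch of the math the LinearRegressor converter above lowers to\n# (shapes and values are illustrative assumptions):\n#\n#     import numpy as np\n#     x = np.random.rand(2, 3).astype(\"float32\")           # (batch, features)\n#     coef = np.arange(6, dtype=\"float32\").reshape(2, 3)   # targets=2 rows\n#     intercepts = np.array([0.5, -0.5], dtype=\"float32\")\n#     y = x @ coef.T + intercepts                          # == nn.dense + bias_add\n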
params):\n # Get parameter values\n boxes = inputs[0]\n scores = inputs[1]\n max_output_boxes_per_class = inputs[2]\n iou_threshold = inputs[3]\n score_threshold = inputs[4]\n\n boxes_dtype = infer_type(boxes).checked_type.dtype\n\n if attr.get(\"center_point_box\", 0) != 0:\n xc, yc, w, h = _op.split(boxes, 4, axis=2)\n half_w = w / _expr.const(2.0, boxes_dtype)\n half_h = h / _expr.const(2.0, boxes_dtype)\n x1 = xc - half_w\n x2 = xc + half_w\n y1 = yc - half_h\n y2 = yc + half_h\n boxes = _op.concatenate([y1, x1, y2, x2], axis=2)\n\n if iou_threshold is None:\n iou_threshold = _expr.const(0.0, dtype=\"float32\")\n if score_threshold is None:\n score_threshold = _expr.const(0.0, dtype=\"float32\")\n\n def conditionally_squeeze_scalar(x):\n rank = len(infer_shape(x))\n assert rank <= 1, \"nms thresholds must be scalars\"\n if rank == 1:\n return _op.squeeze(x, [0])\n return x\n\n max_output_boxes_per_class = conditionally_squeeze_scalar(max_output_boxes_per_class)\n iou_threshold = conditionally_squeeze_scalar(iou_threshold)\n score_threshold = conditionally_squeeze_scalar(score_threshold)\n\n nms_out = _op.vision.all_class_non_max_suppression(\n boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold\n )\n\n return _op.strided_slice(nms_out[0], _op.const([0], dtype=\"int64\"), nms_out[1])\n\n\nclass ATen(OnnxOpConverter):\n \"\"\"Operator converter for Pytorch ATen ops.\"\"\"\n\n @classmethod\n def _op_dispatch(cls, operator, inputs, attr, params):\n op_map = {\n \"size\": cls._size,\n \"arange\": cls._arange,\n \"index_put\": cls._index_put,\n \"reshape\": cls._reshape,\n \"embedding_bag\": cls._embedding_bag,\n }\n assert operator in op_map, \"Operator %s is not supported.\" % operator\n return op_map[operator](inputs, attr, params)\n\n @classmethod\n def _size(cls, inputs, attr, params):\n return _op.take(\n _op.shape_of(inputs[0], dtype=\"int64\"),\n _expr.const(-1, dtype=\"int64\"),\n axis=0,\n mode=\"wrap\",\n )\n\n @classmethod\n def _arange(cls, inputs, attr, params):\n return _op.arange(inputs[0], inputs[1], inputs[2], dtype=\"int64\")\n\n @classmethod\n def _check_index(cls, indices, values):\n def unfolding_indices(indices, values):\n n = len(indices)\n flatten_indices = []\n slices_size = []\n for index in indices:\n flatten_indices.append(_op.reshape(index, _op.const([-1])))\n slices_size.append(infer_shape(flatten_indices[-1])[0])\n repeat_size = [1]\n tile_size = [1]\n for i in range(1, n):\n repeat_size.append(slices_size[-i] * repeat_size[-1])\n tile_size.append(slices_size[i - 1] * tile_size[-1])\n repeat_size.reverse()\n unflod_slices = []\n for i in range(n):\n unflod_slices.append(\n fold_constant(\n _op.repeat(_op.tile(flatten_indices[i], (tile_size[i],)), repeat_size[i], 0)\n )\n )\n return unflod_slices, _op.reshape(values, _op.const([-1]))\n\n values_shape = infer_shape(values)\n if len(values_shape) != 1:\n return unfolding_indices(indices, values)\n return indices, values\n\n @classmethod\n def _index_put(cls, inputs, attr, params):\n in_tensor = inputs[0]\n indices, values = cls._check_index(inputs[1 : len(inputs) - 2], inputs[len(inputs) - 2])\n accumulate = inputs[len(inputs) - 1].data.asnumpy() != 0\n if not accumulate:\n mode = \"update\"\n else:\n mode = \"add\"\n index_tensor = _op.stack(indices, axis=0)\n return _op.transform.scatter_nd(in_tensor, index_tensor, values, mode)\n\n @classmethod\n def _reshape(cls, inputs, attr, params):\n return _op.reshape(inputs[0], inputs[1])\n\n @classmethod\n def _embedding_bag(cls, inputs, attr, 
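\n# NumPy analogue of the two scatter_nd modes _index_put selects above\n# (\"update\" overwrites, \"add\" accumulates; the data is illustrative):\n#\n#     import numpy as np\n#     t = np.zeros((3, 3), dtype=\"float32\")\n#     rows, cols = np.array([0, 2]), np.array([1, 1])\n#     vals = np.array([5.0, 7.0], dtype=\"float32\")\n#     t_update = t.copy(); t_update[rows, cols] = vals        # mode=\"update\"\n#     t_add = t.copy(); np.add.at(t_add, (rows, cols), vals)  # mode=\"add\"\n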
params):\n        mode_map = {0: _op.sum, 1: _op.mean, 2: _op.max}\n\n        mode = attr.get(\"mode\", 1)\n        reduction_fn = mode_map[mode]\n        weights, indices, offsets = inputs[0], inputs[1], inputs[2]\n        offsets_shape = _op.shape_of(offsets, dtype=\"int64\")\n        indices_shape = _op.stack(\n            [\n                _op.take(offsets_shape, _expr.const(0, dtype=\"int64\")),\n                _expr.const(-1, dtype=\"int64\"),\n            ],\n            axis=0,\n        )\n        indices = _op.reshape(indices, indices_shape)\n        embedding = _op.take(weights, indices.astype(\"int64\"), axis=0)\n        rembedding = reduction_fn(embedding, axis=1)\n        # EmbeddingBag has 4 outputs despite only the first ever being used.\n        # Fill the rest with 0s.\n        unused_output = _expr.const(0, dtype=\"float32\")\n        return _expr.TupleWrapper(\n            _expr.Tuple((rembedding, unused_output, unused_output, unused_output)), 4\n        )\n\n    @classmethod\n    def _impl_v1(cls, inputs, attr, params):\n        operator = attr.get(\"operator\", None).decode(\"utf-8\")\n        assert operator, \"ATen Operator not found\"\n        return cls._op_dispatch(operator, inputs, attr, params)\n\n\nclass QuantizeLinear(OnnxOpConverter):\n    \"\"\"Operator converter for QuantizeLinear.\"\"\"\n\n    @classmethod\n    def _impl_v10(cls, inputs, attr, params):\n        data, scale, zp = inputs\n        out_dtype = infer_type(zp).checked_type.dtype\n        return _qnn.op.quantize(data, scale, _op.cast(zp, \"int32\"), 0, out_dtype)\n\n    @classmethod\n    def _impl_v13(cls, inputs, attr, params):\n        data, scale, zp = inputs\n        out_dtype = infer_type(zp).checked_type.dtype\n        axis = attr.get(\"axis\", 1)\n        if len(infer_shape(data)) < 2:\n            axis = 0\n        return _qnn.op.quantize(data, scale, _op.cast(zp, \"int32\"), axis, out_dtype)\n\n\nclass DequantizeLinear(OnnxOpConverter):\n    \"\"\"Operator converter for DequantizeLinear.\"\"\"\n\n    @classmethod\n    def _impl_v10(cls, inputs, attr, params):\n        data, scale, zp = inputs\n        return _qnn.op.dequantize(data, scale, _op.cast(zp, \"int32\"), 0)\n\n    @classmethod\n    def _impl_v13(cls, inputs, attr, params):\n        data, scale, zp = inputs\n        axis = attr.get(\"axis\", 1)\n        if len(infer_shape(data)) <= 1:\n            axis = 0\n        return _qnn.op.dequantize(data, scale, _op.cast(zp, \"int32\"), axis)\n\n\nclass DynamicQuantizeLinear(OnnxOpConverter):\n    \"\"\"Operator converter for DynamicQuantizeLinear.\"\"\"\n\n    @classmethod\n    def _impl_v11(cls, inputs, attr, params):\n        \"\"\"This op is deprecated and only supports uint8.\"\"\"\n        data = inputs[0]\n        data_dtype = infer_type(data).checked_type.dtype\n        zero = _op.const(0, dtype=data_dtype)\n        maximum = _op.maximum(zero, _op.max(data))\n        minimum = _op.minimum(zero, _op.min(data))\n        scale = (maximum - minimum) / _op.const(255, dtype=data_dtype)\n        zp = zero - _op.min(data) / scale\n        zp = _op.cast(_op.round(_op.clip(zp, 0, 255)), \"uint8\")\n        return _expr.TupleWrapper(\n            _expr.Tuple(\n                [_qnn.op.quantize(data, scale, _op.cast(zp, \"int32\"), 0, \"uint8\"), scale, zp]\n            ),\n            size=3,\n        )\n\n\nclass QLinearConv(OnnxOpConverter):\n    \"\"\"Operator converter for QLinearConv.\"\"\"\n\n    @classmethod\n    def _impl_v10(cls, inputs, attr, params):\n        data = inputs[0]\n        x_scale = get_scalar(inputs[1], params)\n        x_zero_point = get_scalar(inputs[2], params, \"int32\")\n        weight = inputs[3]\n        w_scale = get_scalar_or_1d_tensor(inputs[4], params)\n        w_zero_point = get_scalar_or_1d_tensor(inputs[5], params, \"int32\")\n        y_scale = fold_constant(get_scalar(inputs[6], params))\n        y_zero_point = get_scalar(inputs[7], params, \"int32\")\n\n        # Check shapes for per-channel quantization\n        w_scale_shape = infer_shape(w_scale)\n        w_zero_point_shape = infer_shape(w_zero_point)\n
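\n        # For reference while reading the QLinear* converters, a NumPy sketch of\n        # the affine scheme the quantize/dequantize pair above implements (the\n        # values and the dynamic-range zero point mirror DynamicQuantizeLinear;\n        # data is illustrative, not part of the original converter):\n        #\n        #     import numpy as np\n        #     x = np.array([-1.0, 0.0, 2.5], dtype=\"float32\")\n        #     rmin, rmax = min(x.min(), 0.0), max(x.max(), 0.0)  # range includes 0\n        #     scale = (rmax - rmin) / 255.0\n        #     zp = np.uint8(np.clip(np.round(0.0 - rmin / scale), 0, 255))\n        #     q = np.uint8(np.clip(np.round(x / scale) + zp, 0, 255))  # quantize\n        #     x_hat = (q.astype(\"float32\") - np.float32(zp)) * scale   # dequantize\n        if 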
len(w_scale_shape) == 1 or len(w_zero_point_shape) == 1:\n m = infer_shape(weight)[0]\n if m != w_scale_shape[0] or m != w_zero_point_shape[0]:\n raise tvm.error.OpAttributeInvalid(\n \"The number of elements should be equal to the number of output channels\"\n )\n\n input_shape = infer_shape(data)\n\n ndim = len(input_shape)\n kernel_type = infer_type(weight)\n kernel_shapes = [get_const_tuple(kernel_type.checked_type.shape)]\n if \"kernel_shape\" not in attr:\n attr[\"kernel_shape\"] = kernel_shapes[0][2:]\n\n if \"auto_pad\" in attr:\n attr[\"auto_pad\"] = attr[\"auto_pad\"].decode(\"utf-8\")\n if attr[\"auto_pad\"] in (\"SAME_UPPER\", \"SAME_LOWER\"):\n # Warning: Convolution does not yet support dynamic shapes,\n # one will need to run dynamic_to_static on this model after import\n zp = fold_constant(x_zero_point)\n assert isinstance(zp, relay.Constant), \"Zero point expected to be a constant\"\n data = autopad(\n data,\n attr.get(\"strides\", [1] * (ndim - 2)),\n attr[\"kernel_shape\"],\n attr.get(\"dilations\", [1] * (ndim - 2)),\n pad_value=zp.data,\n mode=attr[\"auto_pad\"],\n )\n elif attr[\"auto_pad\"] == \"VALID\":\n attr[\"pads\"] = tuple([0 for i in range(ndim - 2)])\n elif attr[\"auto_pad\"] == \"NOTSET\":\n pass\n else:\n msg = 'Value {} in attribute \"auto_pad\" of operator Conv is invalid.'\n raise tvm.error.OpAttributeInvalid(msg.format(attr[\"auto_pad\"]))\n attr.pop(\"auto_pad\")\n\n out_channels = kernel_shapes[0][0]\n dilation = attr.get(\"dilations\", [1] * (ndim - 2))\n strides = attr.get(\"strides\", [1] * (ndim - 2))\n padding = attr[\"pads\"] if \"pads\" in attr else 0\n groups = attr[\"group\"] if \"group\" in attr else 1\n\n if ndim != 4:\n raise tvm.error.OpAttributeInvalid(\n \"Only 2D kernels are supported for operator QLinearConv.\"\n )\n\n out = _qnn.op.conv2d(\n data,\n weight,\n x_zero_point,\n w_zero_point,\n x_scale,\n w_scale,\n kernel_size=attr[\"kernel_shape\"],\n channels=out_channels,\n strides=strides,\n padding=padding,\n dilation=dilation,\n groups=groups,\n )\n use_bias = len(inputs) == 9\n if use_bias:\n out = _op.nn.bias_add(out, inputs[8])\n\n out_dtype = infer_type(inputs[7]).checked_type.dtype\n requantize_scale = _op.multiply(x_scale, w_scale)\n\n # requantize requires y_scale to be constant,\n # if y_scale is not constant, doing dequantize -> quantize\n if isinstance(y_scale, _expr.Constant):\n out = _qnn.op.requantize(\n out,\n requantize_scale,\n _op.const(0, dtype=\"int32\"),\n y_scale,\n y_zero_point,\n out_dtype=out_dtype,\n axis=1,\n )\n else:\n out = _qnn.op.dequantize(out, requantize_scale, _op.const(0, dtype=\"int32\"), axis=1)\n out = _qnn.op.quantize(out, y_scale, y_zero_point, axis=1, out_dtype=out_dtype)\n return out\n\n\nclass QLinearAdd(OnnxOpConverter):\n \"\"\"Operator converter for QLinearAdd from Microsoft onnxruntime contrib opset.\"\"\"\n\n @classmethod\n def _impl_v10(cls, inputs, attr, params):\n a = inputs[0]\n a_scale = get_scalar(inputs[1], params)\n a_zero_point = get_scalar(inputs[2], params, \"int32\")\n b = inputs[3]\n b_scale = get_scalar(inputs[4], params)\n b_zero_point = get_scalar(inputs[5], params, \"int32\")\n c_scale = get_scalar(inputs[6], params)\n c_zero_point = get_scalar(inputs[7], params, \"int32\")\n\n dtype = infer_type(a).checked_type.dtype\n\n ## Onnxruntime doesn't actually do this op in integer, they dequantize to fp32\n ## and then requantize afer\n ## https://github.com/microsoft/onnxruntime/blob/master/onnxruntime/core/mlas/lib/qladd.cpp\n a = _qnn.op.dequantize(\n inputs[0], 
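\n# The requantize-versus-(dequantize -> quantize) fallback used by QLinearConv\n# above, written out in NumPy (scales and values are illustrative):\n#\n#     import numpy as np\n#     acc = np.array([123, -40], dtype=\"int32\")   # int32 accumulator, zero point 0\n#     x_scale, w_scale = 0.02, 0.01\n#     y_scale, y_zp = 0.05, 128\n#     real = acc * (x_scale * w_scale)            # dequantize\n#     y = np.uint8(np.clip(np.round(real / y_scale) + y_zp, 0, 255))  # quantize\n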
a_scale, a_zero_point\n ) # , c_scale, c_zero_point, out_dtype = dtype)\n b = _qnn.op.dequantize(\n inputs[3], b_scale, b_zero_point\n ) # , c_scale, c_zero_point, out_dtype = dtype)\n out = _op.add(a, b)\n return _qnn.op.quantize(out, c_scale, c_zero_point, out_dtype=dtype)\n\n\nclass QLinearMatMul(OnnxOpConverter):\n \"\"\"\n Operator converter for QLinearMatMul from Microsoft onnxruntime contrib opset.\n\n Limitations:\n - Only supports 2D input tensors.\n - Not guaranteed to meet the integer-overflow behavior stipulated in the\n ONNX documentation for this operator.\n\n The QLinearMatMul converter is re-used for MatMulInteger and is adapted for\n the latter with the optional `expected_out_dtypes` argument.\n \"\"\"\n\n @classmethod\n def _impl_v10(cls, inputs, attr, params, expected_out_dtypes=None):\n if expected_out_dtypes is None:\n # The default QLinearMatMul converter is expected to have one of\n # these output dtypes.\n expected_out_dtypes = [\"int8\", \"uint8\"]\n\n # Some of the ops used below take scalar-like inputs, and may require either\n # of the following:\n #\n # - the input is Const node (not merely an expression that *could* be reduced\n # to a single Const at graph-compilation time)\n #\n # - the input has a specific dtype\n #\n # This function attempts to present 'x' in a form that meets both of those\n # requirements.\n def try_resolve_to_const(x, dtype_override=None):\n x2 = try_resolve_var_to_const(x, params)\n num_elem = np.prod(infer_shape(x))\n if num_elem == 1:\n x2 = ensure_scalar_shape(x2)\n x_dtype = infer_type(x).checked_type.dtype\n if (dtype_override is not None) and (dtype_override != x_dtype):\n x2 = _op.cast(x2, dtype_override)\n x3 = fold_constant(x2)\n return x3\n\n # Unpack the inputs and obtain some type info...\n a, a_scale, a_zp, b, b_scale, b_zp, y_scale, y_zp = inputs\n\n a_type = infer_type(a).checked_type # 'T1' in ONNX doc for this op\n a_scale_type = infer_type(a_scale).checked_type\n a_zp_type = infer_type(a_zp).checked_type\n\n b_type = infer_type(b).checked_type # 'T2' in ONNX doc for this op\n b_scale_type = infer_type(b_scale).checked_type\n b_zp_type = infer_type(b_zp).checked_type\n\n y_scale_type = infer_type(y_scale).checked_type\n y_zp_type = infer_type(y_zp).checked_type # 'T3' in ONNX doc for this op\n\n a_shape = infer_shape(a)\n b_shape = infer_shape(b)\n\n # Verify type assumptions, based on the ONNX doc for this op...\n assert a_type.dtype in [\"int8\", \"uint8\"]\n assert a_scale_type.dtype == \"float32\"\n assert a_zp_type.dtype == a_type.dtype\n\n assert b_type.dtype in [\"int8\", \"uint8\"]\n assert b_scale_type.dtype == \"float32\"\n assert b_zp_type.dtype == b_type.dtype\n\n assert y_scale_type.dtype == \"float32\"\n assert y_zp_type.dtype in expected_out_dtypes\n\n # TODO: relax this limitation in a future version of this importer.\n a_rank = len(a_shape)\n b_rank = len(b_shape)\n assert (a_rank == 2) and (b_rank == 2), (\n \"QLinearMatMul importer currently requires both 'a' and 'b' tensors to be 2D, but\"\n \" rank(a)={}, rank(b)={}\".format(a_rank, b_rank)\n )\n\n # _qnn.op.dense requires the zero-point values to have dtype int32.\n a_scale_scalar = try_resolve_to_const(a_scale)\n a_zp_scalar = try_resolve_to_const(a_zp, \"int32\")\n\n b_scale_scalar = try_resolve_to_const(b_scale)\n b_zp_scalar = try_resolve_to_const(b_zp, \"int32\")\n\n y_scale_scalar = try_resolve_to_const(y_scale)\n y_zp_scalar = try_resolve_to_const(y_zp, \"int32\")\n\n # TODO: Confirm that we're using 'num_hidden_units' correctly / as 
intended with\n # the '_qnn.op.dense' instance below.\n num_hidden_units = infer_shape(b)[-1]\n\n # - Specify the matmul result dtype as int32, so that hopefully the matmul will use\n # a 32-bit accumulator as seems to be required by the ONNX op's documentation.\n #\n # TL;DR:\n # The ONNX documentation for this op is clear about acceptable overflow\n # behavior during the matmul operation:\n # - The scalar multiplication ops MAY NOT overflow.\n # - The scalar addition ops, which sum the results of the scalar multiplication,\n # MAY overflow, but if they do so, it must behave as one would expect during\n # 32-bit integer-addition overflow.\n # As of this writing, Relay's qnn.op.dense operator doesn't expose a way for us to\n # express these constraints.\n #\n # TODO: Extend TVM / Relay / TIR / etc. to allow this kind of constraint to be\n # expressed in a Relay graph. And then update this importer and various TVM\n # backends accordingly.\n matmul_result_dtype = \"int32\"\n\n matmul_result = _qnn.op.dense(\n a,\n _op.transpose(b),\n a_zp_scalar,\n b_zp_scalar,\n a_scale_scalar,\n b_scale_scalar,\n num_hidden_units,\n matmul_result_dtype,\n )\n\n # This information might only be found in the C++ code-comments for the\n # dense.matmul op, but the quantized tensor returned by _qnn.op.dense\n # has scale==(a_scale_scalar * b_scale_scalar), and zero_point==0.\n #\n # 'matmul_result_zp_scalar' has type 'int32' to satisfy input requirements\n # of the [de/re]quantize ops below.\n matmul_result_scale_scalar = fold_constant(_op.multiply(a_scale_scalar, b_scale_scalar))\n matmul_result_zp_scalar = _op.const(0, dtype=\"int32\")\n\n if \"int32\" in expected_out_dtypes:\n # This is the adaptation of the QLinearMatMul converter for MatMulInteger,\n # in the MatMulInteger case we skip the unnecessary requantization step.\n return matmul_result\n\n # requantize requires y_scale to be constant,\n # if y_scale is not constant, doing dequantize -> quantize\n if isinstance(y_scale_scalar, _expr.Constant):\n y = _qnn.op.requantize(\n matmul_result,\n matmul_result_scale_scalar,\n matmul_result_zp_scalar,\n y_scale_scalar,\n y_zp_scalar,\n axis=-1,\n rounding=\"TONEAREST\",\n out_dtype=y_zp_type.dtype,\n )\n else:\n matmul_result_deq = _qnn.op.dequantize(\n matmul_result, matmul_result_scale_scalar, matmul_result_zp_scalar, axis=0\n )\n\n y = _qnn.op.quantize(\n matmul_result_deq, y_scale_scalar, y_zp_scalar, axis=0, out_dtype=y_zp_type.dtype\n )\n\n return y\n\n\nclass MatMulInteger(OnnxOpConverter):\n \"\"\"Operator converter for MatMulInteger.\"\"\"\n\n @classmethod\n def _impl_v10(cls, inputs, attr, params):\n a = inputs[0]\n b = inputs[1]\n\n a_dtype = infer_type(a).checked_type.dtype\n b_dtype = infer_type(b).checked_type.dtype\n\n assert a_dtype in (\"int8\", \"uint8\"), \"MatMulInteger: invalid dtype for first input\"\n assert b_dtype in (\"int8\", \"uint8\"), \"MatMulInteger: invalid dtype for second input\"\n\n assert a_dtype == b_dtype, \"MatMulInteger: input dtypes must match\"\n\n a_scale = _op.const(1.0, dtype=\"float32\")\n b_scale = _op.const(1.0, dtype=\"float32\")\n out_scale = _op.const(1.0, dtype=\"float32\")\n\n a_zero_point = _op.const(0.0, dtype=a_dtype)\n b_zero_point = _op.const(0.0, dtype=b_dtype)\n out_zero_point = _op.const(0.0, dtype=\"int32\")\n\n if len(inputs) == 4:\n a_zero_point = inputs[2]\n b_zero_point = inputs[3]\n\n a_zp_dtype = infer_type(a_zero_point).checked_type.dtype\n b_zp_dtype = infer_type(b_zero_point).checked_type.dtype\n assert (\n a_zp_dtype == a_dtype and 
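\n# NumPy reference for the zero-point-corrected integer product that MatMulInteger\n# reuses from the QLinearMatMul converter (unit scales; data is illustrative):\n#\n#     import numpy as np\n#     a = np.array([[2, 3]], dtype=\"uint8\"); a_zp = 1\n#     b = np.array([[1], [4]], dtype=\"uint8\"); b_zp = 0\n#     y = (a.astype(\"int32\") - a_zp) @ (b.astype(\"int32\") - b_zp)  # int32 result\n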
b_zp_dtype == b_dtype\n ), \"MatMulInteger: input dtype doesn't match zero point dtype\"\n elif len(inputs) != 2:\n raise AssertionError(\n \"MatMulInteger op takes 2 or 4 inputs, {} given\".format(len(inputs))\n )\n\n inputs = [\n a,\n a_scale,\n a_zero_point,\n b,\n b_scale,\n b_zero_point,\n out_scale,\n out_zero_point,\n ]\n\n return QLinearMatMul.get_converter(10)(inputs, attr, params, expected_out_dtypes=[\"int32\"])\n\n\nclass QLinearMul(OnnxOpConverter):\n \"\"\"Operator converter for QLinearMul from Microsoft onnxruntime contrib opset.\"\"\"\n\n @classmethod\n def _impl_v10(cls, inputs, attr, params):\n a = inputs[0]\n a_scale = get_scalar(inputs[1], params)\n a_zero_point = get_scalar(inputs[2], params, \"int32\")\n b = inputs[3]\n b_scale = get_scalar(inputs[4], params)\n b_zero_point = get_scalar(inputs[5], params, \"int32\")\n y_scale = fold_constant(get_scalar(inputs[6], params))\n y_zero_point = get_scalar(inputs[7], params, \"int32\")\n\n dtype = infer_type(a).checked_type.dtype\n\n ## Onnxruntime doesn't actually do this op in integer, they dequantize to fp32\n ## and then requantize afer\n ## https://github.com/microsoft/onnxruntime/blob/master/onnxruntime/core/mlas/lib/qlmul.cpp\n a = _qnn.op.dequantize(inputs[0], a_scale, a_zero_point)\n b = _qnn.op.dequantize(inputs[3], b_scale, b_zero_point)\n out = _op.multiply(a, b)\n return _qnn.op.quantize(out, y_scale, y_zero_point, out_dtype=dtype)\n\n\nclass QLinearLeakyRelu(OnnxOpConverter):\n \"\"\"Operator converter for QLinearLeakyRelu from Microsoft onnxruntime contrib opset.\"\"\"\n\n @classmethod\n def _impl_v10(cls, inputs, attr, params):\n\n a_scale = get_scalar(inputs[1], params)\n a_zero_point = get_scalar(inputs[2], params, \"int32\")\n y_scale = fold_constant(get_scalar(inputs[3], params))\n y_zero_point = get_scalar(inputs[4], params, \"int32\")\n alpha = float(attr.get(\"alpha\", 1.0))\n\n dtype = infer_type(inputs[0]).checked_type.dtype\n\n # Onnxruntime doesn't actually do this op in integer, they dequantize to fp32\n # and then requantize afer (according to documentation below)\n # https://github.com/microsoft/onnxruntime/blob/master/docs/ContribOperators.md#com.microsoft.QLinearLeakyRelu\n a = _qnn.op.dequantize(inputs[0], a_scale, a_zero_point)\n out = _op.nn.leaky_relu(a, alpha)\n return _qnn.op.quantize(out, y_scale, y_zero_point, out_dtype=dtype)\n\n\nclass QLinearSigmoid(OnnxOpConverter):\n \"\"\"Operator converter for QLinearSigmoid from Microsoft onnxruntime contrib opset.\"\"\"\n\n @classmethod\n def _impl_v10(cls, inputs, attr, params):\n x = inputs[0]\n x_scale = get_scalar(inputs[1], params)\n x_zero_point = get_scalar(inputs[2], params, \"int32\")\n y_scale = fold_constant(get_scalar(inputs[3], params))\n y_zero_point = get_scalar(inputs[4], params, \"int32\")\n\n dtype = infer_type(x).checked_type.dtype\n\n ## Apparently, onnxruntime doesn't do this op in integer, they dequantize to fp32\n ## and then requantize after:\n ## https://github.com/microsoft/onnxruntime/blob/master/onnxruntime/core/\n ## providers/dml/DmlExecutionProvider/src/GraphTransformer.cpp#L245\n x = _qnn.op.dequantize(x, x_scale, x_zero_point)\n out = _op.sigmoid(x)\n return _qnn.op.quantize(out, y_scale, y_zero_point, out_dtype=dtype)\n\n\nclass QLinearConcat(OnnxOpConverter):\n \"\"\"Operator converter for QLinearConcat from Microsoft onnxruntime contrib opset.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n # which axis to concat on\n axis = attr[\"axis\"]\n\n y_scale = fold_constant(get_scalar(inputs[0], 
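\n# NumPy form of the dequantize -> float op -> quantize fallback shared by the\n# QLinearAdd / QLinearMul / QLinearLeakyRelu / QLinearSigmoid converters above\n# (scales and zero points are illustrative):\n#\n#     import numpy as np\n#     dq = lambda q, s, zp: (q.astype(\"float32\") - zp) * s\n#     qz = lambda x, s, zp: np.uint8(np.clip(np.round(x / s) + zp, 0, 255))\n#     a = np.array([130], dtype=\"uint8\")\n#     b = np.array([140], dtype=\"uint8\")\n#     out = qz(dq(a, 0.1, 128) * dq(b, 0.1, 128), 0.05, 128)  # QLinearMul case\n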
params))\n y_zero_point = get_scalar(inputs[1], params, \"int32\")\n\n # input tensors, scales, zero_points\n assert (\n len(inputs) % 3 == 2\n ), \"Additional input count must be a multiple of 3 -- tensor/scale/zero_point tuples\"\n tensors = []\n scales = []\n zero_points = []\n for i in range(2, len(inputs), 3):\n tensors.append(inputs[i])\n scales.append(get_scalar(inputs[i + 1], params))\n zero_points.append(get_scalar(inputs[i + 2], params, \"int32\"))\n\n return _qnn.op.concatenate(tensors, scales, zero_points, y_scale, y_zero_point, axis)\n\n\nclass ConvInteger(OnnxOpConverter):\n \"\"\"Operator converter for ConvInteger.\"\"\"\n\n @classmethod\n def _impl_v10(cls, inputs, attr, params):\n data = inputs[0]\n weight = inputs[1]\n data_zp = inputs[2]\n weight_zp = inputs[3]\n if data_zp is None:\n data_zp = _expr.const(0, \"int32\")\n if weight_zp is None:\n weight_zp = _expr.const(0, \"int32\")\n\n input_type = infer_type(data)\n input_shape = get_const_tuple(input_type.checked_type.shape)\n\n ndim = len(input_shape)\n kernel_type = infer_type(weight)\n kernel_shape = get_const_tuple(kernel_type.checked_type.shape)\n if \"kernel_shape\" not in attr:\n attr[\"kernel_shape\"] = kernel_shape[2:]\n\n if \"auto_pad\" in attr:\n attr[\"auto_pad\"] = attr[\"auto_pad\"].decode(\"utf-8\")\n if attr[\"auto_pad\"] in (\"SAME_UPPER\", \"SAME_LOWER\"):\n # Warning: Convolution does not yet support dynamic shapes,\n # one will need to run dynamic_to_static on this model after import\n data = autopad(\n data,\n attr.get(\"strides\", [1] * (ndim - 2)),\n attr[\"kernel_shape\"],\n attr.get(\"dilations\", [1] * (ndim - 2)),\n pad_value=data_zp,\n mode=attr[\"auto_pad\"],\n )\n elif attr[\"auto_pad\"] == \"VALID\":\n attr[\"pads\"] = tuple([0 for i in range(ndim - 2)])\n elif attr[\"auto_pad\"] == \"NOTSET\":\n pass\n else:\n msg = 'Value {} in attribute \"auto_pad\" of operator Conv is invalid.'\n raise tvm.error.OpAttributeInvalid(msg.format(attr[\"auto_pad\"]))\n attr.pop(\"auto_pad\")\n\n out_channels = kernel_shape[0]\n dilation = attr.get(\"dilations\", [1] * (ndim - 2))\n strides = attr.get(\"strides\", [1] * (ndim - 2))\n padding = attr[\"pads\"] if \"pads\" in attr else 0\n groups = attr[\"group\"] if \"group\" in attr else 1\n\n if ndim != 4:\n raise tvm.error.OpAttributeInvalid(\n \"Only 2D kernels are supported for operator ConvInteger.\"\n )\n\n return _qnn.op.conv2d(\n data,\n weight,\n _op.cast(data_zp, \"int32\"),\n _op.cast(weight_zp, \"int32\"),\n _expr.const(1.0, \"float32\"),\n _expr.const(1.0, \"float32\"),\n kernel_size=attr[\"kernel_shape\"],\n channels=out_channels,\n strides=strides,\n padding=padding,\n dilation=dilation,\n groups=groups,\n )\n\n\nclass BitShift(OnnxOpConverter):\n \"\"\"Operator converter for NonZero\"\"\"\n\n @classmethod\n def _impl_v11(cls, inputs, attr, params):\n if len(inputs) != 2:\n raise ValueError(\"Bitshift expects 2 inputs\")\n\n direction = attr.get(\"direction\", \"LEFT\").decode(\"ascii\")\n if direction == \"LEFT\":\n out = _op.left_shift(*inputs)\n elif direction == \"RIGHT\":\n out = _op.right_shift(*inputs)\n else:\n raise ValueError(\"Unsupported Shift Direction: \" + direction)\n return out\n\n\nclass Unique(OnnxOpConverter):\n \"\"\"Operator converter for unique\"\"\"\n\n @classmethod\n def _impl_v11(cls, inputs, attr, params):\n if len(inputs) != 1:\n raise ValueError(\"Unique expects 1 input\")\n\n data = inputs[0]\n axis = attr.get(\"axis\", None)\n if axis is None: # If axis is None, flatten the input before calling unique\n data = 
_op.reshape(data, _op.const([-1]))\n else:\n data_shape = infer_shape(data)\n if len(data_shape) != 1:\n raise ValueError(\"TVM only supports 1D Unique operator.\")\n is_sorted = attr.get(\"sorted\", 1) # sorted is 0 or 1, 1 by default\n\n # ONNX documentation lists return_counts as optional but there is no input to specify\n # whether it is returned. Therefore we'll just always return it.\n unique = _op.unique(data, is_sorted=(is_sorted == 1), return_counts=True)\n num_unique = unique[3]\n\n trim_unique_lambda = lambda input: _op.strided_slice(input, _op.const([0]), num_unique)\n\n unique_vals = trim_unique_lambda(unique[0])\n indices = _op.cast(trim_unique_lambda(unique[1]), \"int64\") # ONNX always returns int64\n inverse_indices = _op.cast(unique[2], \"int64\") # ONNX always returns int64\n counts = _op.cast(trim_unique_lambda(unique[4]), \"int64\") # ONNX always returns int64\n # ONNX unique returns unique, indices, inverse_indices, (optional) counts\n return _expr.TupleWrapper(_expr.Tuple([unique_vals, indices, inverse_indices, counts]), 4)\n\n\nclass Einsum(OnnxOpConverter):\n \"\"\"Operator converter for Einsum\"\"\"\n\n @classmethod\n def _impl_v12(cls, inputs, attr, params):\n equation = attr[\"equation\"].decode(\"utf-8\")\n return _op.einsum(inputs, equation)\n\n\nclass RandomNormal(OnnxOpConverter):\n \"\"\"Operator converter for random_normal\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n dtype = get_type(attr.get(\"dtype\", 1))\n mean = attr.get(\"mean\", 0.0)\n scale = attr.get(\"scale\", 1.0)\n seed = attr.get(\"seed\", None)\n shape = attr[\"shape\"]\n\n assert dtype in [\n \"float32\",\n \"float64\",\n ], \"Only float random value generation is currently supported.\"\n\n if seed is None:\n seed = np.random.randint(1e6)\n else:\n seed = int(seed)\n key = _random.threefry_key(seed)\n output = _op.random.normal(key, shape, dtype=dtype, mean=mean, scale=scale)\n _, vals = _expr.TupleWrapper(output, 2)\n return vals\n\n\nclass RandomNormalLike(OnnxOpConverter):\n \"\"\"Operator converter for random_normal_like\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n dtype = attr.get(\"dtype\", None)\n scale = attr.get(\"scale\", 1.0)\n mean = attr.get(\"mean\", 0.0)\n seed = attr.get(\"seed\", None)\n shape = infer_shape(inputs[0])\n if dtype is None:\n dtype = infer_type(inputs[0]).checked_type.dtype\n else:\n dtype = get_type(dtype)\n\n assert dtype in [\n \"float32\",\n \"float64\",\n ], \"Only float random value generation is currently supported.\"\n\n if seed is None:\n seed = np.random.randint(1e6)\n else:\n seed = int(seed)\n key = _random.threefry_key(seed)\n output = _op.random.normal(key, shape, dtype=dtype, mean=mean, scale=scale)\n _, vals = _expr.TupleWrapper(output, 2)\n return vals\n\n\nclass RandomUniform(OnnxOpConverter):\n \"\"\"Operator converter for random_uniform\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n dtype = get_type(attr.get(\"dtype\", 1))\n high = attr.get(\"high\", 1.0)\n low = attr.get(\"low\", 0.0)\n seed = attr.get(\"seed\", None)\n shape = attr[\"shape\"]\n\n assert dtype in [\n \"float32\",\n \"float64\",\n ], \"Only float random value generation is currently supported.\"\n\n if seed is None:\n seed = np.random.randint(1e6)\n else:\n seed = int(seed)\n key = _random.threefry_key(seed)\n output = _op.random.uniform(key, shape, dtype=dtype, low=low, high=high)\n _, vals = _expr.TupleWrapper(output, 2)\n return vals\n\n\nclass RandomUniformLike(OnnxOpConverter):\n \"\"\"Operator converter 
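\n# NumPy analogue of the four outputs the Unique converter above assembles\n# (ONNX Unique returns values, first-occurrence indices, inverse indices,\n# and counts; the sample data is illustrative):\n#\n#     import numpy as np\n#     data = np.array([2, 1, 1, 3, 2])\n#     vals, idx, inv, cnt = np.unique(\n#         data, return_index=True, return_inverse=True, return_counts=True\n#     )\n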
for random_uniform_like\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n dtype = attr.get(\"dtype\", None)\n high = attr.get(\"high\", 1.0)\n low = attr.get(\"low\", 0.0)\n seed = attr.get(\"seed\", None)\n shape = infer_shape(inputs[0])\n if dtype is None:\n dtype = infer_type(inputs[0]).checked_type.dtype\n else:\n dtype = get_type(dtype)\n\n assert dtype in [\n \"float32\",\n \"float64\",\n ], \"Only float random value generation is currently supported.\"\n\n if seed is None:\n seed = np.random.randint(1e6)\n else:\n seed = int(seed)\n key = _random.threefry_key(seed)\n output = _op.random.uniform(key, shape, dtype=dtype, low=low, high=high)\n _, vals = _expr.TupleWrapper(output, 2)\n return vals\n\n\nclass NegativeLogLikelihoodLoss(OnnxOpConverter):\n \"\"\"Operator converter for NegativeLogLikehoodLoss\"\"\"\n\n VALID_REDUCTIONS = {\"mean\", \"sum\", \"none\"}\n\n @classmethod\n def run_calculation(\n cls: \"NegativeLogLikelihoodLoss\",\n input_tensor: relay.Expr,\n target_tensor: relay.Expr,\n weight_tensor: Optional[relay.Expr],\n ignore_index: int,\n ):\n \"\"\"Run calculation for NegativeLogLikelihood, returning output tensor and\n weight tensor used for mean-style reductions.\n \"\"\"\n # Convert negative indices --> positive indices for gather ops, note we have to\n # use the original target tensor to interact with ignore_index to have proper behavior.\n normalized_target_tensor = normalize_gather_indices(input_tensor, target_tensor, 1)\n\n if weight_tensor is None:\n channels = infer_shape(input_tensor)[1]\n weight_tensor = relay.ones(\n [channels],\n dtype=infer_type(input_tensor).checked_type.dtype,\n )\n\n loss = -relay.gather(\n input_tensor,\n axis=1,\n indices=relay.expand_dims(normalized_target_tensor, 1),\n )\n loss = relay.squeeze(loss, axis=[1])\n\n expanded_normalized_target_tensor = relay.expand_dims(normalized_target_tensor, 0)\n expanded_normalized_target_tensor = relay.nn.batch_flatten(\n expanded_normalized_target_tensor\n )\n flattened_weights = relay.gather_nd(weight_tensor, expanded_normalized_target_tensor)\n select_weights = relay.reshape_like(flattened_weights, loss)\n loss *= select_weights\n\n if ignore_index is not None:\n # \"Ignore\" values whose target is the ignore_index\n mask_tensor = relay.equal(\n target_tensor, relay.const(ignore_index, dtype=target_tensor.type_annotation.dtype)\n )\n mask_tensor = relay.const(1, dtype=\"int8\") - relay.cast(mask_tensor, \"int8\")\n loss = relay.where(\n mask_tensor, loss, relay.const(0, infer_type(loss).checked_type.dtype)\n )\n\n # This is not explained super clearly in the onnx spec, but masked values don't\n # contribute toward the final value in reduction\n select_weights *= relay.cast_like(mask_tensor, select_weights)\n\n weight_total = relay.sum(select_weights)\n return loss, weight_total\n\n @classmethod\n def _impl_v13(cls, inputs, attr, params):\n ignore_index = attr.get(\"ignore_index\", None)\n reduction = attr.get(\"reduction\", b\"mean\").decode(\"utf-8\")\n\n if reduction not in cls.VALID_REDUCTIONS:\n raise ValueError(\n f\"Unknown reduction type {reduction}, choices are {cls.VALID_REDUCTIONS}\"\n )\n\n input_tensor, target_tensor = inputs[0], inputs[1]\n if len(inputs) == 3:\n weight_tensor = inputs[2]\n else:\n weight_tensor = None\n\n loss, weight_total = cls.run_calculation(\n input_tensor,\n target_tensor,\n weight_tensor=weight_tensor,\n ignore_index=ignore_index,\n )\n if reduction == \"mean\":\n return relay.sum(loss) / weight_total\n if reduction == \"sum\":\n return 
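\n# NumPy sketch of the gather-based loss run_calculation computes above (no\n# class weights, no ignore_index; the log-probabilities are illustrative):\n#\n#     import numpy as np\n#     logp = np.log(np.array([[0.7, 0.3], [0.2, 0.8]], dtype=\"float32\"))\n#     target = np.array([0, 1])\n#     loss = -logp[np.arange(2), target]   # gather along the class axis\n#     mean_loss = loss.sum() / 2.0         # reduction=\"mean\" with unit weights\n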
relay.sum(loss)\n # Case reduction == 'none'\n return loss\n\n\nclass SoftmaxCrossEntropyLoss(OnnxOpConverter):\n \"\"\"Operator converter for SCE_loss\"\"\"\n\n @classmethod\n def _impl_v13(cls, inputs, attr, params):\n ignore_index = attr.get(\"ignore_index\", None)\n reduction = attr.get(\"reduction\", b\"mean\").decode(\"utf-8\")\n input_tensor, target_tensor = inputs[0], inputs[1]\n if len(inputs) == 3:\n weight_tensor = inputs[2]\n else:\n weight_tensor = None\n\n get_log_prob = attr[\"tvm_custom\"][\"num_outputs\"] == 2\n log_softmax_tensor = LogSoftmax.run_calculation(input_tensor, axes=[1])\n\n loss, weight_total = NegativeLogLikelihoodLoss.run_calculation(\n log_softmax_tensor,\n target_tensor,\n weight_tensor,\n ignore_index=ignore_index,\n )\n\n if reduction == \"mean\":\n loss = relay.sum(loss) / weight_total\n elif reduction == \"sum\":\n loss = relay.sum(loss)\n\n if get_log_prob:\n return relay.TupleWrapper(relay.Tuple((loss, log_softmax_tensor)), 2)\n return loss\n\n\nclass Adagrad(OnnxOpConverter):\n \"\"\"Operator converter for adagrad op.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n decay_factor = attr.get(\"decay_factor\", 0.0)\n epsilon = attr.get(\"epsilon\", 0.0)\n norm_coefficient = attr.get(\"norm_coefficient\", 0.0)\n\n R = inputs[0]\n T = inputs[1]\n\n # convert attributes to constants, proper types\n dtype_inputs = infer_type(inputs[3]).checked_type.dtype\n decay_factor = relay.const(decay_factor, dtype=dtype_inputs)\n epsilon = relay.const(epsilon, dtype=dtype_inputs)\n norm_coefficient = relay.const(norm_coefficient, dtype=dtype_inputs)\n T = relay.cast_like(T, inputs[3])\n\n assert (\n len(inputs) - 2\n ) % 3 == 0, f\"Expect triplets for remaining inputs, found {len(inputs) - 2}\"\n\n # Remaining inputs are:\n # [x_1, x_2 ..., x_1_gradient, x_2_gradient, ... 
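\n# NumPy check of the composition SoftmaxCrossEntropyLoss relies on above:\n# SCE(x, target) == NLL(log_softmax(x), target). Values are illustrative.\n#\n#     import numpy as np\n#     x = np.array([[1.0, 2.0, 0.5]], dtype=\"float32\")\n#     logp = x - np.log(np.exp(x).sum(axis=-1, keepdims=True))  # log_softmax\n#     loss = -logp[0, 1]                                        # target class 1\n# 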
x_1_sq_g, x_2_sq_g...]\n num_input_tensors = (len(inputs) - 2) // 3\n output_tensors = []\n output_accumulated_squared_gradients = []\n for i in range(num_input_tensors):\n x = inputs[i + 2]\n gradient = inputs[i + 2 + num_input_tensors]\n accumulated_squared_gradient = inputs[i + 2 + 2 * num_input_tensors]\n\n r = R / (relay.const(1.0, dtype=dtype_inputs) + T * decay_factor)\n g_regularized = norm_coefficient * x + gradient\n new_accumulated_squared_gradient = (\n accumulated_squared_gradient + g_regularized * g_regularized\n )\n h_adaptive = relay.sqrt(new_accumulated_squared_gradient) + epsilon\n\n x_new = x - r * g_regularized / h_adaptive\n\n output_tensors.append(x_new)\n output_accumulated_squared_gradients.append(new_accumulated_squared_gradient)\n\n # append lists together, momentums come after result tensors\n result = output_tensors + output_accumulated_squared_gradients\n return _expr.TupleWrapper(_expr.Tuple(result), len(result))\n\n\nclass Adam(OnnxOpConverter):\n \"\"\"Operator converter for Adam op.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n alpha = attr.get(\"alpha\", 0.9)\n beta = attr.get(\"beta\", 0.999)\n\n # Note in the docs epsilon default is 0.0 but in the tests it is set to 1e-2:\n # https://git.io/Ju5C4\n epsilon = attr.get(\"epsilon\", 1e-2)\n norm_coefficient = attr.get(\"norm_coefficient\", 0.0)\n norm_coefficient_post = attr.get(\"norm_coefficient_post\", 0.0)\n\n R = inputs[0]\n T = inputs[1]\n\n assert (\n len(inputs) - 2\n ) % 4 == 0, f\"Expect 4-lets for remaining inputs, found {len(inputs) - 2}\"\n\n # convert attributes to constants, proper types\n dtype_inputs = infer_type(inputs[3]).checked_type.dtype\n inverse_alpha = relay.const(1 - alpha, dtype=dtype_inputs)\n alpha = relay.const(alpha, dtype=dtype_inputs)\n inverse_beta = relay.const(1 - beta, dtype=dtype_inputs)\n beta = relay.const(beta, dtype=dtype_inputs)\n epsilon = relay.const(epsilon, dtype=dtype_inputs)\n norm_coefficient = relay.const(norm_coefficient, dtype=dtype_inputs)\n norm_coefficient_post = relay.const(norm_coefficient_post, dtype=dtype_inputs)\n one = relay.const(1, dtype=dtype_inputs)\n T = relay.cast_like(T, inputs[3])\n\n # Remaining inputs are:\n # [x_1, x_2 ..., x_1_grad, x_2_grad, ... 
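\n# Per-tensor NumPy form of the Adagrad step applied in the loop above\n# (scalar example; the hyperparameter values are illustrative):\n#\n#     import numpy as np\n#     lr, T, decay_factor, eps, norm_coeff = 0.1, 2.0, 0.0, 1e-7, 0.0\n#     x, grad, sq_acc = 1.0, 0.5, 0.25\n#     r = lr / (1.0 + T * decay_factor)\n#     g_reg = norm_coeff * x + grad\n#     sq_acc_new = sq_acc + g_reg * g_reg\n#     x_new = x - r * g_reg / (np.sqrt(sq_acc_new) + eps)\n# 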
x_1_g_accum, x_2_g_accum..., x_1_g_sq_accum, ...]\n num_input_tensors = (len(inputs) - 2) // 4\n output_tensors = []\n output_accumulated_gradients = []\n output_accumulated_squared_gradients = []\n for i in range(num_input_tensors):\n x = inputs[i + 2]\n g = inputs[i + 2 + num_input_tensors]\n v = inputs[i + 2 + 2 * num_input_tensors]\n h = inputs[i + 2 + 3 * num_input_tensors]\n\n g_regularized = norm_coefficient * x + g\n v_new = alpha * v + inverse_alpha * g_regularized\n h_new = beta * h + inverse_beta * g_regularized * g_regularized\n h_sqrt = relay.sqrt(h_new) + epsilon\n\n true_branch = R * relay.sqrt(one - relay.power(beta, T)) / (one - relay.power(alpha, T))\n R_adjusted = relay.If(T > relay.const(0, dtype=dtype_inputs), true_branch, R)\n\n x_new = x - R_adjusted * (v_new / h_sqrt)\n x_result = (one - norm_coefficient_post) * x_new\n\n output_tensors.append(x_result)\n output_accumulated_gradients.append(v_new)\n output_accumulated_squared_gradients.append(h_new)\n\n # append lists together to get final result\n result = (\n output_tensors + output_accumulated_gradients + output_accumulated_squared_gradients\n )\n return _expr.TupleWrapper(_expr.Tuple(result), len(result))\n\n\nclass Momentum(OnnxOpConverter):\n \"\"\"Operator converter for Momentum op.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attr, params):\n alpha = attr[\"alpha\"]\n beta = attr[\"beta\"]\n mode = attr[\"mode\"].decode(\"utf-8\")\n norm_coefficient = attr[\"norm_coefficient\"]\n\n assert mode in [\"nesterov\", \"standard\"], f\"Unknown momentum mode {mode}\"\n R = inputs[0]\n T = inputs[1]\n\n assert (\n len(inputs) - 2\n ) % 3 == 0, f\"Expect triplets for remaining inputs, found {len(inputs) - 2}\"\n # Remaining inputs are:\n # [x_1, x_2 ..., x_1_gradient, x_2_gradient, ... 
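\n# Per-tensor NumPy form of the bias-corrected Adam step above (scalar example;\n# norm coefficients omitted, i.e. assumed 0; values are illustrative):\n#\n#     import numpy as np\n#     R, T, alpha, beta, eps = 0.01, 3.0, 0.9, 0.999, 1e-2\n#     x, g, v, h = 1.0, 0.5, 0.1, 0.05\n#     v_new = alpha * v + (1 - alpha) * g      # first moment\n#     h_new = beta * h + (1 - beta) * g * g    # second moment\n#     R_adj = R * np.sqrt(1 - beta ** T) / (1 - alpha ** T) if T > 0 else R\n#     x_new = x - R_adj * v_new / (np.sqrt(h_new) + eps)\n# 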
x_1_momentum, x_2_momentum...]\n num_input_tensors = (len(inputs) - 2) // 3\n\n # convert attributes to constants\n dtype_inputs = infer_type(inputs[3]).checked_type.dtype\n alpha = relay.const(alpha, dtype=dtype_inputs)\n beta = relay.const(beta, dtype=dtype_inputs)\n norm_coefficient = relay.const(norm_coefficient, dtype=dtype_inputs)\n default_beta = relay.const(1.0, dtype=dtype_inputs)\n\n # Calculate updated values for every input\n output_tensors = []\n output_momentums = []\n for i in range(num_input_tensors):\n x = inputs[i + 2]\n gradient = inputs[i + 2 + num_input_tensors]\n momentum = inputs[i + 2 + 2 * num_input_tensors]\n g_regularized = norm_coefficient * x + gradient\n beta_adjusted = relay.If(T > relay.const(0, dtype=\"int64\"), beta, default_beta)\n new_momentum = alpha * momentum + beta_adjusted * g_regularized\n\n if mode == \"standard\":\n x_output = x - R * new_momentum\n else:\n # mode == 'nesterov'\n x_output = x - R * (g_regularized + alpha * new_momentum)\n\n output_tensors.append(x_output)\n output_momentums.append(new_momentum)\n\n # append lists together, momentums come after result tensors\n result = output_tensors + output_momentums\n return _expr.TupleWrapper(_expr.Tuple(result), len(result))\n\n\n# compatible operators that do NOT require any conversion.\n_identity_list = []\n\n\n# _convert_map defines maps of name to converter functor(callable)\n# for 1 to 1 mapping, use Renamer if nothing but name is different\n# use AttrCvt if attributes need to be converted\n# for 1 to N mapping(composed), use custom callable functions\n# for N to 1 mapping, currently not supported(?)\ndef _get_convert_map(opset):\n return {\n # defs/experimental\n \"Identity\": Renamer(\"copy\"),\n \"Affine\": Affine.get_converter(opset),\n \"BitShift\": BitShift.get_converter(opset),\n \"ThresholdedRelu\": ThresholdedRelu.get_converter(opset),\n \"ScaledTanh\": ScaledTanh.get_converter(opset),\n \"ParametricSoftplus\": ParametricSoftPlus.get_converter(opset),\n \"Constant\": Constant.get_converter(opset),\n \"ConstantOfShape\": ConstantOfShape.get_converter(opset),\n # 'GivenTensorFill'\n \"FC\": AttrCvt(\"dense\", ignores=[\"axis\", \"axis_w\"]),\n \"Scale\": Scale.get_converter(opset),\n # 'GRUUnit'\n # 'ATen'\n # 'ImageScaler'\n # 'MeanVarianceNormalization'\n # 'Crop'\n # 'Embedding'\n \"Upsample\": Upsample.get_converter(opset),\n \"SpatialBN\": BatchNorm.get_converter(opset),\n # defs/generator\n # 'Constant' # Implemented\n # 'RandomUniform'\n # 'RandomNormal'\n # 'RandomUniformLike'\n # 'RandomNormalLike'\n # defs/logical\n # defs/math\n \"Add\": Add.get_converter(opset),\n \"Sub\": Sub.get_converter(opset),\n \"Mul\": Mul.get_converter(opset),\n \"Div\": Div.get_converter(opset),\n \"Neg\": Renamer(\"negative\"),\n \"Abs\": Absolute.get_converter(opset),\n \"Reciprocal\": Reciprocal.get_converter(opset),\n \"Floor\": Renamer(\"floor\"),\n \"Ceil\": Renamer(\"ceil\"),\n \"Round\": Renamer(\"round\"),\n \"IsInf\": IsInf.get_converter(opset),\n \"IsNaN\": Renamer(\"isnan\"),\n \"Sqrt\": Renamer(\"sqrt\"),\n \"Relu\": Renamer(\"relu\"),\n \"Celu\": Celu.get_converter(opset),\n \"LeakyRelu\": Renamer(\"leaky_relu\"),\n \"Selu\": Selu.get_converter(opset),\n \"Elu\": Elu.get_converter(opset),\n \"Gelu\": Gelu.get_converter(opset),\n \"BiasGelu\": BiasGelu.get_converter(opset),\n # TODO: We need a better way to handle different domains, in case\n # of name collisions. 
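\n# Per-tensor NumPy form of the Momentum step above (standard vs. nesterov;\n# for T == 0 a default beta of 1.0 is used, mirroring beta_adjusted):\n#\n#     R, alpha, beta, norm_coeff = 0.01, 0.9, 1.0, 0.0\n#     x, grad, momentum, T = 1.0, 0.5, 0.1, 1\n#     g_reg = norm_coeff * x + grad\n#     beta_adj = beta if T > 0 else 1.0\n#     m_new = alpha * momentum + beta_adj * g_reg\n#     x_std = x - R * m_new                    # mode == \"standard\"\n#     x_nes = x - R * (g_reg + alpha * m_new)  # mode == \"nesterov\"\n# 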
EmbedLayerNormalization, SkipLayerNormalization, and Attention\n # are in the `com.microsoft` domain.\n \"EmbedLayerNormalization\": EmbedLayerNormalization.get_converter(opset),\n \"SkipLayerNormalization\": SkipLayerNormalization.get_converter(opset),\n \"Attention\": Attention.get_converter(opset),\n \"Exp\": Renamer(\"exp\"),\n \"Greater\": Renamer(\"greater\"),\n \"GreaterOrEqual\": Renamer(\"greater_equal\"),\n \"Less\": Renamer(\"less\"),\n \"LessOrEqual\": Renamer(\"less_equal\"),\n \"Log\": Renamer(\"log\"),\n \"Acos\": Renamer(\"acos\"),\n \"Acosh\": Renamer(\"acosh\"),\n \"Asin\": Renamer(\"asin\"),\n \"Asinh\": Renamer(\"asinh\"),\n \"Atan\": Renamer(\"atan\"),\n \"Atanh\": Renamer(\"atanh\"),\n \"Cos\": Renamer(\"cos\"),\n \"Cosh\": Renamer(\"cosh\"),\n \"Sin\": Renamer(\"sin\"),\n \"Sinh\": Renamer(\"sinh\"),\n \"Tan\": Renamer(\"tan\"),\n \"Tanh\": Renamer(\"tanh\"),\n \"Pow\": Pow.get_converter(opset),\n \"PRelu\": Prelu.get_converter(opset),\n \"Sigmoid\": Renamer(\"sigmoid\"),\n \"HardSigmoid\": HardSigmoid.get_converter(opset),\n \"HardSwish\": HardSwish.get_converter(opset),\n \"Max\": Maximum.get_converter(opset),\n \"Min\": Minimum.get_converter(opset),\n \"Sum\": Sum.get_converter(opset),\n \"Mean\": Mean.get_converter(opset),\n \"Clip\": Clip.get_converter(opset),\n \"Softplus\": Softplus.get_converter(opset),\n # softmax default axis is different in onnx\n \"Softmax\": Softmax.get_converter(opset),\n \"LogSoftmax\": LogSoftmax.get_converter(opset),\n \"OneHot\": OneHot.get_converter(opset),\n \"Hardmax\": Hardmax.get_converter(opset),\n \"Shrink\": Shrink.get_converter(opset),\n \"Softsign\": Softsign.get_converter(opset),\n \"Gemm\": Gemm.get_converter(opset),\n \"MatMul\": MatMul.get_converter(opset),\n \"MatMulInteger\": MatMulInteger.get_converter(opset),\n \"MatMulInteger16\": MatMulInteger16.get_converter(opset),\n \"Mod\": Mod.get_converter(opset),\n \"Xor\": Renamer(\"logical_xor\"),\n # defs/nn\n \"AveragePool\": AveragePool.get_converter(opset),\n \"LpPool\": LpPool.get_converter(opset),\n \"GlobalLpPool\": GlobalLpPool.get_converter(opset),\n \"MaxPool\": MaxPool.get_converter(opset),\n \"MaxUnpool\": MaxUnpool.get_converter(opset),\n \"Conv\": Conv.get_converter(opset),\n \"ConvTranspose\": ConvTranspose.get_converter(opset),\n \"GlobalAveragePool\": GlobalAveragePool.get_converter(opset),\n \"GlobalMaxPool\": GlobalMaxPool.get_converter(opset),\n \"BatchNormalization\": BatchNorm.get_converter(opset),\n \"InstanceNormalization\": InstanceNorm.get_converter(opset),\n # 'LpNormalization'\n \"Dropout\": AttrCvt(\"dropout\", {\"ratio\": \"rate\"}, ignores=[\"is_test\"]),\n \"Flatten\": Flatten.get_converter(opset),\n \"LRN\": LRN.get_converter(opset),\n # Recurrent Layers\n \"LSTM\": LSTM.get_converter(opset),\n \"GRU\": GRU.get_converter(opset),\n # defs/vision\n \"MaxRoiPool\": MaxRoiPool.get_converter(opset),\n \"RoiAlign\": RoiAlign.get_converter(opset),\n \"NonMaxSuppression\": NonMaxSuppression.get_converter(opset),\n # defs/reduction\n \"ReduceMax\": ReduceMax.get_converter(opset),\n \"ReduceMin\": ReduceMin.get_converter(opset),\n \"ReduceSum\": ReduceSum.get_converter(opset),\n \"ReduceMean\": ReduceMean.get_converter(opset),\n \"ReduceProd\": ReduceProd.get_converter(opset),\n \"ReduceLogSumExp\": ReduceLogSumExp.get_converter(opset),\n \"ReduceLogSum\": ReduceLogSum.get_converter(opset),\n \"ReduceSumSquare\": ReduceSumSquare.get_converter(opset),\n \"ReduceL1\": ReduceL1.get_converter(opset),\n \"ReduceL2\": ReduceL2.get_converter(opset),\n # 
defs/sorting\n \"ArgMax\": ArgMax.get_converter(opset),\n \"ArgMin\": ArgMin.get_converter(opset),\n \"TopK\": TopK.get_converter(opset),\n # defs/tensor\n \"Cast\": Cast.get_converter(opset),\n \"Reshape\": Reshape.get_converter(opset),\n \"Expand\": Expand.get_converter(opset),\n \"Concat\": Concat.get_converter(opset),\n \"Split\": Split.get_converter(opset),\n \"Slice\": Slice.get_converter(opset),\n \"Transpose\": AttrCvt(\"transpose\", {\"perm\": \"axes\"}),\n \"DepthToSpace\": DepthToSpace.get_converter(opset),\n \"SpaceToDepth\": SpaceToDepth.get_converter(opset),\n \"Gather\": Gather.get_converter(opset),\n \"GatherElements\": GatherElements.get_converter(opset),\n \"GatherND\": GatherND.get_converter(opset),\n \"Compress\": Compress.get_converter(opset),\n \"Size\": AttrCvt(\"ndarray_size\", extras={\"dtype\": \"int64\"}),\n \"Scatter\": Scatter.get_converter(opset),\n \"ScatterElements\": Scatter.get_converter(opset),\n \"ScatterND\": ScatterND.get_converter(opset),\n \"EyeLike\": EyeLike.get_converter(opset),\n \"Squeeze\": Squeeze.get_converter(opset),\n \"Unsqueeze\": Unsqueeze.get_converter(opset),\n \"Pad\": Pad.get_converter(opset),\n \"Shape\": Shape.get_converter(opset),\n \"Sign\": Sign.get_converter(opset),\n \"Equal\": Equal.get_converter(opset),\n \"Not\": Not.get_converter(opset),\n \"And\": And.get_converter(opset),\n \"Tile\": Tile.get_converter(opset),\n \"Erf\": Erf.get_converter(opset),\n \"Where\": Where.get_converter(opset),\n \"Or\": Or.get_converter(opset),\n \"Resize\": Resize.get_converter(opset),\n \"NonZero\": NonZero.get_converter(opset),\n \"Range\": Range.get_converter(opset),\n \"CumSum\": CumSum.get_converter(opset),\n \"Unique\": Unique.get_converter(opset),\n \"Einsum\": Einsum.get_converter(opset),\n # defs/control_flow\n \"Loop\": Loop.get_converter(opset),\n \"If\": If.get_converter(opset),\n # Torch ATen Dispatcher.\n \"ATen\": ATen.get_converter(opset),\n # Quantization\n \"QuantizeLinear\": QuantizeLinear.get_converter(opset),\n \"DequantizeLinear\": DequantizeLinear.get_converter(opset),\n \"DynamicQuantizeLinear\": DynamicQuantizeLinear.get_converter(opset),\n \"ReverseSequence\": ReverseSequence.get_converter(opset),\n \"QLinearConv\": QLinearConv.get_converter(opset),\n \"QLinearConcat\": QLinearConcat.get_converter(opset),\n \"QLinearAdd\": QLinearAdd.get_converter(opset),\n \"QLinearMatMul\": QLinearMatMul.get_converter(opset),\n \"QLinearMul\": QLinearMul.get_converter(opset),\n \"QLinearSigmoid\": QLinearSigmoid.get_converter(opset),\n \"ConvInteger\": ConvInteger.get_converter(opset),\n \"QLinearAveragePool\": QLinearAveragePool.get_converter(opset),\n \"QLinearGlobalAveragePool\": QLinearGlobalAveragePool.get_converter(opset),\n \"QLinearLeakyRelu\": QLinearLeakyRelu.get_converter(opset),\n # Random number generation.\n \"RandomNormal\": RandomNormal.get_converter(opset),\n \"RandomNormalLike\": RandomNormalLike.get_converter(opset),\n \"RandomUniform\": RandomUniform.get_converter(opset),\n \"RandomUniformLike\": RandomUniformLike.get_converter(opset),\n # Loss functions / training\n \"NegativeLogLikelihoodLoss\": NegativeLogLikelihoodLoss.get_converter(opset),\n \"SoftmaxCrossEntropyLoss\": SoftmaxCrossEntropyLoss.get_converter(opset),\n \"Adagrad\": Adagrad.get_converter(opset),\n \"Adam\": Adam.get_converter(opset),\n \"Momentum\": Momentum.get_converter(opset),\n \"Scan\": Scan.get_converter(opset),\n # ML\n \"LinearRegressor\": LinearRegressor.get_converter(opset),\n }\n\n\nclass GraphProto:\n \"\"\"A helper class for 
handling Relay expression copying from pb2.GraphProto.\n Definition: https://github.com/onnx/onnx/blob/main/onnx/onnx.proto\n\n Parameters\n ----------\n shape : dict of str to tuple, optional\n The input shape to the graph\n\n dtype : str or dict of str to str\n The input types to the graph\n\n freeze_params: bool\n If this parameter is true, the importer will take any provided\n onnx input values (weights, shapes, etc) and embed them into the relay model\n as Constants instead of variables. This allows more aggressive optimizations\n at compile time and helps in making models static if certain inputs represent\n attributes relay would traditionally consider compile-time constants.\n\n \"\"\"\n\n current = None\n\n def __init__(self, shape, dtype, freeze_params=False):\n self._nodes = {}\n self._params = {}\n self._inputs = {}\n self._renames = {}\n self._num_input = 0\n self._num_param = 0\n self._shape = shape.copy() if shape else {}\n self._input_names = []\n self._dtype = dtype\n self.opset = None\n self._freeze_params = freeze_params\n\n def __enter__(self):\n self._old_manager = GraphProto.current\n GraphProto.current = self\n return self\n\n def __exit__(self, ptype, value, trace):\n GraphProto.current = self._old_manager\n\n def freeze(self, func, params):\n bind_map = {}\n for name in params.keys():\n if name in self._nodes.keys():\n bind_map[self._nodes[name]] = _expr.const(params[name])\n body = _expr.bind(func.body, bind_map)\n fn = _function.Function(analysis.free_vars(body), body)\n return fn, {}\n\n def from_onnx(self, graph, opset, get_output_expr=False):\n \"\"\"Construct Relay expression from ONNX graph.\n\n Onnx graph is a python protobuf object.\n The companion parameters will be handled automatically.\n However, the input names from onnx graph is vague, mixing inputs and\n network weights/bias such as \"1\", \"2\"...\n For convenience, we rename the `real` input names to \"input_0\",\n \"input_1\"... And renaming parameters to \"param_0\", \"param_1\"...\n\n Parameters\n ----------\n graph : onnx protobuf object\n The loaded onnx graph\n\n opset : opset version\n\n get_output_expr: bool\n If set to true, this conversion will return each output expression rather\n than a packaged module. 
This can be useful when converting subgraphs to\n relay.\n\n Returns\n -------\n mod : tvm.IRModule\n The returned relay module\n\n params : dict\n A dict of name: tvm.nd.array pairs, used as pretrained weights\n \"\"\"\n self.opset = opset\n self._parse_graph_initializers(graph)\n self._parse_graph_input(graph)\n self._check_user_inputs_in_outermost_graph_scope()\n self._check_for_unsupported_ops(graph)\n self._construct_nodes(graph)\n\n # now return the outputs\n outputs = [self._nodes[self._parse_value_proto(i)] for i in graph.output]\n outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs)\n # If requested, directly return the converted expressions.\n if get_output_expr:\n return outputs\n ## Maintain the order of inputs and parameters from the ONNX graph, but only include\n ## those parameters that are needed to execute the relay graph\n free_vars = analysis.free_vars(outputs)\n nodes = {v: k for k, v in self._nodes.items()}\n free_vars = [nodes[var] for var in free_vars]\n for i_name in self._params:\n if i_name in free_vars and i_name not in self._inputs:\n self._inputs[i_name] = self._nodes[i_name]\n # Create a function from our output expression and all input variables.\n func = _function.Function([v for k, v in self._inputs.items()], outputs)\n return IRModule.from_expr(func), self._params\n\n def _parse_graph_initializers(self, graph):\n \"\"\"Parse network inputs to relay, aka parameters.\"\"\"\n for init_tensor in graph.initializer:\n if not init_tensor.name.strip():\n raise ValueError(\"Tensor's name is required.\")\n array = self._parse_array(init_tensor)\n if self._freeze_params:\n self._nodes[init_tensor.name] = _expr.const(array)\n else:\n self._params[init_tensor.name] = array\n self._nodes[init_tensor.name] = new_var(\n init_tensor.name,\n shape=self._params[init_tensor.name].shape,\n dtype=self._params[init_tensor.name].dtype,\n )\n\n def _parse_graph_input(self, graph):\n for i in graph.input:\n # from onnx v0.2, GraphProto.input has type ValueInfoProto,\n # and the name is 'i.name'\n i_name, i_shape, d_type, i_shape_name = get_info(i)\n if i_name in self._params:\n # i is a param instead of input\n self._num_param += 1\n self._nodes[i_name] = new_var(\n i_name, shape=self._params[i_name].shape, dtype=self._params[i_name].dtype\n )\n elif i_name in self._nodes:\n continue\n else:\n self._num_input += 1\n self._input_names.append(i_name)\n if i_name in self._shape:\n i_shape = self._shape[i_name]\n else:\n if \"?\" in str(i_shape):\n warning_msg = (\n \"Input %s has unknown dimension shapes: %s. 
\"\n \"Specifying static values may improve performance\"\n % (i_name, str(i_shape_name))\n )\n warnings.warn(warning_msg)\n if isinstance(self._dtype, dict):\n dtype = self._dtype[i_name] if i_name in self._dtype else d_type\n else:\n dtype = d_type\n self._nodes[i_name] = new_var(i_name, shape=i_shape, dtype=dtype)\n self._inputs[i_name] = self._nodes[i_name]\n\n def _check_user_inputs_in_outermost_graph_scope(self):\n \"\"\"Only check user inputs in the outer-most graph scope.\"\"\"\n if self._old_manager is None:\n assert all(\n [name in self._input_names for name in self._shape.keys()]\n ), \"User specified the shape for inputs that weren't found in the graph: \" + str(\n self._shape\n )\n\n def _check_for_unsupported_ops(self, graph):\n convert_map = _get_convert_map(self.opset)\n unsupported_ops = set()\n for node in graph.node:\n op_name = node.op_type\n if (\n op_name not in convert_map\n and op_name != \"Constant\"\n and op_name not in _identity_list\n ):\n unsupported_ops.add(op_name)\n if unsupported_ops:\n msg = \"The following operators are not supported for frontend ONNX: \"\n msg += \", \".join(unsupported_ops)\n raise tvm.error.OpNotImplemented(msg)\n\n def _construct_nodes(self, graph):\n \"\"\"Nodes are stored as directed acyclic graph.\"\"\"\n for node in graph.node:\n op_name = node.op_type\n attr = self._parse_attr(node.attribute)\n # Create and populate input list.\n inputs = onnx_input()\n for i in node.input:\n if i != \"\":\n inputs.append(self._nodes[self._renames.get(i, i)])\n else:\n inputs.append(None)\n i_name = self._parse_value_proto(node)\n node_output = self._fix_outputs(op_name, node.output)\n attr[\"tvm_custom\"] = {}\n attr[\"tvm_custom\"][\"name\"] = i_name\n attr[\"tvm_custom\"][\"num_outputs\"] = len(node_output)\n\n op = self._convert_operator(op_name, inputs, attr, self.opset)\n if not isinstance(op, _expr.TupleWrapper):\n outputs_num = 1\n else:\n outputs_num = len(op)\n\n if outputs_num == 1:\n op = fold_constant(op)\n else:\n op = _expr.TupleWrapper(fold_constant(op.astuple()), len(op))\n\n if outputs_num > 1:\n # ONNX supports optional outputs for some nodes.\n # This block searches for missing outputs in the ONNX graph\n # and removes any unneeded ops\n valid_outputs = [False] * outputs_num\n for i, output in enumerate(node_output):\n if output != \"\":\n valid_outputs[i] = True\n # If we have outputs ONNX isn't expecting, we need to drop them\n if not all(valid_outputs):\n tup = op.astuple()\n # TupleWrapper can also wrap ops with TupleType outputs\n if isinstance(tup, _expr.Tuple):\n # For tuples, we extract the fields instead of using GetTupleItem\n outputs = [tup.fields[i] for i, valid in enumerate(valid_outputs) if valid]\n else:\n # For call nodes, we need to GetTupleItem\n outputs = [op[i] for i, valid in enumerate(valid_outputs) if valid]\n # Create the new op with valid outputs\n if len(outputs) == 1:\n op = outputs[0]\n elif len(outputs) != outputs_num:\n op = _expr.TupleWrapper(_expr.Tuple(outputs), len(outputs))\n # Drop invalid outputs for the onnx node\n outputs_num = len(outputs)\n node_output = [output for output in node_output if output != \"\"]\n assert (\n len(node_output) == outputs_num\n ), \"Number of output mismatch {} vs {} in {}.\".format(\n len(node_output), outputs_num, op_name\n )\n\n if outputs_num == 1:\n self._nodes[node_output[0]] = op\n else:\n for k, i in zip(list(node_output), range(len(node_output))):\n self._nodes[k] = op[i]\n\n def _parse_value_proto(self, value_proto):\n \"\"\"Parse ValueProto or raw 
str.\"\"\"\n try:\n name = value_proto.name\n except AttributeError:\n name = value_proto\n return name\n\n def _parse_array(self, tensor_proto):\n np_array = get_numpy(tensor_proto).reshape(tuple(tensor_proto.dims))\n return _nd.array(np_array)\n\n def _parse_attr(self, attr_proto):\n \"\"\"Convert a list of AttributeProto to a dict, with names as keys.\"\"\"\n attrs = {}\n for a in attr_proto:\n for f in [\"f\", \"i\", \"s\", \"g\"]:\n if a.HasField(f):\n attrs[a.name] = getattr(a, f)\n for f in [\"floats\", \"ints\", \"strings\"]:\n if list(getattr(a, f)):\n assert a.name not in attrs, \"Only one type of attr is allowed\"\n attrs[a.name] = tuple(getattr(a, f))\n for f in [\"t\"]:\n if a.HasField(f):\n attrs[a.name] = getattr(a, f)\n for f in [\"tensors\"]:\n if list(getattr(a, f)):\n assert a.name not in attrs, \"Only one type of attr is allowed\"\n attrs[a.name] = tuple(getattr(a, f))\n for f in [\"graphs\"]:\n if list(getattr(a, f)):\n raise NotImplementedError(\"Field {} is not supported in relay.\".format(f))\n if a.name not in attrs:\n raise ValueError(\"Cannot parse attribute: \\n{}\\n.\".format(a))\n return attrs\n\n def _convert_operator(self, op_name, inputs, attrs, opset):\n \"\"\"Convert ONNX operator into a Relay operator.\n The converter must specify conversions explicitly for incompatible name, and\n apply handlers to operator attributes.\n\n Parameters\n ----------\n op_name : str\n Operator name, such as Convolution, FullyConnected\n inputs : list of tvm.relay.function.Function\n List of inputs.\n attrs : dict\n Dict of operator attributes\n opset : int\n Opset version\n\n Returns\n -------\n sym : tvm.relay.function.Function\n Converted relay function\n \"\"\"\n convert_map = _get_convert_map(opset)\n if op_name in _identity_list:\n sym = get_relay_op(op_name)(*inputs, **attrs)\n elif op_name in convert_map:\n sym = convert_map[op_name](inputs, attrs, self._params)\n else:\n raise NotImplementedError(\"Operator {} not implemented.\".format(op_name))\n return sym\n\n def _fix_outputs(self, op_name, outputs):\n \"\"\"A hack to handle dropout or similar operator that have more than one out\n in ONNX.\n \"\"\"\n if op_name == \"Dropout\":\n if len(outputs) == 1:\n return outputs\n # TODO(zhreshold): support dropout mask?\n outputs = outputs[:-1]\n return outputs\n\n\ndef from_onnx(\n model, shape=None, dtype=\"float32\", opset=None, freeze_params=True, convert_config=None\n):\n \"\"\"Convert a ONNX model into an equivalent Relay Function.\n\n ONNX graphs are represented as Python Protobuf objects.\n The companion parameters will be handled automatically.\n However, the input names from onnx graph is vague, mixing inputs and\n network weights/bias such as \"1\", \"2\"...\n For convenience, we rename the `real` input names to \"input_0\",\n \"input_1\"... And renaming parameters to \"param_0\", \"param_1\"...\n\n By default, ONNX defines models in terms of dynamic shapes. The ONNX importer\n retains that dynamism upon import, and the compiler attempts to convert the\n model into a static shapes at compile time. If this fails, there may still\n be dynamic operations in the model. 
Not all TVM kernels currently support\n    dynamic shapes; please file an issue on discuss.tvm.apache.org\n    if you hit an error with dynamic kernels.\n\n    Parameters\n    ----------\n    model : protobuf object\n        ONNX ModelProto after ONNX v1.1.0\n\n    shape : dict of str to tuple, optional\n        The input shape to the graph\n\n    dtype : str or dict of str to str\n        The input types to the graph\n\n    opset : int, optional\n        Override the autodetected opset.\n        This can be helpful for some testing.\n\n    freeze_params: bool\n        If this parameter is true, the importer will take any provided\n        onnx input values (weights, shapes, etc) and embed them into the relay model\n        as Constants instead of variables. This allows more aggressive optimizations\n        at compile time and helps in making models static if certain inputs represent\n        attributes relay would traditionally consider compile-time constants.\n\n    convert_config : Optional[Dict[str, Any]]\n        Default config:\n            use_nt_batch_matmul : bool = True\n                True to convert qualified onnx `matmul` to `nn.batch_matmul` strictly in NT format\n                (transpose_a=False, transpose_b=True).\n\n    Returns\n    -------\n    mod : tvm.IRModule\n        The relay module for compilation\n\n    params : dict of str to tvm.nd.NDArray\n        The parameter dict to be used by relay\n    \"\"\"\n    global ONNX_DEFAULT_CONFIGS\n    if convert_config is not None:\n        ONNX_DEFAULT_CONFIGS.update(convert_config)\n\n    try:\n        import onnx\n\n        if hasattr(onnx.checker, \"check_model\"):\n            # try to use onnx's own model checker before converting any model\n            try:\n                onnx.checker.check_model(model)\n            except Exception as e:  # pylint: disable=c-extension-no-member, broad-except\n                # the checker is a bit violent about errors, so simply print warnings here\n                warnings.warn(str(e))\n    except ImportError:\n        pass\n    g = GraphProto(shape, dtype, freeze_params)\n    graph = model.graph\n\n    try:\n        opset_in_model = model.opset_import[0].version if model.opset_import else 1\n    except AttributeError:\n        opset_in_model = 1\n\n    if opset is None:\n        opset = opset_in_model\n    elif opset < opset_in_model:\n        warnings.warn(\n            f\"You are overwriting original opset ver = {opset_in_model} by lower ver = {opset}. \"\n            f\"That might cause model conversion errors.\"\n        )\n\n    # Use the graph proto as a scope so that ops can access other nodes if needed.\n    with g:\n        mod, params = g.from_onnx(graph, opset)\n\n    if freeze_params:\n        mod = relay.transform.DynamicToStatic()(mod)\n\n    return mod, params\n"
]
| [
[
"numpy.array",
"numpy.ones_like",
"numpy.asarray",
"numpy.zeros",
"numpy.prod",
"numpy.random.randint",
"numpy.sqrt",
"numpy.iinfo",
"numpy.dtype"
]
]
|
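
For context on the row above, here is a minimal usage sketch of the `from_onnx` importer it ships. The model path, input name, and input shape are illustrative assumptions, not values taken from the row; only the `relay.frontend.from_onnx` signature comes from the code itself.

```python
# Hedged usage sketch for the ONNX importer above. "model.onnx" and
# "input_0" are hypothetical; substitute your own model and input name.
import onnx
import tvm
from tvm import relay

onnx_model = onnx.load("model.onnx")          # hypothetical model file
shape_dict = {"input_0": (1, 3, 224, 224)}    # assumed input name/shape

# freeze_params=True embeds initializers as relay Constants, which lets the
# DynamicToStatic pass (run at the end of from_onnx) remove dynamic shapes.
mod, params = relay.frontend.from_onnx(
    onnx_model, shape=shape_dict, freeze_params=True
)

with tvm.transform.PassContext(opt_level=3):
    lib = relay.build(mod, target="llvm", params=params)
```

Passing a complete `shape` dict avoids the unknown-dimension warning emitted in `_parse_graph_input` and generally produces a fully static module.
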
selflein/manifold-flow | [
"2cc91c7acf61c8b4df07a940f0311ee93c39f0c7"
]
| [
"experiments/evaluation/mmd.py"
]
| [
"import itertools\nimport numpy as np\n\n\ndef sq_maximum_mean_discrepancy(xs, ys, wxs=None, wys=None, scale=None, return_scale=False):\n \"\"\"\n Finite sample estimate of square maximum mean discrepancy. Uses a gaussian kernel.\n :param xs: first sample\n :param ys: second sample\n :param wxs: weights for first sample, optional\n :param wys: weights for second sample, optional\n :param scale: kernel scale. If None, calculate it from data\n :return: squared mmd, scale if not given\n \"\"\"\n\n xs = np.asarray(xs)\n ys = np.asarray(ys)\n n_x = xs.shape[0]\n n_y = ys.shape[0]\n\n if wxs is not None:\n wxs = np.asarray(wxs)\n assert wxs.ndim == 1 and n_x == wxs.size\n\n if wys is not None:\n wys = np.asarray(wys)\n assert wys.ndim == 1 and n_y == wys.size\n\n xx_sq_dists = np.sum(np.array([x1 - x2 for x1, x2 in itertools.combinations(xs, 2)]) ** 2, axis=1)\n yy_sq_dists = np.sum(np.array([y1 - y2 for y1, y2 in itertools.combinations(ys, 2)]) ** 2, axis=1)\n xy_sq_dists = np.sum(np.array([x1 - y2 for x1, y2 in itertools.product(xs, ys)]) ** 2, axis=1)\n\n if scale is None:\n scale = np.median(np.sqrt(np.concatenate([xx_sq_dists, yy_sq_dists, xy_sq_dists])))\n elif scale == \"ys\":\n diffs = np.array([x1 - x2 for x1, x2 in itertools.combinations(ys, 2)])\n dists = np.sqrt(np.sum(diffs ** 2, axis=1))\n scale = np.median(dists)\n elif scale == \"xs\":\n diffs = np.array([x1 - x2 for x1, x2 in itertools.combinations(xs, 2)])\n dists = np.sqrt(np.sum(diffs ** 2, axis=1))\n scale = np.median(dists)\n\n c = -0.5 / (scale ** 2)\n\n if wxs is None:\n kxx = np.sum(np.exp(c * xx_sq_dists)) / (n_x * (n_x - 1))\n else:\n wxx = np.array([w1 * w2 for w1, w2 in itertools.combinations(wxs, 2)])\n kxx = np.sum(wxx * np.exp(c * xx_sq_dists)) / (1.0 - np.sum(wxs ** 2))\n\n if wys is None:\n kyy = np.sum(np.exp(c * yy_sq_dists)) / (n_y * (n_y - 1))\n else:\n wyy = np.array([w1 * w2 for w1, w2 in itertools.combinations(wys, 2)])\n kyy = np.sum(wyy * np.exp(c * yy_sq_dists)) / (1.0 - np.sum(wys ** 2))\n\n if wxs is None and wys is None:\n kxy = np.sum(np.exp(c * xy_sq_dists)) / (n_x * n_y)\n else:\n if wxs is None:\n wxs = np.full(n_x, 1.0 / n_x)\n if wys is None:\n wys = np.full(n_y, 1.0 / n_y)\n wxy = np.array([w1 * w2 for w1, w2 in itertools.product(wxs, wys)])\n kxy = np.sum(wxy * np.exp(c * xy_sq_dists))\n\n mmd2 = 2 * (kxx + kyy - kxy)\n\n if return_scale:\n return mmd2, scale\n else:\n return mmd2\n"
]
| [
[
"numpy.concatenate",
"numpy.full",
"numpy.asarray",
"numpy.median",
"numpy.sum",
"numpy.exp"
]
]
|
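
A quick sanity check helps read the estimator above: squared MMD between two samples of the same Gaussian should sit near zero, while a mean shift drives it up. This sketch assumes the module path from the row; sample sizes, the seed, and the 3-sigma shift are arbitrary choices.

```python
# Minimal smoke test for sq_maximum_mean_discrepancy (module path per the
# row above); all numeric choices here are illustrative.
import numpy as np

from experiments.evaluation.mmd import sq_maximum_mean_discrepancy

rng = np.random.default_rng(0)
xs = rng.normal(0.0, 1.0, size=(200, 2))
ys_same = rng.normal(0.0, 1.0, size=(200, 2))
ys_shift = rng.normal(3.0, 1.0, size=(200, 2))

# Let the estimator pick the median-heuristic kernel scale on the first
# pair, then reuse it so the two values are comparable.
mmd_same, scale = sq_maximum_mean_discrepancy(xs, ys_same, return_scale=True)
mmd_shift = sq_maximum_mean_discrepancy(xs, ys_shift, scale=scale)
assert mmd_shift > mmd_same
```
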
Basvanstein/nasbench301 | [
"2984dec45c760d47762f50efe39b71e9d1ac22e0"
]
| [
"nasbench301/surrogate_models/gnn/models/gincnn.py"
]
| [
"import torch\nfrom torch_geometric.nn import global_add_pool, global_mean_pool, global_max_pool, GlobalAttention, Set2Set\n\nfrom nasbench301.surrogate_models.gnn.gnn_utils import NODE_PRIMITIVES\nfrom nasbench301.surrogate_models.gnn.models.conv import GNN_node_Virtualnode, GNN_node\n\n\nclass NodeEncoder(torch.nn.Module):\n '''\n Input:\n x: default node feature. the first and second column represents node type and node attributes.\n depth: The depth of the node in the AST.\n\n Output:\n emb_dim-dimensional vector\n\n '''\n\n def __init__(self, emb_dim, num_nodetypes, num_nodeattributes):\n super(NodeEncoder, self).__init__()\n\n self.type_encoder = torch.nn.Embedding(num_nodetypes, emb_dim)\n self.attribute_encoder = torch.nn.Embedding(num_nodeattributes, emb_dim)\n\n def forward(self, x):\n return self.type_encoder(x[:, 0]) + self.attribute_encoder(x[:, 1])\n\n\nclass GIN(torch.nn.Module):\n\n def __init__(self, dim_features, dim_target, model_config, JK=\"last\"):\n super(GIN, self).__init__()\n\n self.config = model_config\n self.node_encoder = NodeEncoder(model_config['gnn_hidden_dimensions'], num_nodetypes=len(NODE_PRIMITIVES),\n num_nodeattributes=8)\n\n if model_config['virtual_node']:\n self.gnn_node = GNN_node_Virtualnode(num_layer=model_config['num_gnn_layers'],\n emb_dim=model_config['gnn_hidden_dimensions'],\n JK=JK, drop_ratio=model_config['dropout_prob'], residual=False,\n gnn_type='gin', node_encoder=self.node_encoder)\n else:\n self.gnn_node = GNN_node(num_layer=model_config['num_gnn_layers'],\n emb_dim=model_config['gnn_hidden_dimensions'],\n JK=JK, drop_ratio=model_config['drop_ratio'], residual=False,\n gnn_type='gin', node_encoder=self.node_encoder)\n if model_config['graph_pooling'] == \"sum\":\n self.pool = global_add_pool\n elif model_config['graph_pooling'] == \"mean\":\n self.pool = global_mean_pool\n elif model_config['graph_pooling'] == \"max\":\n self.pool = global_max_pool\n elif model_config['graph_pooling'] == \"attention\":\n self.pool = GlobalAttention(\n gate_nn=torch.nn.Sequential(\n torch.nn.Linear(model_config['gnn_hidden_dimensions'], 2 * model_config['gnn_hidden_dimensions']),\n torch.nn.BatchNorm1d(2 * model_config['gnn_hidden_dimensions']),\n torch.nn.ReLU(), torch.nn.Linear(2 * model_config['gnn_hidden_dimensions'], 1)))\n elif model_config['graph_pooling'] == \"set2set\":\n self.pool = Set2Set(model_config['gnn_hidden_dimensions'], processing_steps=2)\n else:\n raise ValueError(\"Invalid graph pooling type.\")\n\n self.graph_pred_linear_list = torch.nn.ModuleList()\n\n self.graph_pred_linear = torch.nn.Linear(model_config['gnn_hidden_dimensions'], 1)\n\n def forward(self, graph_batch):\n # Implement Equation 4.2 of the paper i.e. concat all layers' graph representations and apply linear model\n # note: this can be decomposed in one smaller linear model per layer\n\n h_node = self.gnn_node(graph_batch)\n\n h_graph = self.pool(h_node, graph_batch.batch)\n graph_output = self.graph_pred_linear(h_graph)\n return torch.sigmoid(graph_output.view(-1))\n"
]
| [
[
"torch.nn.Linear",
"torch.nn.ModuleList",
"torch.nn.ReLU",
"torch.nn.BatchNorm1d",
"torch.nn.Embedding"
]
]
|
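
A hedged construction sketch for the `GIN` wrapper above. The config keys are read directly from its `__init__`; the numeric values are illustrative. One quirk worth noting: the virtual-node branch reads `dropout_prob` while the plain branch reads `drop_ratio`, so a config meant to work for both should carry both keys, as below.

```python
# Construction sketch only (module path per the row above); forward()
# additionally expects a torch_geometric Batch with .x and .batch set.
from nasbench301.surrogate_models.gnn.models.gincnn import GIN

model_config = {
    "gnn_hidden_dimensions": 64,   # illustrative sizes throughout
    "num_gnn_layers": 4,
    "virtual_node": True,
    "dropout_prob": 0.1,           # read on the virtual-node path
    "drop_ratio": 0.1,             # read on the plain path
    "graph_pooling": "mean",
}

# dim_features and dim_target are accepted but effectively unused here:
# the head is hard-coded to a single sigmoid output.
model = GIN(dim_features=None, dim_target=1, model_config=model_config)
```
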
anurag-gandhi/pandas-profiling | [
"2373f3a299264f7b312dbe4b92edc14d36e8140e"
]
| [
"src/pandas_profiling/model/messages.py"
]
| [
"\"\"\"Logic for alerting the user on possibly problematic patterns in the data (e.g. high number of zeros , constant\nvalues, high correlations).\"\"\"\nfrom enum import Enum, auto, unique\nfrom typing import List, Union\n\nimport numpy as np\n\nfrom pandas_profiling.config import config\nfrom pandas_profiling.model.base import Variable\nfrom pandas_profiling.model.correlations import perform_check_correlation\n\n\n@unique\nclass MessageType(Enum):\n \"\"\"Message Types\"\"\"\n\n CONSTANT = auto()\n \"\"\"This variable has a constant value.\"\"\"\n\n ZEROS = auto()\n \"\"\"This variable contains zeros.\"\"\"\n\n HIGH_CORRELATION = auto()\n \"\"\"This variable is highly correlated.\"\"\"\n\n HIGH_CARDINALITY = auto()\n \"\"\"This variable has a high cardinality.\"\"\"\n\n UNSUPPORTED = auto()\n \"\"\"This variable is unsupported.\"\"\"\n\n DUPLICATES = auto()\n \"\"\"This variable contains duplicates.\"\"\"\n\n SKEWED = auto()\n \"\"\"This variable is highly skewed.\"\"\"\n\n MISSING = auto()\n \"\"\"This variable contains missing values.\"\"\"\n\n INFINITE = auto()\n \"\"\"This variable contains infinite values.\"\"\"\n\n TYPE_DATE = auto()\n \"\"\"This variable is likely a datetime, but treated as categorical.\"\"\"\n\n UNIQUE = auto()\n \"\"\"This variable has unique values.\"\"\"\n\n CONSTANT_LENGTH = auto()\n \"\"\"This variable has a constant length\"\"\"\n\n REJECTED = auto()\n \"\"\"Variables are rejected if we do not want to consider them for further analysis.\"\"\"\n\n UNIFORM = auto()\n \"\"\"The variable is uniformly distributed\"\"\"\n\n\nclass Message(object):\n \"\"\"A message object (type, values, column).\"\"\"\n\n def __init__(\n self,\n message_type: MessageType,\n values: dict,\n column_name: Union[str, None] = None,\n fields=None,\n ):\n if fields is None:\n fields = set()\n\n self.fields = fields\n self.message_type = message_type\n self.values = values\n self.column_name = column_name\n self.anchor_id = hash(column_name)\n\n def fmt(self):\n # TODO: render in template\n name = self.message_type.name.replace(\"_\", \" \")\n if name == \"HIGH CORRELATION\":\n num = len(self.values[\"fields\"])\n title = \", \".join(self.values[\"fields\"])\n name = f'<abbr title=\"This variable has a high correlation with {num} fields: {title}\">HIGH CORRELATION</abbr>'\n return name\n\n def __repr__(self):\n message_type = self.message_type.name\n column = self.column_name\n return f\"[{message_type}] warning on column {column}\"\n\n\ndef check_table_messages(table: dict) -> List[Message]:\n \"\"\"Checks the overall dataset for warnings.\n\n Args:\n table: Overall dataset statistics.\n\n Returns:\n A list of messages.\n \"\"\"\n messages = []\n if warning_value(table[\"n_duplicates\"]):\n messages.append(\n Message(\n message_type=MessageType.DUPLICATES,\n values=table,\n fields={\"n_duplicates\"},\n )\n )\n return messages\n\n\ndef check_variable_messages(col: str, description: dict) -> List[Message]:\n \"\"\"Checks individual variables for warnings.\n\n Args:\n col: The column name that is checked.\n description: The series description.\n\n Returns:\n A list of messages.\n \"\"\"\n messages = []\n\n # Missing\n if warning_value(description[\"p_missing\"]):\n messages.append(\n Message(\n column_name=col,\n message_type=MessageType.MISSING,\n values=description,\n fields={\"p_missing\", \"n_missing\"},\n )\n )\n\n if description[\"type\"] == Variable.S_TYPE_UNSUPPORTED:\n messages.append(\n Message(\n column_name=col,\n message_type=MessageType.UNSUPPORTED,\n values=description,\n 
fields={},\n )\n )\n elif description[\"distinct_count_with_nan\"] <= 1:\n messages.append(\n Message(\n column_name=col,\n message_type=MessageType.CONSTANT,\n values=description,\n fields={\"n_unique\"},\n )\n )\n\n if (\n description[\"type\"] == Variable.S_TYPE_UNSUPPORTED\n or description[\"distinct_count_with_nan\"] <= 1\n ):\n messages.append(\n Message(\n column_name=col,\n message_type=MessageType.REJECTED,\n values=description,\n fields={},\n )\n )\n elif description[\"distinct_count_without_nan\"] == description[\"n\"]:\n messages.append(\n Message(\n column_name=col,\n message_type=MessageType.UNIQUE,\n values=description,\n fields={\"n_unique\", \"p_unique\"},\n )\n )\n elif description[\"type\"] in [\n Variable.TYPE_NUM,\n Variable.TYPE_CAT,\n Variable.TYPE_DATE,\n ]:\n # Uniformity\n if description[\"type\"] == Variable.TYPE_CAT:\n # High cardinality\n if description[\"distinct_count\"] > config[\"vars\"][\"cat\"][\n \"cardinality_threshold\"\n ].get(int):\n messages.append(\n Message(\n column_name=col,\n message_type=MessageType.HIGH_CARDINALITY,\n values=description,\n fields={\"n_unique\"},\n )\n )\n\n chi_squared_threshold = config[\"vars\"][\"cat\"][\"chi_squared_threshold\"].get(\n float\n )\n else:\n chi_squared_threshold = config[\"vars\"][\"num\"][\"chi_squared_threshold\"].get(\n float\n )\n\n if (\n \"chi_squared\" in description\n and description[\"chi_squared\"][1] > chi_squared_threshold\n ):\n messages.append(\n Message(column_name=col, message_type=MessageType.UNIFORM, values={})\n )\n\n # Categorical\n if description[\"type\"] == Variable.TYPE_CAT:\n if \"date_warning\" in description and description[\"date_warning\"]:\n messages.append(\n Message(column_name=col, message_type=MessageType.TYPE_DATE, values={})\n )\n\n # Constant length\n if (\n \"composition\" in description\n and description[\"min_length\"] == description[\"max_length\"]\n ):\n messages.append(\n Message(\n column_name=col,\n message_type=MessageType.CONSTANT_LENGTH,\n values=description,\n fields={\"composition_min_length\", \"composition_max_length\"},\n )\n )\n\n # Numerical\n if description[\"type\"] == Variable.TYPE_NUM:\n # Skewness\n if warning_skewness(description[\"skewness\"]):\n messages.append(\n Message(\n column_name=col,\n message_type=MessageType.SKEWED,\n values=description,\n fields={\"skewness\"},\n )\n )\n\n # Infinite values\n if warning_value(description[\"p_infinite\"]):\n messages.append(\n Message(\n column_name=col,\n message_type=MessageType.INFINITE,\n values=description,\n fields={\"p_infinite\", \"n_infinite\"},\n )\n )\n\n # Zeros\n if warning_value(description[\"p_zeros\"]):\n messages.append(\n Message(\n column_name=col,\n message_type=MessageType.ZEROS,\n values=description,\n fields={\"n_zeros\", \"p_zeros\"},\n )\n )\n\n return messages\n\n\ndef check_correlation_messages(correlations):\n messages = []\n\n for corr, matrix in correlations.items():\n if config[\"correlations\"][corr][\"warn_high_correlations\"].get(bool):\n threshold = config[\"correlations\"][corr][\"threshold\"].get(float)\n correlated_mapping = perform_check_correlation(matrix, threshold)\n if len(correlated_mapping) > 0:\n for k, v in correlated_mapping.items():\n messages.append(\n Message(\n column_name=k,\n message_type=MessageType.HIGH_CORRELATION,\n values={\"corr\": corr, \"fields\": v},\n )\n )\n return messages\n\n\ndef warning_value(value: float) -> bool:\n return not np.isnan(value) and value > 0.01\n\n\ndef warning_skewness(v: float) -> bool:\n return not np.isnan(v) and 
(\n v < -config[\"vars\"][\"num\"][\"skewness_threshold\"].get(int)\n or v > config[\"vars\"][\"num\"][\"skewness_threshold\"].get(int)\n )\n\n\ndef warning_type_date(series):\n from dateutil.parser import parse, ParserError\n\n try:\n series.apply(parse)\n return True\n except ParserError:\n return False\n"
]
| [
[
"numpy.isnan"
]
]
|
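
The message machinery above is easiest to see on its smallest check: `check_table_messages` fires a `DUPLICATES` message whenever the duplicate count clears the fixed `> 0.01` threshold in `warning_value`. The stats dict in this sketch is hand-built and carries only the key that check reads.

```python
# Minimal sketch of the table-level check above (module path per the row).
from pandas_profiling.model.messages import check_table_messages

table_stats = {"n_duplicates": 3}   # any count >= 1 clears the threshold
messages = check_table_messages(table_stats)
print(messages)  # -> [[DUPLICATES] warning on column None]
```

The column is `None` because table-level messages are not tied to a variable; the per-variable checks in `check_variable_messages` attach a column name instead.
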
hsiehpinghan/ALBERT-TF2.0 | [
"1df5f735f7e9c4247644931f8de4412da72ccb79"
]
| [
"input_pipeline.py"
]
| [
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"BERT model input pipelines.\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport tensorflow as tf\nfrom absl import flags\n\nFLAGS = flags.FLAGS\n\n\ndef decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.io.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.cast(t, tf.int32)\n example[name] = t\n\n return example\n\n\ndef file_based_input_fn_builder(input_file, name_to_features):\n \"\"\"Creates an `input_fn` closure to be passed for BERT custom training.\"\"\"\n\n def input_fn():\n \"\"\"Returns dataset for training/evaluation.\"\"\"\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_file)\n d = d.map(lambda record: decode_record(record, name_to_features))\n\n # When `input_file` is a path to a single file or a list\n # containing a single path, disable auto sharding so that\n # same input file is sent to all workers.\n if isinstance(input_file, str) or len(input_file) == 1:\n options = tf.data.Options()\n options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF\n d = d.with_options(options)\n return d\n\n return input_fn\n\ndef create_pretrain_dataset(input_patterns,\n seq_length,\n max_predictions_per_seq,\n batch_size,\n is_training=True,\n input_pipeline_context=None):\n \"\"\"Creates input dataset from (tf)records files for pretraining.\"\"\"\n name_to_features = {\n 'input_ids':\n tf.io.FixedLenFeature([seq_length], tf.int64),\n 'input_mask':\n tf.io.FixedLenFeature([seq_length], tf.int64),\n 'segment_ids':\n tf.io.FixedLenFeature([seq_length], tf.int64),\n 'masked_lm_positions':\n tf.io.FixedLenFeature([max_predictions_per_seq], tf.int64),\n 'masked_lm_ids':\n tf.io.FixedLenFeature([max_predictions_per_seq], tf.int64),\n 'masked_lm_weights':\n tf.io.FixedLenFeature([max_predictions_per_seq], tf.float32),\n 'next_sentence_labels':\n tf.io.FixedLenFeature([1], tf.int64),\n }\n\n dataset = tf.data.Dataset.list_files(input_patterns, shuffle=is_training)\n\n if input_pipeline_context and input_pipeline_context.num_input_pipelines > 1:\n dataset = dataset.shard(input_pipeline_context.num_input_pipelines,\n input_pipeline_context.input_pipeline_id)\n\n dataset = dataset.repeat()\n\n # We set shuffle buffer to exactly match total number of\n # training files to ensure that training data is well shuffled.\n input_files = []\n for input_pattern in input_patterns:\n input_files.extend(tf.io.gfile.glob(input_pattern))\n dataset = 
dataset.shuffle(len(input_files))\n\n # In parallel, create tf record dataset for each train files.\n # cycle_length = 8 means that up to 8 files will be read and deserialized in\n # parallel. You may want to increase this number if you have a large number of\n # CPU cores.\n dataset = dataset.interleave(\n tf.data.TFRecordDataset, cycle_length=8,\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n decode_fn = lambda record: decode_record(record, name_to_features)\n dataset = dataset.map(\n decode_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n def _select_data_from_record(record):\n \"\"\"Filter out features to use for pretraining.\"\"\"\n x = {\n 'input_word_ids': record['input_ids'],\n 'input_mask': record['input_mask'],\n 'input_type_ids': record['segment_ids'],\n 'masked_lm_positions': record['masked_lm_positions'],\n 'masked_lm_ids': record['masked_lm_ids'],\n 'masked_lm_weights': record['masked_lm_weights'],\n 'next_sentence_labels': record['next_sentence_labels'],\n }\n\n y = record['masked_lm_weights']\n\n return (x, y)\n\n dataset = dataset.map(\n _select_data_from_record,\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n if is_training:\n dataset = dataset.shuffle(100)\n dataset = dataset.repeat()\n\n dataset = dataset.batch(batch_size, drop_remainder=True)\n dataset = dataset.prefetch(1024)\n return dataset\n\n\ndef create_classifier_dataset(file_path,\n seq_length,\n batch_size,\n is_training=True,\n drop_remainder=True):\n \"\"\"Creates input dataset from (tf)records files for train/eval.\"\"\"\n name_to_features = {\n 'input_ids': tf.io.FixedLenFeature([seq_length], tf.int64),\n 'input_mask': tf.io.FixedLenFeature([seq_length], tf.int64),\n 'segment_ids': tf.io.FixedLenFeature([seq_length], tf.int64),\n 'label_ids': tf.io.FixedLenFeature([], tf.int64) if FLAGS.task_name.lower() != \"sts\" else tf.io.FixedLenFeature([], tf.float32),\n 'is_real_example': tf.io.FixedLenFeature([], tf.int64),\n }\n input_fn = file_based_input_fn_builder(file_path, name_to_features)\n dataset = input_fn()\n\n def _select_data_from_record(record):\n x = {\n 'input_word_ids': record['input_ids'],\n 'input_mask': record['input_mask'],\n 'input_type_ids': record['segment_ids']\n }\n y = record['label_ids']\n return (x, y)\n\n dataset = dataset.map(_select_data_from_record)\n\n if is_training:\n dataset = dataset.shuffle(1024,reshuffle_each_iteration=True)\n if FLAGS.custom_training_loop:\n dataset = dataset.repeat()\n\n dataset = dataset.batch(batch_size, drop_remainder=drop_remainder)\n dataset = dataset.prefetch(1024)\n return dataset\n\n\ndef create_squad_dataset(file_path, seq_length, batch_size, is_training=True):\n \"\"\"Creates input dataset from (tf)records files for train/eval.\"\"\"\n name_to_features = {\n 'unique_ids': tf.io.FixedLenFeature([], tf.int64),\n 'input_ids': tf.io.FixedLenFeature([seq_length], tf.int64),\n 'input_mask': tf.io.FixedLenFeature([seq_length], tf.int64),\n 'segment_ids': tf.io.FixedLenFeature([seq_length], tf.int64),\n }\n if is_training:\n name_to_features['start_positions'] = tf.io.FixedLenFeature([], tf.int64)\n name_to_features['end_positions'] = tf.io.FixedLenFeature([], tf.int64)\n\n input_fn = file_based_input_fn_builder(file_path, name_to_features)\n dataset = input_fn()\n\n def _select_data_from_record(record):\n x, y = {}, {}\n for name, tensor in record.items():\n if name in ('start_positions', 'end_positions'):\n y[name] = tensor\n else:\n x[name] = tensor\n return (x, y)\n\n dataset = dataset.map(_select_data_from_record)\n\n if 
is_training:\n dataset = dataset.shuffle(100,reshuffle_each_iteration=True)\n if FLAGS.custom_training_loop:\n dataset = dataset.repeat()\n\n dataset = dataset.batch(batch_size, drop_remainder=False)\n dataset = dataset.prefetch(1024)\n return dataset\n\ndef create_squad_dataset_v2(file_path, seq_length, batch_size, is_training):\n \"\"\"Creates input dataset from (tf)records files for pretraining.\"\"\"\n name_to_features = {\n \"unique_ids\": tf.io.FixedLenFeature([], tf.int64),\n \"input_ids\": tf.io.FixedLenFeature([seq_length], tf.int64),\n \"input_mask\": tf.io.FixedLenFeature([seq_length], tf.int64),\n \"segment_ids\": tf.io.FixedLenFeature([seq_length], tf.int64),\n \"cls_index\": tf.io.FixedLenFeature([],tf.int64),\n \"p_mask\": tf.io.FixedLenFeature([seq_length], tf.float32)\n }\n\n if is_training:\n name_to_features[\"start_positions\"] = tf.io.FixedLenFeature([], tf.int64)\n name_to_features[\"end_positions\"] = tf.io.FixedLenFeature([], tf.int64)\n name_to_features[\"is_impossible\"] = tf.io.FixedLenFeature([], tf.float32)\n\n input_fn = file_based_input_fn_builder(file_path, name_to_features)\n dataset = input_fn()\n\n def _select_data_from_record(record):\n x, y = {}, {}\n for name, tensor in record.items():\n if name in ('start_positions','end_positions','is_impossible'):\n y[name] = tensor\n if name == 'start_positions':\n x[name] = tensor\n else:\n x[name] = tensor\n return (x, y)\n\n dataset = dataset.map(_select_data_from_record)\n\n if is_training:\n dataset = dataset.shuffle(100,reshuffle_each_iteration=True)\n if FLAGS.custom_training_loop:\n dataset = dataset.repeat()\n\n dataset = dataset.batch(batch_size, drop_remainder=False)\n dataset = dataset.prefetch(1024)\n return dataset\n"
]
| [
[
"tensorflow.data.Dataset.list_files",
"tensorflow.data.TFRecordDataset",
"tensorflow.io.gfile.glob",
"tensorflow.io.FixedLenFeature",
"tensorflow.io.parse_single_example",
"tensorflow.data.Options",
"tensorflow.cast"
]
]
|
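
A usage sketch for `create_classifier_dataset` above. The TFRecord name is hypothetical and `seq_length`/`batch_size` are illustrative; note the pipeline reads `FLAGS.task_name` and `FLAGS.custom_training_loop`, which the repo's run scripts normally define. They are declared here only to keep the sketch self-contained.

```python
# Hedged sketch: assumes a classifier TFRecord produced by the matching
# preprocessing step; flag values below stand in for the run scripts'.
from absl import flags

from input_pipeline import create_classifier_dataset

flags.DEFINE_string("task_name", "MNLI", "Read inside the pipeline.")
flags.DEFINE_bool("custom_training_loop", False, "Repeat dataset when True.")
flags.FLAGS(["sketch"])  # mark flags as parsed

dataset = create_classifier_dataset(
    file_path="train.tf_record",  # hypothetical path
    seq_length=128,
    batch_size=32,
    is_training=True,
)
for features, label_ids in dataset.take(1):
    # Keys follow _select_data_from_record above.
    print(features["input_word_ids"].shape)  # -> (32, 128)
```
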
lbdvriesGT/cardio | [
"da6c478739bd254ccc04ee0ba327fd1610d07a42",
"da6c478739bd254ccc04ee0ba327fd1610d07a42"
]
| [
"cardio/dataset/dataset/models/tf/inception_v1.py",
"cardio/dataset/dataset/models/torch/base.py"
]
| [
"\"\"\" Szegedy C. et al \"`Going Deeper with Convolutions\n<https://arxiv.org/abs/1409.4842>`_\"\n\"\"\"\nimport tensorflow.compat.v1 as tf\n\nfrom .inception_base import Inception\nfrom .layers import conv_block\n\n\n_DEFAULT_V1_ARCH = {\n 'b': {'filters': [\n [64, 96, 128, 16, 32, 32],\n [128, 128, 192, 32, 96, 64],\n [192, 96, 208, 16, 48, 64],\n [160, 112, 224, 24, 64, 64],\n [128, 128, 256, 24, 64, 64],\n [112, 144, 288, 32, 64, 64],\n [256, 160, 320, 32, 128, 128],\n [256, 160, 320, 32, 128, 128],\n [384, 192, 384, 48, 128, 128]]},\n 'r': {'layout': 'p', 'pool_size': 3, 'pool_strides': 2}\n}\n\n\nclass Inception_v1(Inception):\n \"\"\" Inception network, version 1\n\n **Configuration**\n\n inputs : dict\n dict with 'images' and 'labels' (see :meth:`~.TFModel._make_inputs`)\n\n body/arch : dict\n architecture: network layout, block layout, number of filters in each block, pooling parameters\n \"\"\"\n @classmethod\n def default_config(cls):\n \"\"\" Define model defaults. See :meth: `~.TFModel.default_config` \"\"\"\n config = Inception.default_config()\n config['common']['layout'] = 'cn'\n config['initial_block'] += dict(layout='cnp cn cn p', filters=[64, 64, 192],\n kernel_size=[7, 3, 3], strides=[2, 1, 1],\n pool_size=3, pool_strides=2)\n config['body']['arch'] = _DEFAULT_V1_ARCH\n config['body']['layout'] = 'bbrbbbbbrbb'\n config['head'].update(dict(layout='Vdf', dropout_rate=.4))\n config['loss'] = 'ce'\n\n return config\n\n @classmethod\n def block(cls, inputs, filters, layout='cn', name=None, **kwargs):\n \"\"\" Inception building block\n\n Parameters\n ----------\n inputs : tf.Tensor\n input tensor\n filters : list with 6 items:\n - number of filters in 1x1 conv\n - number of filters in 1x1 conv going before conv 3x3\n - number of filters in 3x3 conv\n - number of filters in 1x1 conv going before conv 5x5,\n - number of filters in 5x5 conv,\n - number of filters in 1x1 conv going before max-pooling\n layout : str\n a sequence of layers in the block. Default is 'cn'.\n name : str\n scope name\n\n Returns\n -------\n tf.Tensor\n \"\"\"\n with tf.variable_scope(name):\n branch_1 = conv_block(inputs, layout=layout, filters=filters[0], kernel_size=1, name='conv_1', **kwargs)\n\n branch_3 = conv_block(inputs, layout=layout*2, filters=[filters[1], filters[2]], kernel_size=[1, 3],\n name='conv_3', **kwargs)\n\n branch_5 = conv_block(inputs, layout=layout*2, filters=[filters[3], filters[4]], kernel_size=[1, 5],\n name='conv_5', **kwargs)\n\n branch_pool = conv_block(inputs, layout='p'+layout, filters=filters[5], kernel_size=1,\n name='conv_pool', **{**kwargs, 'pool_strides': 1})\n\n axis = cls.channels_axis(kwargs['data_format'])\n output = tf.concat([branch_1, branch_3, branch_5, branch_pool], axis, name='output')\n return output\n\n @classmethod\n def reduction_block(cls, inputs, layout='p', filters=None, name='reduction_block', **kwargs):\n \"\"\" Reduction block.\n\n Just a max pooling in 3x3 with strides=2\n\n Parameters\n ----------\n inputs : tf.Tensor\n input tensor\n name : str\n scope name\n\n Returns\n -------\n tf.Tensor\n \"\"\"\n output = conv_block(inputs, layout=layout, filters=filters, name=name, **kwargs)\n return output\n",
"\"\"\" Eager version of TorchModel. \"\"\"\nimport os\nimport re\nimport warnings\nimport threading\nimport inspect\nfrom collections import OrderedDict\nfrom functools import partial\n\nimport dill\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.nn as nn\n\ntry:\n import cupy as cp\n CUPY_AVAILABLE = True\nexcept ImportError:\n CUPY_AVAILABLE = False\n\nfrom .visualization import VisualizationMixin\nfrom .utils import unpack_fn_from_config, get_shape\nfrom .layers import ConvBlock\nfrom .losses import CrossEntropyLoss, BinaryLovaszLoss, LovaszLoss, SSIM, MSSIM\nfrom .losses import binary as binary_losses, multiclass as multiclass_losses\nfrom ..base import BaseModel\nfrom ... import Config\n\n\n\nLOSSES = {\n 'l1': nn.L1Loss,\n 'huber': nn.SmoothL1Loss,\n 'absolutedifference': nn.L1Loss,\n 'mse': nn.MSELoss,\n 'cos': nn.CosineSimilarity,\n 'cosine': nn.CosineSimilarity,\n 'hinge': nn.HingeEmbeddingLoss,\n 'ssim': SSIM,\n 'mssim': MSSIM,\n\n 'bce': nn.BCEWithLogitsLoss,\n 'bdice': binary_losses.Dice,\n 'btversky': binary_losses.Tversky,\n 'blovasz': BinaryLovaszLoss,\n\n 'ce': CrossEntropyLoss,\n 'crossentropy': CrossEntropyLoss,\n 'logloss': CrossEntropyLoss,\n 'dice': multiclass_losses.Dice,\n 'lovasz': LovaszLoss\n}\n\nDECAYS = {\n 'exp': torch.optim.lr_scheduler.ExponentialLR,\n 'lambda': torch.optim.lr_scheduler.LambdaLR,\n 'step': torch.optim.lr_scheduler.StepLR,\n 'multistep': torch.optim.lr_scheduler.MultiStepLR,\n 'cos': torch.optim.lr_scheduler.CosineAnnealingLR,\n}\n\nDECAYS_DEFAULTS = {\n torch.optim.lr_scheduler.ExponentialLR : dict(gamma=0.96),\n torch.optim.lr_scheduler.LambdaLR : dict(lr_lambda=lambda epoch: 0.96**epoch),\n torch.optim.lr_scheduler.StepLR: dict(step_size=30),\n torch.optim.lr_scheduler.MultiStepLR: dict(milestones=[30, 80]),\n torch.optim.lr_scheduler.CosineAnnealingLR: dict(T_max=None)\n}\n\n\n\nclass TorchModel(BaseModel, VisualizationMixin):\n r\"\"\" Base class for eager Torch models.\n\n Parameters\n ----------\n config : dict, :class:`~Config`\n Configuration of model creation. Below are the valid keys.\n\n inputs : dict, optional\n Mapping from placeholder names (e.g. ``images``, ``labels``, ``masks``) to arguments of their initialization.\n Allows to create placeholders of needed shape and data format and initialize model before\n first pass of actual batch data (thus explicitly imposing shapes).\n\n Value must be a dictionary with parameters. If some parameters are omitted, then defaults will be at use.\n Valid keys are:\n\n dtype : str or torch.dtype\n Data type. Default is 'float32'.\n\n shape : int, None, sequence of ints or Nones\n Tensor shape with channels and without batch size. Default is None.\n\n classes : int, array-like or None\n If int, then number of classes.\n If None, then tensor has no classes. Default is None.\n\n placeholder_batch_size : int\n If `inputs` is specified with all the required shapes, then it serves as size of batch dimension during\n placeholder (usually np.ndarrays with zeros) creation. Default value is 2.\n\n loss : str, dict\n Loss function, might be defined in multiple formats.\n\n If str, then short ``name``.\n If dict, then ``{'name': name, **kwargs}``.\n\n Name must be one of:\n - short name (e.g. ``'mse'``, ``'ce'``, ``'l1'``, ``'cos'``, ``'hinge'``,\n ``'huber'``, ``'logloss'``, ``'dice'``)\n - a class name from `torch losses <https://pytorch.org/docs/stable/nn.html#loss-functions>`_\n (e.g. 
``'PoissonNLL'`` or ``'TripletMargin'``)\n - an instance of `:class:torch.nn.Module`\n - callable\n\n Examples:\n\n - ``{'loss': 'mse'}``\n - ``{'loss': {'name': 'KLDiv', 'reduction': 'none'}}``\n - ``{'loss': {'name': MyCustomLoss, 'epsilon': 1e-6}}``\n - ``{'loss': my_custom_loss_fn}``\n\n optimizer : str, dict\n Optimizer, might be defined in multiple formats.\n\n If str, then short ``name``.\n If dict, then ``{'name': name, **kwargs}``.\n\n Name must be one of:\n - short name (e.g. ``'Adam'``, ``'Adagrad'``, any optimizer from\n `torch.optim <https://pytorch.org/docs/stable/optim.html#algorithms>`_)\n - a class with ``Optimizer`` interface\n - a callable which takes model parameters and optional args\n\n Examples:\n\n - ``{'optimizer': 'Adam'}``\n - ``{'optimizer': {'name': 'SparseAdam', 'lr': 0.01}}``\n - ``{'optimizer': {'name': 'Adagrad', 'initial_accumulator_value': 0.01}}``\n - ``{'optimizer': {'name': MyCustomOptimizer, 'momentum': 0.95}}``\n\n decay : dict, list of dicts\n The learning rate decay algorithm might be defined in multiple formats.\n All decays require to have 'frequency' as a key in a configuration dictionary.\n Parameter 'frequency' sets how often do decay step: at every `'frequency'`\n iteration. Each decay might have optional parameters 'first_iter' and 'last_iter'\n that defines the closed range of iterations where decay is at work.\n If you want to use a learning rate warmup and decay together,\n you should use a list of decays (see examples).\n\n If dict, then ``{'name': name, **kwargs}``.\n If list, then each item is a dict of format described above.\n\n Name must be one of:\n\n - a class name from `torch.optim.lr_scheduler\n <https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate>`_\n (e.g. ``'LambdaLR'``) except ``'ReduceLROnPlateau'``.\n - short name (``'exp'`` - ExponentialLR, ``'lambda'`` - LambdaLR, ``'step'`` - StepLR,\n ``'multistep'`` - MultiStepLR, ``'cos'`` - CosineAnnealingLR)\n - a class with ``_LRScheduler`` interface\n - a callable which takes optimizer and optional args\n\n Examples:\n\n - ``{'decay': {'name: 'exp', 'frequency': 5, 'first_iter': 6, 'last_iter': 20}}``\n - ``{'decay': {'name': 'StepLR', 'steps_size': 10000, 'frequency': 5}}``\n - ``{'decay': {'name': MyCustomDecay, 'decay_rate': .5, 'frequency': 15, 'first_iter': 400}``\n - ``{'decay': [{'name': 'exp', 'gamma': 1, 'frequency': 1, 'last_iter': 900},\n {'name': 'exp', 'gamma': 0.96, 'frequency': 2, 'first_iter': 901}]``\n\n device : str, torch.device or sequence\n If str, a device name (e.g. 'cpu' or 'gpu:0'). Regular expressions are also allowed (e.g. 'gpu:*').\n If torch.device, then device to be used.\n If sequence, then each entry must be in one of previous formats, and batch data is paralleled across them.\n Default behaviour is to use one (and only one) device of the best available type (priority to GPU over CPU).\n\n benchmark : bool\n Whether to optimize network's forward pass after the first batch. Can speed up training if shapes of inputs\n are constant.\n\n profile : bool\n Whether to collect stats of model training timings.\n If True, then stats can be accessed via `profile_info` attribute or :meth:`.show_profile_info` method.\n\n amp : bool\n Whether to use automated mixed precision during model training and inference. Default is True.\n The output type of predictions remains float32.\n\n sync_frequency : int\n How often to apply accumulated gradients to the weights. 
Default value is to apply them after each batch.\n\n microbatch : int, bool or None\n Also known as virtual batch. If int, then size of chunks to split every batch into.\n Allows to process given data sequentially, accumulating gradients from microbatches and applying them\n once in the end. Can be changed later in the `train` method. Batch size must be divisible by microbatch size.\n If True, then every batch is split into individual items (same as microbatch equals 1).\n If False or None, then feature is not used. Default is not to use microbatching.\n\n sam_rho : float\n Foret P. et al. \"`Sharpness-Aware Minimization for Efficiently Improving Generalization\n <https://arxiv.org/abs/2010.01412>`_\".\n If evaluates to False, then SAM is not used.\n If float, then controls the size of neighborhood (check the paper for details).\n sam_individual_norm : bool\n If True, then each gradient is scaled according to its own L2 norm.\n If False, then one common gradient norm is computed and used as a scaler for all gradients.\n\n callbacks : sequence of `:class:callbacks.BaseCallback`\n Callbacks to call at the end of each training iteration.\n\n order : sequence\n Defines sequence of network blocks in the architecture. Default is initial_block -> body -> head.\n Each element of the sequence must be either a string, a tuple or a dict.\n If string, then it is used as name of method to use, as config key to use, as name in model repr.\n For example, ``'initial_block'`` stands for using ``self.initial_block`` with config[`initial_block`]\n as parameters, and model representation would show this part of network as `initial_block`.\n If tuple, then it must have three elements: (block_name, config_name, method).\n If dict, then it must contain three keys: `block_name`, `config_name`, `method`.\n In cases of tuple and dict, `method` can also be callable.\n\n initial_block : dict\n User-defined module or parameters for the input block, usually\n :class:`~.torch.layers.ConvBlock` parameters.\n\n If ``initial_block/inputs`` is specified with a name or list of names,\n then it should contain names from ``inputs`` with info about shapes of tensors to be passed to `initial_block`.\n\n Examples:\n\n - ``{'initial_block/inputs': 'images'}``\n - ``{'initial_block': dict(inputs='features')}``\n - ``{'initial_block': dict(inputs='images', layout='nac nac', filters=64, kernel_size=[7, 3], strides=[1, 2])}``\n - ``{'initial_block': MyCustomModule(some_param=1, another_param=2)}``\n\n body : dict or nn.Module\n User-defined module or parameters for the base network layers,\n usually :class:`~.torch.layers.ConvBlock` parameters.\n\n head : dict or nn.Module\n User-defined module or parameters for the head layers,\n usually :class:`~.torch.layers.ConvBlock` parameters.\n\n predictions : str or callable\n An operation applied to the head output to make the predictions tensor which is used in the loss function.\n See :meth:`.TorchModel.output` for details.\n\n output : dict or list\n Auxiliary operations to apply to network predictions. See :meth:`.TorchModel.output` for details.\n\n common : dict\n Default parameters for all blocks (see :class:`~.torch.layers.ConvBlock`).\n\n\n **In order to create your own model, it is recommended to:**\n\n * Take a look at :class:`~.torch.layers.ConvBlock` since it is widely used as a building\n block almost everywhere.\n\n * Define model defaults (e.g. number of filters, dropout rates, etc) by overriding\n :meth:`.TorchModel.default_config`. 
Those parameters are then updated with external configuration dictionary.\n\n * Define config post-processing by overriding :meth:`~.TorchModel.build_config`.\n Its main use is to infer parameters that can't be known in advance (e.g. number of classes, shape of inputs).\n\n * Override :meth:`~.TorchModel.initial_block`, :meth:`~.TorchModel.body` and :meth:`~.TorchModel.head`, if needed.\n You can either use usual `Torch layers <https://pytorch.org/docs/stable/nn.html>`_,\n or predefined layers like :class:`~eager_torch.layers.PyramidPooling`.\n Conveniently, 'initial_block' is used to make pre-processing (e.g. reshaping or agressive pooling) of inputs,\n 'body' contains the meat of the network flow, and 'head' makes sure that the output is compatible with targets.\n\n\n **In order to use existing model, it is recommended to:**\n\n * If ``inputs`` key defines shapes for all tensors in ``initial_block/inputs``, then model is created off of\n placeholders (tensors with all zeros); otherwise, the first batch data is used to create model.\n\n * ``loss``, ``optimizer``, ``decay`` keys.\n\n * ``initial_block`` sub-dictionary with ``inputs`` key with names of tensors to use as network inputs.\n\n * ``initial_block``, ``body``, ``head`` keys are used to define behaviour of respective part of the network.\n Default behaviour is to support all of the :class:`~.torch.layers.ConvBlock` options.\n For complex models, take a look at default config of the chosen model to learn\n which parameters should be configured.\n \"\"\"\n PRESERVE = [\n 'full_config', 'config', 'model',\n 'input_names', 'input_shapes', 'target_shape', 'classes',\n 'loss', 'optimizer', 'decay', 'decay_step',\n 'sync_counter', 'microbatch',\n 'iteration', 'iter_info', 'lr_list', 'syncs', 'decay_iters',\n '_loss_list', 'loss_list',\n ]\n\n def __init__(self, config=None):\n self.full_config = Config(config)\n self.model_lock = threading.Lock()\n\n # Shapes of inputs and outputs\n self.input_names = None\n self.input_shapes = None\n self.target_shape = None\n self.classes = None\n\n # Pytorch model\n self.model = None\n\n # Leading device and list of all devices used\n self.device = None\n self.devices = []\n\n # Train procedure and ifrastructure\n self.loss = None\n self.optimizer = None\n self.decay = None\n self.decay_step = None\n\n self.amp = True\n self.scaler = None\n\n self.callbacks = []\n\n # Memory amortization: accumulate gradients to update weights later\n self.sync_frequency = 1\n self.sync_counter = 0\n self.microbatch = None\n\n # Sharpness-aware minimization\n self.sam_rho = 0.0\n self.sam_individual_norm = True\n\n # Store info about passed train iterations\n self.iteration = 0\n self.iter_info = {}\n self.lr_list = []\n self.syncs = []\n self.decay_iters = []\n self._loss_list = []\n self.loss_list = []\n\n # Profile kernels used\n self.profile = False\n self.profilers = []\n self.profile_info = None\n super().__init__(config)\n\n def reset(self):\n \"\"\" Allows to recreate model from scratch. \"\"\"\n self.model = None\n self.iter_info = {}\n\n\n def build(self):\n \"\"\" Build the model. 
\"\"\"\n # Create config from default and external one\n self.full_config = self.combine_configs()\n self._get_devices()\n self._get_placeholder_shapes()\n self.full_config = self.build_config()\n\n # Store some of the config values\n self.microbatch = self.full_config.get('microbatch', None)\n self.sync_frequency = self.full_config.get('sync_frequency', 1)\n self.amp = self.full_config.get('amp', True)\n\n self.sam_rho = self.full_config.get('sam_rho', 0.0)\n self.sam_individual_norm = self.full_config.get('sam_individual_norm', False)\n self.profile = self.full_config.get('profile', False)\n\n self.callbacks = [callback.set_model(self) for callback in self.full_config.get('callbacks', [])]\n\n # If the inputs are set in config with their shapes we can build right away\n if self.input_shapes:\n self._build()\n\n\n # Create config of model creation: combine the external and default ones\n @classmethod\n def default_config(cls):\n \"\"\" Define model defaults.\n\n You need to override this method if you expect your model or its blocks to serve as a base for other models\n (e.g. VGG for FCN, ResNet for LinkNet, etc).\n\n Put here all constants (like the number of filters, kernel sizes, block layouts, strides, etc)\n specific to the model, but independent of anything else (like image shapes, number of classes, etc).\n\n These defaults can be changed in :meth:`~.TorchModel.build_config` or when calling :meth:`.Pipeline.init_model`.\n\n Examples\n --------\n .. code-block:: python\n\n @classmethod\n def default_config(cls):\n config = TorchModel.default_config()\n config['initial_block'] = dict(layout='cnap', filters=16, kernel_size=7, strides=2,\n pool_size=3, pool_strides=2)\n config['body/filters'] = 32\n config['head'] = dict(layout='cnadV', dropout_rate=.2)\n return config\n \"\"\"\n config = Config()\n config['inputs'] = {}\n config['placeholder_batch_size'] = 2\n\n config['device'] = None\n config['benchmark'] = True\n config['profile'] = False\n config['microbatch'] = None\n config['sync_frequency'] = 1\n\n config['loss'] = None\n config['optimizer'] = 'Adam'\n config['decay'] = None\n config['amp'] = True\n\n config['sam_rho'] = 0.0\n config['sam_individual_norm'] = True\n\n config['order'] = ['initial_block', 'body', 'head']\n config['initial_block'] = {}\n config['body'] = {}\n config['head'] = {}\n config['common'] = {}\n\n config['predictions'] = None\n config['output'] = None\n return config\n\n def combine_configs(self):\n \"\"\" Combine default configuration and the external one. \"\"\"\n config = self.default_config() + self.config\n return config\n\n def build_config(self):\n \"\"\" Define model's architecture configuration.\n\n * Don't forget to call ``super().build_config(names)`` in the beginning.\n\n * Define parameters for :meth:`.TorchModel.initial_block`, :meth:`.TorchModel.body`, :meth:`.TorchModel.head`,\n which depend on inputs.\n\n * Dont forget to return ``config`` at the end.\n\n Examples\n --------\n .. 
code-block:: python\n\n def build_config(self, names=None):\n config = super().build_config(names)\n config['head/filters'] = self.num_classes('targets')\n return config\n \"\"\"\n config = self.full_config\n\n if config.get('inputs'):\n inputs_config = config['inputs']\n\n # Add default aliases\n if 'targets' not in inputs_config:\n if 'labels' in inputs_config:\n inputs_config['targets'] = inputs_config['labels']\n elif 'masks' in inputs_config:\n inputs_config['targets'] = inputs_config['masks']\n\n # Fetch default data format for all the parts of the network\n inputs = config.get('initial_block/inputs')\n if isinstance(inputs, str):\n data_format = inputs_config.get(inputs, {}).get('data_format')\n elif isinstance(inputs, (tuple, list)):\n data_format = inputs_config.get(inputs[0], {}).get('data_format')\n else:\n data_format = 'channels_first'\n config['common/data_format'] = config.get('common/data_format') or data_format\n\n config['head/target_shape'] = self.target_shape\n config['head/classes'] = self.classes\n\n if config.get('head/units') is None:\n config['head/units'] = self.classes\n if config.get('head/filters') is None:\n config['head/filters'] = self.classes\n return config\n\n def unpack(self, name):\n \"\"\" Get params from config. \"\"\"\n unpacked = unpack_fn_from_config(name, self.full_config)\n if isinstance(unpacked, list):\n return {name: unpacked}\n key, kwargs = unpacked\n return {name: key, **kwargs}\n\n\n # Prepare to build the model: determine device(s) and shape(s)\n def _get_devices(self):\n devices = self.full_config.get('device')\n if devices is None:\n if torch.cuda.is_available():\n self.device = torch.device('cuda:0')\n else:\n self.device = torch.device('cpu')\n else:\n devices = devices if isinstance(devices, list) else [devices]\n available_devices = ['cuda:{}'.format(i) for i in range(torch.cuda.device_count())] + ['cpu']\n for dev in devices:\n if isinstance(dev, torch.device):\n self.devices.append(dev)\n elif isinstance(dev, str):\n dev_ = dev.lower()\n dev_ = dev_.replace('gpu', 'cuda')\n dev_ = dev_.replace('cpu:0', 'cpu')\n\n devices = [torch.device(device) for device in available_devices\n if re.search(dev_, device.lower()) is not None]\n self.devices.extend(devices)\n else:\n raise TypeError('Wrong device type: {}'.format(type(dev)))\n self.devices = [device for i, device in enumerate(self.devices)\n if device not in self.devices[:i]]\n self.device = self.devices[0]\n\n torch.backends.cudnn.benchmark = self.full_config.get('benchmark', 'cuda' in self.device.type)\n\n def _get_placeholder_shapes(self):\n config = self.full_config\n\n input_names = config.pop('initial_block/inputs', default=None)\n if input_names is not None:\n batch_size = config.get('placeholder_batch_size', 2)\n input_names = input_names if isinstance(input_names, (tuple, list)) else [input_names]\n\n shapes = []\n for name in input_names:\n cfg = config['inputs'].get(name, {})\n if 'shape' in cfg:\n shapes.append((batch_size, *cfg['shape']))\n else:\n shapes.append(None)\n\n if None not in shapes:\n self.input_shapes = shapes\n self.input_names = input_names\n\n if config.get('inputs'):\n classes, shapes = [], []\n for name in ['labels', 'masks', 'targets']:\n cfg = config['inputs'].get(name, {})\n if 'classes' in cfg:\n classes.append(cfg['classes'])\n if 'shape' in cfg:\n shapes.append(cfg['shape'])\n if len(classes) == 1:\n self.classes = classes[0]\n if len(shapes) == 1:\n self.target_shape = (batch_size, *shapes[0])\n if self.classes is None:\n self.classes = 
shapes[0][0]\n\n def _to_device(self):\n \"\"\" Select whether to put model on a single device or to a number of devices in `DataParallel` mode.\n\n Notes\n -----\n The method serves for code simplification at build / load stages and shouldn't be applied to prebuilt\n models since it does not change models attributes (like `self.device`) and does not process model-related\n objects (like loss functions or optimizers).\n \"\"\"\n if len(self.devices) > 1:\n self.model = nn.DataParallel(self.model, self.devices)\n else:\n self.model.to(self.device)\n\n # Chain multiple building blocks to create model\n def _build(self, inputs=None):\n config = self.full_config\n order = config.get('order')\n\n inputs = inputs or self._placeholder_data()\n\n blocks = []\n for item in order:\n if isinstance(item, str):\n block_name = config_name = method = item\n elif isinstance(item, tuple) and len(item) == 3:\n block_name, config_name, method = item\n elif isinstance(item, dict):\n block_name = item['block_name']\n config_name = item.get('config_name', block_name)\n method = item.get('method', config_name)\n\n inputs = inputs[0] if isinstance(inputs, (tuple, list)) and len(inputs) == 1 else inputs\n block = self._make_block(config_name, method, config, inputs)\n if block is not None:\n block.to(self.device)\n inputs = block(inputs)\n blocks.append((block_name, block))\n\n self.model = nn.Sequential(OrderedDict(blocks))\n self._to_device()\n\n self.make_loss(**self.unpack('loss'))\n self.make_optimizer(**self.unpack('optimizer'))\n self.make_decay(**self.unpack('decay'), optimizer=self.optimizer)\n self.scaler = torch.cuda.amp.GradScaler()\n\n\n def _placeholder_data(self):\n data = [np.zeros(shape, dtype=np.float32) for shape in self.input_shapes]\n data = self.transfer_to_device(data)\n return data\n\n def _make_block(self, name, method, config, inputs):\n if isinstance(config[name], nn.Module):\n block = config[name]\n elif isinstance(config[name], dict):\n config = {**config['common'], **config[name]}\n if 'module' in config:\n module = config['module']\n if isinstance(module, nn.Module):\n block = module\n else:\n kwargs = config.get('module_kwargs', {})\n if 'inputs' in inspect.getfullargspec(module.__init__)[0]:\n kwargs = {'inputs': inputs, **kwargs}\n block = module(*config.get('module_args', []), **kwargs)\n else:\n method = getattr(self, method) if isinstance(method, str) else method\n block = method(inputs=inputs, **config)\n else:\n raise ValueError('{} must be configured either as nn.Module or dictionary, got {}'.format(name, config))\n return block\n\n\n # Create training procedure(s): loss, optimizer, decay\n def make_loss(self, loss, **kwargs):\n \"\"\" Set model loss. Changes the `loss` attribute. 
\"\"\"\n loss_fn = None\n # Parse `loss` to actual module\n if isinstance(loss, str):\n # String like 'ce', 'bdice' or 'CrossEntropy'\n if hasattr(nn, loss):\n loss = getattr(nn, loss)\n elif hasattr(nn, loss + \"Loss\"):\n loss = getattr(nn, loss + \"Loss\")\n else:\n loss = LOSSES.get(re.sub('[-_ ]', '', loss).lower(), None)\n\n elif isinstance(loss, nn.Module):\n # Already a valid module\n loss_fn = loss\n elif callable(loss):\n # Callable: just pass other arguments in\n loss_fn = partial(loss, **kwargs)\n elif isinstance(loss, type):\n # Class to make module\n pass\n else:\n raise ValueError(\"Loss is not defined in the model %s\" % self.__class__.__name__)\n\n loss_fn = loss_fn or loss(**kwargs)\n if isinstance(loss_fn, nn.Module):\n loss_fn.to(device=self.device)\n\n self.loss = loss_fn\n\n def make_optimizer(self, optimizer, **kwargs):\n \"\"\" Set model optimizer. Changes the `optimizer` attribute. \"\"\"\n # Choose the optimizer\n if callable(optimizer) or isinstance(optimizer, type):\n pass\n elif isinstance(optimizer, str) and hasattr(torch.optim, optimizer):\n optimizer = getattr(torch.optim, optimizer)\n else:\n raise ValueError(\"Unknown optimizer\", optimizer)\n\n self.optimizer = optimizer(self.model.parameters(), **kwargs)\n\n def make_decay(self, decay, optimizer=None, **kwargs):\n \"\"\" Set model decay. Changes the `decay` and `decay_step` attribute. \"\"\"\n if isinstance(decay, (tuple, list)):\n decays = decay\n else:\n decays = [(decay, kwargs)] if decay else []\n\n self.decay, self.decay_step = [], []\n\n for decay_, decay_kwargs in decays:\n if decay_ is None:\n raise ValueError('Missing `name` key in the decay configuration')\n\n # Parse decay\n if callable(decay_) or isinstance(decay_, type):\n pass\n elif isinstance(decay_, str) and hasattr(torch.optim.lr_scheduler, decay_):\n decay = getattr(torch.optim.lr_scheduler, decay_)\n elif decay_ in DECAYS:\n decay_ = DECAYS.get(decay_)\n else:\n raise ValueError('Unknown learning rate decay method', decay_)\n\n # Parse step parameters\n step_params = {\n 'first_iter': 0,\n 'last_iter': np.inf,\n **decay_kwargs\n }\n if 'frequency' not in step_params:\n raise ValueError('Missing `frequency` key in the decay configuration')\n\n # Set defaults for some of the decays\n if decay_ in DECAYS_DEFAULTS:\n decay_dict = DECAYS_DEFAULTS.get(decay_).copy()\n if decay == DECAYS['cos']:\n decay_dict.update(T_max=step_params['frequency'])\n decay_kwargs = {**decay_dict, **decay_kwargs}\n\n # Remove unnecessary keys from kwargs\n for key in ['first_iter', 'last_iter', 'frequency']:\n decay_kwargs.pop(key, None)\n\n # Create decay or store parameters for later usage\n if optimizer:\n decay_ = decay_(optimizer, **decay_kwargs)\n else:\n decay = (decay_, decay_kwargs)\n\n self.decay.append(decay_)\n self.decay_step.append(step_params)\n\n\n # Use an external model\n def set_model(self, model):\n \"\"\" Set the underlying model to a supplied one and update training infrastructure. 
\"\"\"\n self.model = model\n\n self._to_device()\n\n self.make_loss(**self.unpack('loss'))\n self.make_optimizer(**self.unpack('optimizer'))\n self.make_decay(**self.unpack('decay'), optimizer=self.optimizer)\n self.scaler = torch.cuda.amp.GradScaler()\n\n # Define model structure\n @classmethod\n def get_defaults(cls, name, kwargs):\n \"\"\" Fill block params from default config and kwargs \"\"\"\n config = cls.default_config()\n _config = Config(config.get(name))\n _config = _config + (kwargs or {})\n config = {**config['common'], **_config}\n return config\n\n @classmethod\n def initial_block(cls, inputs, **kwargs):\n \"\"\" Transform inputs. Usually used for initial preprocessing, e.g. reshaping, downsampling etc.\n\n Notes\n -----\n For parameters see :class:`~.torch.layers.ConvBlock`.\n\n Returns\n -------\n torch.nn.Module or None\n \"\"\"\n kwargs = cls.get_defaults('initial_block', kwargs)\n if kwargs.get('layout') or kwargs.get('base_block'):\n return ConvBlock(inputs=inputs, **kwargs)\n return None\n\n @classmethod\n def body(cls, inputs, **kwargs):\n \"\"\" Base layers which produce a network embedding.\n\n Notes\n -----\n For parameters see :class:`~.torch.layers.ConvBlock`.\n\n Returns\n -------\n torch.nn.Module or None\n \"\"\"\n kwargs = cls.get_defaults('body', kwargs)\n if kwargs.get('layout') or kwargs.get('base_block'):\n return ConvBlock(inputs=inputs, **kwargs)\n return None\n\n @classmethod\n def head(cls, inputs, target_shape, classes, **kwargs):\n \"\"\" The last network layers which produce predictions. Usually used to make network output\n compatible with the `targets` tensor.\n\n Notes\n -----\n For parameters see :class:`~.torch.layers.ConvBlock`.\n\n Returns\n -------\n torch.nn.Module or None\n \"\"\"\n _ = target_shape, classes\n kwargs = cls.get_defaults('head', kwargs)\n if kwargs.get('layout') or kwargs.get('base_block'):\n return ConvBlock(inputs=inputs, **kwargs)\n return None\n\n\n # Transfer data to/from device(s)\n def parse_inputs(self, *args, **kwargs):\n \"\"\" Convert arguments (either positional or keyword) into inputs and targets of a neural network. \"\"\"\n if args and kwargs:\n raise ValueError('Use either positional or keyword arguments in `train` call.')\n\n if kwargs:\n for name in ['labels', 'masks', 'targets']:\n if name in kwargs:\n targets = kwargs.pop(name)\n\n args = [kwargs.get(name) for name in (self.input_names or list(kwargs.keys()))]\n args.append(targets)\n return args\n\n def transfer_to_device(self, data):\n \"\"\" Transfer (possibly nested) structure to device and return the same structure. \"\"\"\n if isinstance(data, (tuple, list)):\n return [self.transfer_to_device(item) for item in data]\n\n if isinstance(data, np.ndarray):\n if data.dtype not in [np.float32, 'float32']:\n data = data.astype(np.float32)\n data = torch.from_numpy(data).to(self.device)\n return data\n\n if isinstance(data, torch.Tensor):\n data = data.to(self.device)\n return data\n\n if CUPY_AVAILABLE and isinstance(data, cp.ndarray):\n if data.device.id == self.device.index:\n data = torch.utils.dlpack.from_dlpack(data.toDlpack())\n return data\n raise TypeError(f'cupy arrays should reside on the same GPU, as model itself: {self.device}.')\n\n if data is None:\n return None\n raise TypeError('Passed data should either be a `np.ndarray`, `torch.Tensor` or `cupy.ndarray`. ')\n\n def transfer_from_device(self, data):\n \"\"\" Transfer (possibly nested) structure from device and return the same structure. 
\"\"\"\n if isinstance(data, (tuple, list)):\n return [self.transfer_from_device(item) for item in data]\n\n if isinstance(data, (torch.Tensor, torch.autograd.Variable)):\n return data.detach().cpu().numpy()\n\n if isinstance(data, (np.ndarray, int, float)):\n return data\n raise TypeError('Passed data should either be a `torch.Tensor` or sequence of them. ')\n\n def parse_output(self, fetches, outputs):\n \"\"\" Retrieve tensors from device in the same structure, as `fetches`. \"\"\"\n fetches = fetches if fetches is not None else []\n _fetches = [fetches] if isinstance(fetches, str) else fetches\n\n output = []\n for name in _fetches:\n if name in outputs:\n value = outputs[name]\n value = self.transfer_from_device(value)\n output.append(value)\n else:\n raise KeyError('Unknown value to fetch', name)\n\n output = output[0] if isinstance(fetches, str) else type(fetches)(output)\n return output\n\n\n # Apply model to train/predict on given data\n def train(self, *args, feed_dict=None, fetches=None, use_lock=True, profile=False,\n sync_frequency=True, microbatch=True, sam_rho=None, sam_individual_norm=None, **kwargs):\n \"\"\" Train the model with the data provided\n\n Parameters\n ----------\n args\n Arguments to be passed directly into the model.\n feed_dict : dict\n If ``initial_block/inputs`` are set, then this argument allows to pass data inside,\n with keys being names and values being actual data.\n fetches : tuple, list\n Sequence of tensor names to calculate and return.\n use_lock : bool\n If True, then model, loss and gradient update operations are locked, thus allowing for multithreading.\n sync_frequency : int, bool or None\n If int, then how often to apply accumulated gradients to the weights.\n If True, then value from config is used (default value is to apply gradients after each batch of data).\n If False or None, then gradients are applied after each batch of data.\n microbatch : int, bool or None\n If int, then size of chunks to split every batch into. Allows to process given data sequentially,\n accumulating gradients from microbatches and applying them once in the end.\n If True, then value from config is used (default value is not to use microbatching).\n If False or None, then microbatching is not used.\n sam_rho : float\n Foret P. et al. \"`Sharpness-Aware Minimization for Efficiently Improving Generalization\n <https://arxiv.org/abs/2010.01412>`_\".\n If evaluates to False, then SAM is not used.\n If float, then controls the size of neighborhood (check the paper for details).\n sam_individual_norm : bool\n If True, then each gradient is scaled according to its own L2 norm.\n If False, then one common gradient norm is computed and used as a scaler for all gradients.\n profile : bool\n Whether to collect stats of model training timings.\n If True, then stats can be accessed via `profile_info` attribute or :meth:`.show_profile_info` method.\n kwargs : dict\n Additional named arguments directly passed to `feed_dict`.\n\n Returns\n -------\n Calculated values of tensors in `fetches` in the same order.\n\n Examples\n --------\n .. 
code-block:: python\n\n model.train(B('images'), B('labels'), fetches='loss')\n \"\"\"\n # Prepare inputs and targets: convert to Torch Tensors and transfer to device\n *inputs, targets = self.parse_inputs(*args, **{**(feed_dict or {}), **kwargs})\n\n # Lock the entire method; release in any case\n try:\n if use_lock:\n self.model_lock.acquire()\n\n # Parse arguments\n if sync_frequency is True:\n sync_frequency = self.sync_frequency\n elif sync_frequency is False or sync_frequency is None:\n sync_frequency = 1\n\n if microbatch:\n if microbatch is True:\n microbatch = self.microbatch\n else:\n microbatch = microbatch or self.microbatch\n\n # Split data into microbatches, if needed\n if microbatch:\n microbatch = 1 if microbatch is True else microbatch\n steps = len(targets) // microbatch\n split_inputs = [[item[i:i + microbatch] for item in inputs] for i in range(0, len(targets), microbatch)]\n split_targets = [targets[i:i + microbatch] for i in range(0, len(targets), microbatch)]\n else:\n steps = 1\n split_inputs = [inputs]\n split_targets = [targets]\n\n # Prepare parameters for SAM\n if sam_rho is None:\n sam_rho = self.sam_rho\n if sam_individual_norm is None:\n sam_individual_norm = self.sam_individual_norm\n\n # Create Pytorch model if it is yet to be initialized, based on the actual inputs\n if self.model is None:\n if isinstance(split_inputs[0], (list, tuple)):\n self.input_shapes = [get_shape(item) for item in split_inputs[0]]\n else:\n self.input_shapes = get_shape(split_inputs[0])\n\n self.target_shape = get_shape(split_targets[0])\n if self.classes is None:\n if len(self.target_shape) > 1: # segmentation\n self.classes = self.target_shape[1]\n\n # Can use the first two items to build model: no need for the whole tensor\n self.build_config()\n build_inputs = [item[:2] for item in split_inputs[0]]\n build_inputs = self.transfer_to_device(build_inputs)\n self._build(build_inputs)\n\n self.model.train()\n\n # Set up the profiling, if needed\n profile = profile or self.profile\n if profile:\n profiler = torch.autograd.profiler.profile(use_cuda='cpu' not in self.device.type)\n profiler.__enter__()\n\n # Train on each of the microbatches\n outputs = []\n for i in range(steps):\n _inputs = split_inputs[i]\n _targets = split_targets[i]\n\n _inputs = self.transfer_to_device(_inputs)\n _targets = self.transfer_to_device(_targets)\n\n output = self._train(*_inputs, _targets, fetches=fetches, sync_frequency=sync_frequency*steps,\n sam_rho=sam_rho, sam_individual_norm=sam_individual_norm)\n outputs.append(output)\n\n # Store the average value of loss over the entire batch\n self.loss_list.append(np.mean(self._loss_list[-steps:]))\n\n # Parse `outputs` to a desired structure. `outputs` stores fetches for each microbatch\n # which must be aggregated to get fetches for the whole batch. 
Scalar values will be\n # aggregated by `mean`, array values will be concatenated by the first (batch) axis.\n if fetches:\n outputs = [[item] for item in outputs] if isinstance(fetches, str) else outputs\n output = []\n for i in range(len(outputs[0])):\n fetches_values = [item[i] for item in outputs]\n if fetches_values[0].size != 1:\n output.append(np.concatenate(fetches_values, axis=0))\n else:\n output.append(np.mean(fetches_values))\n if isinstance(fetches, str):\n output = output[0]\n else:\n output = []\n\n # Exit the profiling mode\n if profile:\n profiler.__exit__(None, None, None)\n self.profilers.append(profiler)\n\n # Store info about current iteration\n self.iter_info.update({\n 'amp': self.amp,\n 'microbatch': microbatch,\n 'sync_frequency': sync_frequency,\n 'steps': steps,\n 'sam': bool(sam_rho), 'sam_rho': sam_rho, 'sam_individual_norm': sam_individual_norm,\n 'actual_model_inputs_shape': [get_shape(item) for item in _inputs],\n 'actual_model_outputs_shape': get_shape(_targets),\n })\n\n # Call the callbacks\n for callback in self.callbacks:\n callback.on_iter_end()\n\n finally:\n if use_lock:\n self.model_lock.release()\n return output\n\n def _train(self, *args, fetches=None, sync_frequency=True, sam_rho=0.0, sam_individual_norm=True):\n # Parse inputs\n *inputs, targets = args\n inputs = inputs[0] if isinstance(inputs, (tuple, list)) and len(inputs) == 1 else inputs\n\n # Apply model, compute loss and gradients\n with torch.cuda.amp.autocast(enabled=self.amp):\n predictions = self.model(inputs)\n\n # SAM: store grads from previous microbatches\n if self.iteration >= 1 and bool(sam_rho):\n for p in self.model.parameters():\n if p.grad is not None:\n self.optimizer.state[p]['previous_grad'] = p.grad.clone().detach()\n p.grad = None\n\n with torch.cuda.amp.autocast(enabled=self.amp):\n loss = self.loss(predictions, targets)\n loss_ = loss if sync_frequency == 1 else loss / sync_frequency\n (self.scaler.scale(loss_) if self.amp else loss).backward()\n\n # SAM: use obtained grads to move to the local maxima\n if self.iteration >= 1 and bool(sam_rho):\n # Fetch gradients\n grads = []\n params_with_grads = []\n for p in self.model.parameters():\n if p.grad is not None:\n grads.append(p.grad.clone().detach())\n params_with_grads.append(p)\n p.grad = None\n\n # Move to the local maxima\n if sam_individual_norm:\n epsilons = [grad * sam_rho / (grad.detach().norm(2).to(self.device)) for grad in grads]\n else:\n grad_norm = torch.stack([g.detach().norm(2).to(self.device) for g in grads]).norm(2)\n epsilons = [eps * sam_rho / grad_norm for eps in grads]\n\n if self.amp:\n scale = self.scaler.get_scale()\n epsilons = [eps / scale for eps in epsilons]\n params_with_grads = [p + eps for p, eps in zip(params_with_grads, epsilons)]\n\n # Compute new gradients: direction to move to minimize the local maxima\n with torch.cuda.amp.autocast(enabled=self.amp):\n predictions = self.model(inputs)\n loss_inner = self.loss(predictions, targets)\n (self.scaler.scale(loss_inner) if self.amp else loss_inner).backward()\n\n # Cancel the previous update to model parameters, add stored gradients from previous microbatches\n params_with_grads = [p - eps for p, eps in zip(params_with_grads, epsilons)]\n\n for p in self.model.parameters():\n previous_grad = self.optimizer.state[p].get('previous_grad')\n if previous_grad is not None:\n p.grad.add_(previous_grad)\n\n # Store loss value for every microbatch\n self._loss_list.append(loss.detach().cpu().numpy())\n\n # Whether to update weights or keep 
accumulating\n if self.sync_counter == sync_frequency - 1:\n # Store learning rate: once per sync\n # Note: we do it before decay, so it is actual LR used on this iteration\n self.lr_list.append([group['lr'] for group in self.optimizer.param_groups])\n\n # Update weights and remove grads\n if self.amp:\n self.scaler.step(self.optimizer)\n self.scaler.update()\n else:\n self.optimizer.step()\n\n # Optimization over default `zero_grad`; can be removed after PyTorch >= 1.8\n for p in self.model.parameters():\n p.grad = None\n self.iteration += 1\n\n # Apply decay to learning rate, if needed\n if self.decay:\n for decay, decay_step in zip(self.decay, self.decay_step):\n step_cond = (self.iteration - decay_step['first_iter']) % decay_step['frequency'] == 0\n range_cond = decay_step['first_iter'] <= self.iteration <= decay_step['last_iter']\n if step_cond and range_cond:\n decay.step()\n self.decay_iters.append(self.iteration)\n\n # Update counters\n self.sync_counter = 0\n self.syncs.append(True)\n else:\n self.sync_counter += 1\n self.syncs.append(False)\n\n # Store outputs\n output_container = {\n 'predictions': predictions,\n 'loss': loss,\n }\n\n config = self.full_config\n additional_outputs = self.output(inputs=predictions,\n predictions=config['predictions'],\n ops=config['output'])\n output_container = {**output_container, **additional_outputs}\n output = self.parse_output(fetches, output_container)\n return output\n\n\n def predict(self, *args, targets=None, feed_dict=None, fetches=None, use_lock=True, **kwargs):\n \"\"\" Get predictions on the data provided.\n\n Parameters\n ----------\n args : sequence\n Arguments to be passed directly into the model.\n feed_dict : dict\n If ``initial_block/inputs`` are set, then this argument allows to pass data inside,\n with keys being names and values being actual data.\n targets : ndarray, optional\n Targets to calculate loss.\n fetches : tuple, list\n Sequence of tensors to fetch from the model.\n use_lock : bool\n If True, then model and loss computation operations are locked, thus allowing for multithreading.\n kwargs : dict\n Additional named arguments directly passed to `feed_dict`.\n\n Returns\n -------\n Calculated values of tensors in `fetches` in the same order.\n\n Examples\n --------\n .. 
code-block:: python\n\n model.predict(B('images'), targets=B('labels'), fetches='loss')\n \"\"\"\n inputs, targets = self._make_prediction_inputs(*args, targets=targets, feed_dict=feed_dict, **kwargs)\n\n # Acquire lock, release anyway\n try:\n if use_lock:\n self.model_lock.acquire()\n\n self.model.eval()\n\n with torch.no_grad(), torch.cuda.amp.autocast(enabled=self.amp):\n output_container = {}\n inputs = self.transfer_to_device(inputs)\n predictions = self.model(inputs)\n\n if self.amp:\n if isinstance(predictions, (tuple, list)):\n predictions = type(predictions)(p.float() for p in predictions)\n else:\n predictions = predictions.float()\n output_container['predictions'] = predictions\n\n if targets is not None:\n targets = self.transfer_to_device(targets)\n output_container['loss'] = self.loss(predictions, targets)\n\n config = self.full_config\n additional_outputs = self.output(inputs=predictions, predictions=config['predictions'],\n ops=config['output'])\n output_container = {**output_container, **additional_outputs}\n output = self.parse_output(fetches, output_container)\n\n finally:\n if use_lock:\n self.model_lock.release()\n return output\n\n def _make_prediction_inputs(self, *args, targets=None, feed_dict=None, **kwargs):\n \"\"\" Parse arguments to create valid inputs for the model.\n Implements the logic of parsing the positional and keyword arguments to the model,\n possibly wrapped into `feed_dict` dictionary, or even combination of the two.\n\n Used under the hood of :meth:`~.TorchModel.predict` method.\n\n Examples\n --------\n .. code-block:: python\n\n model.predict(B('images'), targets=B('labels'))\n model.predict(images=B('images'), targets=B('labels'))\n model.predict(B('images'), targets=B('labels'), masks=B('masks'))\n \"\"\"\n # Concatenate `kwargs` and `feed_dict`; if not empty, use keywords in `parse_input`\n feed_dict = {**(feed_dict or {}), **kwargs}\n if len(feed_dict) == 1:\n _, value = feed_dict.popitem()\n args = (*args, value)\n if feed_dict:\n if targets is not None and 'targets' in feed_dict.keys():\n warnings.warn(\"`targets` already present in `feed_dict`, so those passed as keyword arg won't be used\")\n *inputs, targets = self.parse_inputs(*args, **feed_dict)\n\n # Positional arguments only\n else:\n inputs = self.parse_inputs(*args)\n if targets is not None:\n targets = self.parse_inputs(targets)[0]\n inputs = inputs[0] if isinstance(inputs, (tuple, list)) and len(inputs) == 1 else inputs\n return inputs, targets\n\n def output(self, inputs, predictions=None, ops=None):\n \"\"\" Add output operations to the model, like predicted probabilities or labels, etc.\n\n Parameters\n ----------\n inputs : torch.Tensor or a sequence of torch.Tensors\n Input tensors.\n\n predictions : str or callable\n Operation to apply to the network output to obtain tensor which is used in loss computation.\n\n If str, then one of predefined operations:\n - 'sigmoid' - ``sigmoid(inputs)``\n - 'proba' - ``softmax(inputs)``\n - 'labels' - ``argmax(inputs)``\n - 'softplus' - ``softplus(inputs)``\n\n If callable, then user-defined operation.\n\n ops : sequence, dict or OrderedDict\n Auxiliary operations to apply.\n\n If sequence, then operations to apply. Transformed tensors are stored with the same name, as operation\n If dict, then mapping from prefixes to operations. Transformed tensors are stored with\n the prefixed name of the operation.\n\n For multi-output models ensure that an ordered dict is used (e.g. 
:class:`~collections.OrderedDict`).\n\n Raises\n ------\n ValueError if the number of inputs does not equal to the number of prefixes\n TypeError if inputs is not a Tensor or a sequence of Tensors\n\n Examples\n --------\n .. code-block:: python\n\n config = {\n 'output': ['proba', 'labels']\n }\n\n However, if one of the placeholders also has a name 'labels', then it will be lost as the model\n will rewrite the name 'labels' with an output. In this case dict might be more convenient:\n\n .. code-block:: python\n\n config = {\n 'output': {'predicted': ['proba', 'labels']}\n }\n\n Now the output will be stored under names 'predicted_proba' and 'predicted_labels'.\n \"\"\"\n if ops is None:\n ops = []\n elif not isinstance(ops, (dict, tuple, list)):\n ops = [ops]\n if not isinstance(ops, dict):\n ops = {'': ops}\n\n if not isinstance(inputs, (tuple, list)):\n inputs = [inputs]\n\n outputs = {}\n for i, tensor in enumerate(inputs):\n if not isinstance(tensor, torch.Tensor):\n raise TypeError(\"Network output is expected to be a Tensor, but given {}\".format(type(tensor)))\n\n prefix = [*ops.keys()][i]\n attr_prefix = prefix + '_' if prefix else ''\n\n self._add_output_op(tensor, predictions, 'predictions', '')\n for oper in ops[prefix]:\n name, output = self._add_output_op(tensor, oper, oper, attr_prefix)\n outputs[name] = output\n return outputs\n\n def _add_output_op(self, inputs, oper, name, attr_prefix):\n if oper is None:\n output = inputs\n elif oper == 'softplus':\n output = torch.nn.functional.softplus(inputs)\n elif oper == 'sigmoid':\n output = torch.nn.functional.sigmoid(inputs)\n elif oper == 'proba':\n output = torch.nn.functional.softmax(inputs, dim=1)\n elif oper == 'labels':\n output = inputs.argmax(dim=1)\n elif callable(oper):\n output = oper(inputs)\n name = oper.__name__\n return attr_prefix + name, output\n\n\n # Preserve model for later usage\n def save(self, path, *args, **kwargs):\n \"\"\" Save torch model.\n\n Parameters\n ----------\n path : str\n Path to a file where the model data will be stored.\n\n Examples\n --------\n .. code-block:: python\n\n torch_model = ResNet34()\n\n Now save the model\n\n .. code-block:: python\n\n torch_model.save('/path/to/models/resnet34')\n\n The model will be saved to /path/to/models/resnet34.\n \"\"\"\n _ = args\n dirname = os.path.dirname(path)\n if dirname and not os.path.exists(dirname):\n os.makedirs(dirname)\n\n if kwargs.get('pickle_module') is None:\n kwargs['pickle_module'] = dill\n\n torch.save({item: getattr(self, item) for item in self.PRESERVE}, path, **kwargs)\n\n def load(self, path, *args, eval=False, **kwargs):\n \"\"\" Load a torch model from files.\n\n Parameters\n ----------\n path : str\n File path where a model is stored.\n\n eval : bool\n Whether to switch the model to eval mode.\n\n Examples\n --------\n .. 
code-block:: python\n\n resnet = ResNet34(load=dict(path='/path/to/models/resnet34'))\n\n torch_model.load(path='/path/to/models/resnet34')\n\n TorchModel(config={'device': 'gpu:2', 'load/path': '/path/to/models/resnet34'})\n\n **How to move the model to device**\n\n The model will be moved to device specified in the model config by key `device`.\n \"\"\"\n _ = args\n self._get_devices()\n\n if kwargs.get('pickle_module') is None:\n kwargs['pickle_module'] = dill\n\n if self.device:\n checkpoint = torch.load(path, map_location=self.device, **kwargs)\n else:\n checkpoint = torch.load(path, **kwargs)\n\n # `load_config` is a reference to `self.config` used to update `full_config`\n # It is required since `self.config` is overwritten in the cycle below\n load_config = self.config\n\n for item in self.PRESERVE:\n setattr(self, item, checkpoint.get(item))\n self.full_config = self.full_config + load_config\n\n self._to_device()\n\n if eval:\n self.model.eval()\n\n\n # Debug and profile the performance\n def set_debug_mode(self, mode=True):\n \"\"\" Changes representation of model to a more or less detailed.\n By default, model representation reduces the description of the most complex modules.\n \"\"\"\n if self.model is None:\n raise ValueError('Model is not initialized yet. ')\n self.model.apply(lambda module: setattr(module, 'debug', mode))\n\n def show_profile_info(self, per_iter=False, sortby=None, limit=10, parse=False):\n \"\"\" Show stored profiling information with varying levels of details. \"\"\"\n if (self.profile_info is None) or parse:\n self._parse_profilers()\n\n if self.device.type == 'cpu':\n columns = ['ncalls', 'CPU_tottime', 'CPU_cumtime', 'CPU_tottime_avg']\n if sortby is None:\n sortby = ('CPU_tottime', 'sum') if per_iter is False else 'CPU_tottime'\n else:\n columns = ['ncalls', 'CUDA_cumtime', 'CUDA_cumtime_avg']\n if sortby is None:\n sortby = ('CUDA_cumtime', 'sum') if per_iter is False else 'CUDA_cumtime'\n\n if per_iter is False:\n aggs = {key: ['sum', 'mean', 'max'] for key in columns}\n result = (self.profile_info.reset_index().groupby(['name']).agg(aggs)\n .sort_values(sortby, ascending=False)[:limit])\n else:\n result = (self.profile_info.reset_index().set_index(['iter', 'name'])[columns]\n .sort_values(['iter', sortby], ascending=[True, False])\n .groupby(level=0).apply(lambda df: df[:limit]).droplevel(0))\n return result\n\n def _parse_profilers(self):\n us_in_s = 1000.0 * 1000.0\n\n indices, values = [], []\n for i, profiler in enumerate(self.profilers):\n for evt in profiler.function_events.key_averages():\n indices.append((i, evt.key))\n row_dict = {\n 'ncalls': evt.count,\n 'CPU_tottime': evt.self_cpu_time_total / us_in_s,\n 'CPU_cumtime': evt.cpu_time_total / us_in_s,\n 'CUDA_cumtime': evt.cuda_time_total / us_in_s,\n }\n values.append(row_dict)\n multiindex = pd.MultiIndex.from_tuples(indices, names=['iter', 'name'])\n\n self.profile_info = pd.DataFrame(values, index=multiindex,\n columns=['ncalls', 'CPU_tottime', 'CPU_cumtime', 'CUDA_cumtime'])\n self.profile_info['CPU_tottime_avg'] = self.profile_info['CPU_tottime'] / self.profile_info['ncalls']\n self.profile_info['CUDA_cumtime_avg'] = self.profile_info['CUDA_cumtime'] / self.profile_info['ncalls']\n"
]
| [
[
"tensorflow.compat.v1.concat",
"tensorflow.compat.v1.variable_scope"
],
[
"numpy.concatenate",
"torch.device",
"torch.nn.functional.sigmoid",
"torch.cuda.amp.autocast",
"torch.nn.functional.softplus",
"numpy.zeros",
"pandas.DataFrame",
"torch.autograd.profiler.profile",
"torch.no_grad",
"pandas.MultiIndex.from_tuples",
"numpy.mean",
"torch.from_numpy",
"torch.cuda.device_count",
"torch.cuda.is_available",
"torch.load",
"torch.nn.functional.softmax",
"torch.cuda.amp.GradScaler",
"torch.nn.DataParallel"
]
]
|
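The row above centers on a `train`/`_train` loop that accumulates gradients over microbatches (`sync_frequency`) under `torch.cuda.amp`. Below is a minimal, self-contained sketch of that accumulation pattern; the toy linear model, random tensors, and the concrete `sync_frequency` value are placeholders for illustration, not part of the original code.

```python
# Sketch of the microbatch gradient-accumulation + AMP pattern from the
# `_train` method above. Model and data are toy stand-ins.
import torch
import torch.nn as nn

model = nn.Linear(8, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
use_cuda = torch.cuda.is_available()
scaler = torch.cuda.amp.GradScaler(enabled=use_cuda)
sync_frequency = 4  # apply accumulated gradients every 4 microbatches

inputs = torch.randn(32, 8)
targets = torch.randint(0, 2, (32,))
loss_fn = nn.CrossEntropyLoss()

for step in range(sync_frequency):
    x = inputs[step * 8:(step + 1) * 8]
    y = targets[step * 8:(step + 1) * 8]
    with torch.cuda.amp.autocast(enabled=use_cuda):
        # Divide by sync_frequency so accumulated grads average, as above
        loss = loss_fn(model(x), y) / sync_frequency
    scaler.scale(loss).backward()

scaler.step(optimizer)  # one weight update per sync_frequency microbatches
scaler.update()
optimizer.zero_grad(set_to_none=True)  # same effect as the `p.grad = None` loop
```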
victor-estrade/datawarehouse | [
"9ae342bf6f9c3622eb841c2ee770519b12cde1c3"
]
| [
"datawarehouse/mnist.py"
]
| [
"# -*- coding: utf-8 -*-\nimport sys\nimport os\nimport gzip\n\nimport pandas as pd\nimport numpy as np\n\nfrom .download import maybe_download\nfrom .download import get_data_dir\n\ndef _load_mnist_images(filename):\n # Read the inputs in Yann LeCun's binary format.\n with gzip.open(filename, 'rb') as f:\n data = np.frombuffer(f.read(), np.uint8, offset=16)\n # The inputs are vectors now, we reshape them to monochrome 2D images,\n # following the shape convention: [batch_size, image_width, image_height, channels]\n data = data.reshape(-1, 28, 28, 1)\n # The inputs come as bytes, we convert them to float32 in range [0,1].\n # (Actually to range [0, 255/256], for compatibility to the version\n # provided at http://deeplearning.net/data/mnist/mnist.pkl.gz.)\n return data / np.float32(256)\n\ndef _load_mnist_labels(filename):\n # Read the labels in Yann LeCun's binary format.\n with gzip.open(filename, 'rb') as f:\n data = np.frombuffer(f.read(), np.uint8, offset=8)\n # The labels are vectors of integers now, that's exactly what we want.\n return data\n\ndef load_mnist():\n \"\"\"\n TODO : doc\n \"\"\"\n source_url = 'http://yann.lecun.com/exdb/mnist/'\n fname_train_images = 'train-images-idx3-ubyte.gz'\n fname_train_labels = 'train-labels-idx1-ubyte.gz'\n fname_test_images = 't10k-images-idx3-ubyte.gz'\n fname_test_labels = 't10k-labels-idx1-ubyte.gz'\n data_dir = get_data_dir()\n maybe_download(os.path.join(data_dir, fname_train_images), source_url+fname_train_images)\n maybe_download(os.path.join(data_dir, fname_train_labels), source_url+fname_train_labels)\n maybe_download(os.path.join(data_dir, fname_test_images), source_url+fname_test_images)\n maybe_download(os.path.join(data_dir, fname_test_labels), source_url+fname_test_labels)\n\n X_train = _load_mnist_images(os.path.join(data_dir, fname_train_images))\n y_train = _load_mnist_labels(os.path.join(data_dir, fname_train_labels))\n X_test = _load_mnist_images(os.path.join(data_dir, fname_test_images))\n y_test = _load_mnist_labels(os.path.join(data_dir, fname_test_labels))\n X = np.concatenate([X_train, X_test], axis=0)\n y = np.concatenate([y_train, y_test], axis=0)\n\n return X, y\n"
]
| [
[
"numpy.concatenate",
"numpy.float32"
]
]
|
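For context, a hypothetical usage of the `load_mnist` helper in the row above (assuming the package is importable as `datawarehouse`); the shapes follow from the standard MNIST split of 60,000 train plus 10,000 test images and the `reshape(-1, 28, 28, 1)` in `_load_mnist_images`.

```python
# Illustrative only: import path assumes the repo is installed as a package.
from datawarehouse.mnist import load_mnist

X, y = load_mnist()
print(X.shape)  # (70000, 28, 28, 1), float32 values in [0, 255/256]
print(y.shape)  # (70000,), uint8 digit labels 0-9
```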
deadphilosopher/Artificial-Intelligence-By-Example | [
"47bed1a88db2c9577c492f950069f58353375cfe",
"47bed1a88db2c9577c492f950069f58353375cfe"
]
| [
"Chapter16/math.py",
"Chapter07/k-means_clustering_minibatch.py"
]
| [
"import numpy as np\nimport statistics\n\ndata1 = [1, 2, 3, 4]\nM1=statistics.mean(data1)\nprint(\"Mean data1\",M1)\n\n\n\ndata2 = [1, 2, 3, 5]\nM2=statistics.mean(data2)\nprint(\"Mean data2\",M2)\n\n#var = mean(abs(x - x.mean())**2).\nprint(\"Variance 1\", np.var(data1))\nprint(\"Variance 2\", np.var(data2))\n\n\nx=np.array([[1, 2, 3, 4],\n [1, 2, 3, 5]])\n\na=np.cov(x)\nprint(a)\n\nfrom numpy import linalg as LA\nw, v = LA.eigh(a)\nprint(\"eigenvalue(s)\",w)\nprint(\"eigenvector(s)\",v)\n\n\n\n",
"#K-means clustering with Mini-Batch\n#Build with Sklearn\n#Copyright 2018 Denis Rothman MIT License. See LICENSE.\nfrom sklearn.cluster import KMeans \nimport pandas as pd\nfrom matplotlib import pyplot as plt\nfrom random import randint\nimport numpy as np\n\n#I.The training Dataset \ndataset = pd.read_csv('data.csv')\nprint (dataset.head())\nprint(dataset)\n'''Output of print(dataset)\n Distance location\n0 80 53\n1 18 8\n2 55 38\n...\n'''\n\nn=1000\ndataset1=np.zeros(shape=(n,2))\n\nli=0\nfor i in range (n):\n j=randint(0,4999)\n dataset1[li][0]=dataset.iloc[j,0]\n dataset1[li][1]=dataset.iloc[j,1]\n li+=1\n \n\n#II.Hyperparameters\n# Features = 2 :implict through the shape of the dataset (2 columns)\nk = 6\nkmeans = KMeans(n_clusters=k)\n\n#III.K-means clustering algorithm\nkmeans = kmeans.fit(dataset1) #Computing k-means clustering\ngcenters = kmeans.cluster_centers_ # the geometric centers or centroids\nprint(\"The geometric centers or centroids:\")\nprint(gcenters)\n\n'''Ouput of centroid coordinates\n\nThe geometric centers or centroids:\n\nMonte Carlo philosophy:\n\nMC[[ 19.7877095 16.40782123]\n [ 21.38965517 15.04597701]]\n\nMC [ 99.87603306 81.1322314 ]\n [ 96.06151645 84.57939914]\n\nMC[ 31.29139073 72.64900662]]\n [ 32.12590799 54.84866828]\n\nMC [ 61.54891304 49.875 ]\n [ 68.84578885 55.63226572]\n\n\nMC [ 63.86206897 84.20689655]\n [ 45.24736842 23.65263158]\n\n\nComplete dataset:\n\n[[ 48.7986755 85.76688742]\n [ 48.44532803 24.4333996 ]\n'''\n\n\n#IV.Defining the Result labels \nlabels = kmeans.labels_\ncolors = ['blue','red','green','black','yellow','brown','orange']\n\n\n#V.Displaying the results : datapoints and clusters\ny = 0\nfor x in labels:\n plt.scatter(dataset1[y,0], dataset1[y,1],color=colors[x])\n y+=1 \nfor x in range(k):\n lines = plt.plot(gcenters[x,0],gcenters[x,1],'kx') \n\ntitle = ('No of clusters (k) = {}').format(k)\nplt.title(title)\nplt.xlabel('Distance')\nplt.ylabel('Location')\nplt.show()\n\n#VI.Test dataset and prediction\nx_test = [[40.0,67],[20.0,61],[90.0,90],\n [50.0,54],[20.0,80],[90.0,60]]\nprediction = kmeans.predict(x_test)\nprint(\"The predictions:\")\nprint (prediction)\n'''\nOutput of the cluster number of each example\n[3 3 2 3 3 4]\n'''\n\n"
]
| [
[
"numpy.linalg.eigh",
"numpy.array",
"numpy.cov",
"numpy.var"
],
[
"numpy.zeros",
"matplotlib.pyplot.xlabel",
"sklearn.cluster.KMeans",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show",
"pandas.read_csv"
]
]
|
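Note that despite its filename, the clustering script above fits plain `sklearn.cluster.KMeans` rather than `MiniBatchKMeans`. A minimal sketch of its fit/predict flow, using synthetic 2-D points in place of the repo's `data.csv` (the value ranges here are assumptions):

```python
# Condensed fit/predict flow from the script above, on synthetic data.
import numpy as np
from sklearn.cluster import KMeans

rng = np.random.default_rng(0)
dataset1 = rng.uniform(0, 100, size=(1000, 2))  # stand-in for sampled rows

kmeans = KMeans(n_clusters=6, n_init=10).fit(dataset1)
print(kmeans.cluster_centers_)           # the geometric centers (centroids)
print(kmeans.predict([[40.0, 67.0]]))    # cluster index for a test point
```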
jhnnsrs/arbeider | [
"4c5637913331c998a262ae0deca516b236845200"
]
| [
"elements/models.py"
]
| [
"from django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import User\nfrom django.core import serializers\nfrom django.db import models\n# Create your models here.\nfrom pandas import HDFStore\n\nfrom elements.managers import (DelayedRepresentationManager,\n DelayedTransformationManager, PandasManager,\n RepresentationManager, ROIManager,\n TransformationManager)\nfrom larvik.logging import get_module_logger\nfrom larvik.models import LarvikArray\n\nlogger = get_module_logger(__name__)\n\ndef get_sentinel_user():\n return get_user_model().objects.get_or_create(username='deleted')[0]\n\n\nclass Antibody(models.Model):\n name = models.CharField(max_length=100)\n creator = models.ForeignKey(User, blank=True, on_delete=models.CASCADE)\n\n def __str__(self):\n return \"{0}\".format(self.name)\n\n\nclass Experiment(models.Model):\n name = models.CharField(max_length=200)\n description = models.CharField(max_length=1000)\n description_long = models.TextField(null=True,blank=True)\n linked_paper = models.URLField(null=True,blank=True)\n creator = models.ForeignKey(User, on_delete=models.CASCADE)\n image = models.ImageField(upload_to='experiment_banner',null=True,blank=True)\n\n def __str__(self):\n return \"Experiment {0} by {1}\".format(self.name,self.creator.username)\n\nclass ExperimentalGroup(models.Model):\n name = models.CharField(max_length=200, help_text=\"The experimental groups name\")\n description = models.CharField(max_length=1000, help_text=\"A brief summary of applied techniques in this group\")\n creator = models.ForeignKey(User, on_delete=models.CASCADE)\n experiment = models.ForeignKey(Experiment, on_delete=models.CASCADE, help_text=\"The experiment this Group belongs too\")\n iscontrol = models.BooleanField(help_text=\"Is this Experimental Group a ControlGroup?\")\n\n\n def __str__(self):\n return \"ExperimentalGroup {0} on Experiment {1}\".format(self.name,self.experiment.name)\n\nclass FileMatchString(models.Model):\n name = models.CharField(max_length=500)\n regexp = models.CharField(max_length=4000)\n creator = models.ForeignKey(User, on_delete=models.CASCADE)\n\n def __str__(self):\n return \"FileMatchString {0} created by {1}\".format(self.name,self.creator.name)\n\n\nclass Animal(models.Model):\n name = models.CharField(max_length=100)\n age = models.CharField(max_length=400)\n type = models.CharField(max_length=500)\n creator = models.ForeignKey(User, blank=True, on_delete=models.CASCADE)\n experiment = models.ForeignKey(Experiment, blank=True, on_delete=models.CASCADE, null=True)\n experimentalgroup = models.ForeignKey(ExperimentalGroup, blank=True, on_delete=models.CASCADE, null=True)\n\n def __str__(self):\n return \"{0}\".format(self.name)\n\n\nclass Sample(models.Model):\n creator = models.ForeignKey(User, on_delete=models.SET(get_sentinel_user))\n name = models.CharField(max_length=1000)\n experiment = models.ForeignKey(Experiment, on_delete=models.SET_NULL, blank=True, null=True)\n nodeid = models.CharField(max_length=400, null=True, blank=True)\n experimentalgroup = models.ForeignKey(ExperimentalGroup, on_delete=models.SET_NULL, blank=True, null=True)\n animal = models.ForeignKey(Animal, on_delete=models.SET_NULL, blank=True, null=True)\n\n\n def __str__(self):\n return \"{0} by User: {1}\".format(self.name,self.creator.username)\n\n\n def delete(self, *args, **kwargs):\n logger.info(\"Trying to remove Sample H5File\")\n super(Sample, self).delete(*args, **kwargs)\n\n\n def _repr_html_(self):\n from django.core import serializers\n from 
django.forms.models import model_to_dict\n import pandas as pd\n\n return pd.DataFrame.from_records([model_to_dict(self)])._repr_html_()\n\n\n\n\nclass Pandas(models.Model):\n filepath = models.FilePathField(max_length=400) # aka pandas/$answerid.h5\n vid = models.CharField(max_length=1000) # aca vid0, vid1, vid2, vid3\n type = models.CharField(max_length=100)\n compression = models.CharField(max_length=300, blank=True, null=True)\n # Custom Manager to simply create an array\n objects = PandasManager()\n\n def get_dataframe(self):\n logger.info(\"Trying to access file {0} to get dataframe\".format(self.filepath))\n with HDFStore(self.filepath) as store:\n path = self.type + \"/\" + self.vid\n dataframe = store.get(path)\n return dataframe\n\n def set_dataframe(self,dataframe):\n logger.info(\"Trying to access file {0} to set dataframe\".format(self.filepath))\n with HDFStore(self.filepath) as store:\n path = self.type + \"/\" + self.vid\n store.put(path, dataframe)\n\n def delete(self, *args, **kwargs):\n logger.info(\"Trying to remove Dataframe from Filepath {0}\".format(self.filepath))\n with HDFStore(self.filepath) as store:\n path = self.type + \"/\" + self.vid\n if path in store:\n store.delete(path)\n logger.info(\"Deleted Dataframe with VID {1} from file {0}\".format(self.filepath, self.vid))\n\n super(Pandas, self).delete(*args, **kwargs)\n\n def __str__(self):\n return \"Pandas with VID \" + str(self.vid) + \" at \" + str(self.filepath)\n\n\nclass Channel(object):\n pass\n\n\nclass Slice(object):\n pass\n\nclass Impuls(object):\n pass\n\n\nclass ChannelMap(object):\n pass\n\n\nclass Representation(LarvikArray):\n creator = models.ForeignKey(User, on_delete=models.CASCADE)\n inputrep = models.ForeignKey('self', on_delete=models.SET_NULL, blank=True, null= True)\n sample = models.ForeignKey(Sample, on_delete=models.CASCADE,related_name='representations')\n type = models.CharField(max_length=400, blank=True, null=True)\n chain = models.CharField(max_length=9000, blank=True, null=True)\n nodeid = models.CharField(max_length=400, null=True, blank=True)\n meta = models.CharField(max_length=6000, null=True, blank=True) #deprecated\n\n objects = RepresentationManager()\n delayed = DelayedRepresentationManager()\n\n class Meta:\n base_manager_name = \"objects\"\n default_manager_name = \"objects\"\n\n def __str__(self):\n return f'Representation of {self.name}'\n\n def _repr_html_(self):\n return f\"<h3>{self.name}</h3><ul><li>Sample Name: {self.sample.name}</li></ul>\"\n\n\nclass ROI(models.Model):\n nodeid = models.CharField(max_length=400, null=True, blank=True)\n creator = models.ForeignKey(User, on_delete=models.CASCADE)\n vectors = models.CharField(max_length=3000, help_text= \"A json dump of the ROI Vectors (specific for each type)\")\n color = models.CharField(max_length=100, blank=True, null=True)\n signature = models.CharField(max_length=300,null=True, blank=True)\n created_at = models.DateTimeField(auto_now=True)\n representation = models.ForeignKey(Representation, on_delete=models.CASCADE,blank=True, null=True, related_name=\"rois\")\n experimentalgroup = models.ForeignKey(ExperimentalGroup, on_delete=models.SET_NULL, blank=True, null=True)\n\n objects = ROIManager()\n\n class Meta:\n base_manager_name = \"objects\"\n default_manager_name = \"objects\"\n\n\n def __str__(self):\n return f\"ROI created by {self.creator.username} on {self.representation.name}\"\n\nclass Transformation(LarvikArray):\n creator = models.ForeignKey(User, on_delete=models.CASCADE)\n nodeid = 
models.CharField(max_length=400, null=True, blank=True)\n roi = models.ForeignKey(ROI, on_delete=models.CASCADE, related_name='transformations')\n representation = models.ForeignKey(Representation, on_delete=models.SET_NULL, blank=True, null=True, related_name=\"transformations\")\n inputtransformation = models.ForeignKey('self', on_delete=models.SET_NULL, blank=True, null= True)\n\n objects = TransformationManager()\n delayed = DelayedTransformationManager()\n\n class Meta:\n base_manager_name = \"objects\"\n default_manager_name = \"objects\"\n\n def __str__(self):\n return self.name\n"
]
| [
[
"pandas.HDFStore"
]
]
|
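The `Pandas` model above round-trips DataFrames through an HDF5 store keyed by `type + "/" + vid`. A standalone sketch of that pattern (requires the `tables` package, as the original does; the model calls `store.delete`, while the sketch uses `HDFStore.remove`, the removal method documented in current pandas):

```python
# Sketch of the HDFStore round trip performed by the `Pandas` model above.
import pandas as pd

df = pd.DataFrame({"x": [1, 2, 3]})
key = "frames/vid0"  # mirrors the model's `type + "/" + vid` key scheme

with pd.HDFStore("demo.h5") as store:
    store.put(key, df)           # cf. Pandas.set_dataframe
    restored = store.get(key)    # cf. Pandas.get_dataframe
    if key in store:             # cf. the guard in Pandas.delete
        store.remove(key)

print(restored)
```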
wang9702/bert_pytorch | [
"7b3d5c3287dbf4e98bb9f8b55ab6646ee2c6bb35"
]
| [
"test_bert.py"
]
| [
"import argparse\n\nfrom torch.utils.data import DataLoader\n\nfrom bert_pytorch.model import BERT\nfrom bert_pytorch.trainer import BERTTrainer\nfrom bert_pytorch.dataset import BERTDataset, WordVocab\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"-c\", \"--train_dataset\", required=True,\n type=str, help=\"train dataset for train bert\")\n parser.add_argument(\"-t\", \"--test_dataset\", type=str,\n default=None, help=\"test set for evaluate train set\")\n parser.add_argument(\"-v\", \"--vocab_path\", required=True,\n type=str, help=\"built vocab model path with bert-vocab\")\n parser.add_argument(\"-o\", \"--output_path\", required=True,\n type=str, help=\"ex)output/bert.model\")\n\n parser.add_argument(\"-hs\", \"--hidden\", type=int,\n default=256, help=\"hidden size of transformer model\")\n parser.add_argument(\"-l\", \"--layers\", type=int,\n default=8, help=\"number of layers\")\n parser.add_argument(\"-a\", \"--attn_heads\", type=int,\n default=8, help=\"number of attention heads\")\n parser.add_argument(\"-s\", \"--seq_len\", type=int,\n default=20, help=\"maximum sequence len\")\n\n parser.add_argument(\"-b\", \"--batch_size\", type=int,\n default=64, help=\"number of batch_size\")\n parser.add_argument(\"-e\", \"--epochs\", type=int,\n default=10, help=\"number of epochs\")\n parser.add_argument(\"-w\", \"--num_workers\", type=int,\n default=5, help=\"dataloader worker size\")\n\n parser.add_argument(\"--with_cuda\", type=bool, default=True,\n help=\"training with CUDA: true, or false\")\n parser.add_argument(\"--log_freq\", type=int, default=10,\n help=\"printing loss every n iter: setting n\")\n parser.add_argument(\"--corpus_lines\", type=int,\n default=None, help=\"total number of lines in corpus\")\n parser.add_argument(\"--cuda_devices\", type=int, nargs='+',\n default=None, help=\"CUDA device ids\")\n parser.add_argument(\"--on_memory\", type=bool, default=True,\n help=\"Loading on memory: true or false\")\n\n parser.add_argument(\"--lr\", type=float, default=1e-3,\n help=\"learning rate of adam\")\n parser.add_argument(\"--adam_weight_decay\", type=float,\n default=0.01, help=\"weight_decay of adam\")\n parser.add_argument(\"--adam_beta1\", type=float,\n default=0.9, help=\"adam first beta value\")\n parser.add_argument(\"--adam_beta2\", type=float,\n default=0.999, help=\"adam first beta value\")\n\n args = parser.parse_args()\n\n print(\"Loading Vocab\", args.vocab_path)\n vocab = WordVocab.load_vocab(args.vocab_path)\n print(\"Vocab Size: \", len(vocab))\n\n print(\"Loading Train Dataset\", args.train_dataset)\n train_dataset = BERTDataset(args.train_dataset, vocab, seq_len=args.seq_len,\n corpus_lines=args.corpus_lines, on_memory=args.on_memory)\n\n print(\"Loading Test Dataset\", args.test_dataset)\n test_dataset = BERTDataset(args.test_dataset, vocab, seq_len=args.seq_len, on_memory=args.on_memory) \\\n if args.test_dataset is not None else None\n\n print(\"Creating Dataloader\")\n train_data_loader = DataLoader(\n train_dataset, batch_size=args.batch_size, num_workers=args.num_workers)\n test_data_loader = DataLoader(test_dataset, batch_size=args.batch_size, num_workers=args.num_workers) \\\n if test_dataset is not None else None\n\n print(\"Building BERT model\")\n bert = BERT(len(vocab), hidden=args.hidden,\n n_layers=args.layers, attn_heads=args.attn_heads)\n\n print(\"Creating BERT Trainer\")\n trainer = BERTTrainer(bert, len(vocab), train_dataloader=train_data_loader, test_dataloader=test_data_loader,\n 
lr=args.lr, betas=(\n args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay,\n with_cuda=args.with_cuda, cuda_devices=args.cuda_devices, log_freq=args.log_freq)\n\n print(\"Training Start\")\n for epoch in range(args.epochs):\n trainer.train(epoch)\n trainer.save(epoch, args.output_path)\n\n if test_data_loader is not None:\n trainer.test(epoch)\n"
]
| [
[
"torch.utils.data.DataLoader"
]
]
|
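One sharp edge in the script above: `argparse` arguments declared with `type=bool` (e.g. `--with_cuda`, `--on_memory`) do not parse `"false"` as `False`, since `bool("false")` is `True` for any non-empty string. A common workaround, not taken from the original repo, is an explicit converter:

```python
# `type=bool` pitfall: bool("false") is True. Use a string-to-bool parser.
import argparse

def str2bool(v):
    return str(v).lower() in ("yes", "true", "t", "1")

parser = argparse.ArgumentParser()
parser.add_argument("--with_cuda", type=str2bool, default=True)
print(parser.parse_args(["--with_cuda", "false"]).with_cuda)  # False
```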
pryo/openNMT | [
"204609435639603022b1068bf915144e36b11f76"
]
| [
"tools/vid_feature_extractor.py"
]
| [
"import argparse\nimport os\n\nimport tqdm\nfrom multiprocessing import Manager\nimport numpy as np\nimport cv2\nimport torch\nimport torch.nn as nn\nfrom PIL import Image\nimport pretrainedmodels\nfrom pretrainedmodels.utils import TransformImage\n\n\nQ_FIN = \"finished\" # end-of-queue flag\n\n\ndef read_to_imgs(file):\n \"\"\"Yield images and their frame number from a video file.\"\"\"\n vidcap = cv2.VideoCapture(file)\n success, image = vidcap.read()\n idx = 0\n while success:\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n yield image, idx\n idx += 1\n success, image = vidcap.read()\n\n\ndef vid_len(path):\n \"\"\"Return the length of a video.\"\"\"\n return int(cv2.VideoCapture(path).get(cv2.CAP_PROP_FRAME_COUNT))\n\n\nclass VidDset(object):\n \"\"\"For each video, yield its frames.\"\"\"\n def __init__(self, model, root_dir, filenames):\n self.root_dir = root_dir\n self.filenames = filenames\n self.paths = [os.path.join(self.root_dir, f) for f in self.filenames]\n self.xform = TransformImage(model)\n\n self.current = 0\n\n def __len__(self):\n return len(self.filenames)\n\n def __getitem__(self, i):\n path = self.paths[i]\n return ((path, idx, self.xform(Image.fromarray(img)))\n for img, idx in read_to_imgs(path))\n\n def __iter__(self):\n return self\n\n def next(self):\n if self.current >= len(self):\n raise StopIteration\n else:\n self.current += 1\n return self[self.current - 1]\n\n def __next__(self):\n return self.next()\n\n\ndef collate_tensor(batch):\n batch[-1] = torch.stack(batch[-1], 0)\n\n\ndef batch(dset, batch_size):\n \"\"\"Collate frames into batches of equal length.\"\"\"\n batch = [[], [], []]\n batch_ct = 0\n for seq in dset:\n for path, idx, img in seq:\n if batch_ct == batch_size:\n collate_tensor(batch)\n yield batch\n batch = [[], [], []]\n batch_ct = 0\n batch[0].append(path)\n batch[1].append(idx)\n batch[2].append(img)\n batch_ct += 1\n if batch_ct != 0:\n collate_tensor(batch)\n yield batch\n\n\nclass FeatureExtractor(nn.Module):\n \"\"\"Extract feature vectors from a batch of frames.\"\"\"\n def __init__(self):\n super(FeatureExtractor, self).__init__()\n self.model = pretrainedmodels.resnet152()\n self.FEAT_SIZE = 2048\n\n def forward(self, x):\n return self.model.avgpool(\n self.model.features(x)).view(-1, 1, self.FEAT_SIZE)\n\n\nclass Reconstructor(object):\n \"\"\"Turn batches of feature vectors into sequences for each video.\n Assumes data is ordered (use one reconstructor per process).\n :func:`push()` batches in. 
When finished, :func:`flush()`\n the last sequence.\n \"\"\"\n\n def __init__(self, out_path, finished_queue):\n self.out_path = out_path\n self.feats = None\n self.finished_queue = finished_queue\n\n def save(self, path, feats):\n np.save(path, feats.numpy())\n\n @staticmethod\n def name_(path, out_path):\n vid_path = path\n vid_fname = os.path.basename(vid_path)\n vid_id = os.path.splitext(vid_fname)[0]\n\n save_fname = vid_id + \".npy\"\n save_path = os.path.join(out_path, save_fname)\n return save_path, vid_id\n\n def name(self, path):\n return self.name_(path, self.out_path)\n\n def push(self, paths, idxs, feats):\n start = 0\n for i, idx in enumerate(idxs):\n if idx == 0:\n if self.feats is None and i == 0:\n # degenerate case\n continue\n these_finished_seq_feats = feats[start:i]\n if self.feats is not None:\n all_last_seq_feats = torch.cat(\n [self.feats, these_finished_seq_feats], 0)\n else:\n all_last_seq_feats = these_finished_seq_feats\n if i - 1 < 0:\n name = self.path\n else:\n name = paths[i-1]\n save_path, vid_id = self.name(name)\n self.save(save_path, all_last_seq_feats)\n n_feats = all_last_seq_feats.shape[0]\n self.finished_queue.put((vid_id, n_feats))\n self.feats = None\n start = i\n # cache the features\n if self.feats is None:\n self.feats = feats[start:]\n else:\n self.feats = torch.cat([self.feats, feats[start:]], 0)\n self.path = paths[-1]\n\n def flush(self):\n if self.feats is not None: # shouldn't be\n save_path, vid_id = self.name(self.path)\n self.save(save_path, self.feats)\n self.finished_queue.put((vid_id, self.feats.shape[0]))\n\n\ndef finished_watcher(finished_queue, world_size, root_dir, files):\n \"\"\"Keep a progress bar of frames finished.\"\"\"\n n_frames = sum(vid_len(os.path.join(root_dir, f)) for f in files)\n n_finished_frames = 0\n with tqdm.tqdm(total=n_frames, unit=\"Fr\") as pbar:\n n_proc_finished = 0\n while True:\n item = finished_queue.get()\n if item == Q_FIN:\n n_proc_finished += 1\n if n_proc_finished == world_size:\n return\n else:\n vid_id, n_these_frames = item\n n_finished_frames += n_these_frames\n pbar.set_postfix(vid=vid_id)\n pbar.update(n_these_frames)\n\n\ndef run(device_id, world_size, root_dir, batch_size_per_device,\n feats_queue, files):\n \"\"\"Process a disjoint subset of the videos on each device.\"\"\"\n if world_size > 1:\n these_files = [f for i, f in enumerate(files)\n if i % world_size == device_id]\n else:\n these_files = files\n\n fe = FeatureExtractor()\n dset = VidDset(fe.model, root_dir, these_files)\n dev = torch.device(\"cuda\", device_id) \\\n if device_id >= 0 else torch.device(\"cpu\")\n fe.to(dev)\n fe = fe.eval()\n with torch.no_grad():\n for samp in batch(dset, batch_size_per_device):\n paths, idxs, images = samp\n images = images.to(dev)\n feats = fe(images)\n if torch.is_tensor(feats):\n feats = feats.to(\"cpu\")\n else:\n feats = [f.to(\"cpu\") for f in feats]\n feats_queue.put((paths, idxs, feats))\n feats_queue.put(Q_FIN)\n return\n\n\ndef saver(out_path, feats_queue, finished_queue):\n rc = Reconstructor(out_path, finished_queue)\n while True:\n item = feats_queue.get()\n if item == Q_FIN:\n rc.flush()\n finished_queue.put(Q_FIN)\n return\n else:\n paths, idxs, feats = item\n rc.push(paths, idxs, feats)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--root_dir\", type=str, required=True,\n help=\"Directory of videos.\")\n parser.add_argument(\"--out_dir\", type=str, required=True,\n help=\"Directory for output features.\")\n 
parser.add_argument(\"--world_size\", type=int, default=1,\n help=\"Number of devices to run on.\")\n parser.add_argument(\"--batch_size_per_device\", type=int, default=512)\n opt = parser.parse_args()\n\n batch_size_per_device = opt.batch_size_per_device\n root_dir = opt.root_dir\n out_path = opt.out_dir\n if not os.path.exists(out_path):\n os.makedirs(out_path)\n\n # mp queues don't work well between procs unless they're from a manager\n manager = Manager()\n finished_queue = manager.Queue()\n\n world_size = opt.world_size if torch.cuda.is_available() else -1\n\n mp = torch.multiprocessing.get_context(\"spawn\")\n procs = []\n\n print(\"Starting processing. Progress bar startup can take some time, but \"\n \"processing will start in the meantime.\")\n\n files = list(sorted(list(os.listdir(root_dir))))\n files = [f for f in files\n if os.path.basename(Reconstructor.name_(f, out_path)[0])\n not in os.listdir(out_path)]\n\n procs.append(mp.Process(\n target=finished_watcher,\n args=(finished_queue, world_size, root_dir, files),\n daemon=False\n ))\n procs[0].start()\n\n if world_size >= 1:\n feat_queues = [manager.Queue(2) for _ in range(world_size)]\n for feats_queue, device_id in zip(feat_queues, range(world_size)):\n # each device has its own saver so that reconstructing is easier\n procs.append(mp.Process(\n target=run,\n args=(device_id, world_size, root_dir,\n batch_size_per_device, feats_queue, files),\n daemon=True))\n procs[-1].start()\n procs.append(mp.Process(\n target=saver,\n args=(out_path, feats_queue, finished_queue),\n daemon=True))\n procs[-1].start()\n else:\n feats_queue = manager.Queue()\n procs.append(mp.Process(\n target=run,\n args=(-1, 1, root_dir,\n batch_size_per_device, feats_queue, files),\n daemon=True))\n procs[-1].start()\n procs.append(mp.Process(\n target=saver,\n args=(out_path, feats_queue, finished_queue),\n daemon=True))\n procs[-1].start()\n\n for p in procs:\n p.join()\n"
]
| [
[
"torch.device",
"torch.cat",
"torch.stack",
"torch.is_tensor",
"torch.no_grad",
"torch.multiprocessing.get_context",
"torch.cuda.is_available"
]
]
|
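The extractor above ships batches between processes through `Manager` queues under a `"spawn"` context, terminating consumers with a `Q_FIN` sentinel. A minimal sketch of that producer/consumer skeleton, with toy strings standing in for feature batches:

```python
# Manager-queue + spawn-context pattern from the extractor above,
# reduced to a sentinel-terminated producer/consumer pair.
from multiprocessing import Manager
import torch

Q_FIN = "finished"  # same end-of-queue flag as above

def producer(q):
    for item in ("batch-0", "batch-1"):
        q.put(item)
    q.put(Q_FIN)

def consumer(q):
    while True:
        item = q.get()
        if item == Q_FIN:
            return
        print("got", item)

if __name__ == "__main__":
    mp = torch.multiprocessing.get_context("spawn")
    manager = Manager()          # keep a reference so the server stays alive
    queue = manager.Queue()
    procs = [mp.Process(target=producer, args=(queue,)),
             mp.Process(target=consumer, args=(queue,))]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
```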
neolixcn/nutonomy_pointpillars | [
"03f46f6de97c0c97d7bc98d7af3daee215d81a30",
"03f46f6de97c0c97d7bc98d7af3daee215d81a30"
]
| [
"second/kittiviewer/viewer.py",
"second/pytorch/train_pruned_model.py"
]
| [
"import io as sysio\nimport json\nimport os\nimport pickle\nimport sys\nimport time\nfrom functools import partial\nfrom pathlib import Path\nimport datetime\nimport fire\nimport matplotlib.pyplot as plt\nimport numba\nimport numpy as np\nimport OpenGL.GL as pygl\nimport pyqtgraph.opengl as gl\nimport skimage\nfrom matplotlib.backends.backend_qt5agg import \\\n FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.backends.backend_qt5agg import \\\n NavigationToolbar2QT as NavigationToolbar\nfrom matplotlib.figure import Figure\nfrom PyQt5 import QtCore, QtGui\nfrom PyQt5.QtCore import QTimer, pyqtSignal, pyqtSlot\nfrom PyQt5.QtGui import QIcon, QMouseEvent, QPainter\nfrom PyQt5.QtWidgets import (\n QApplication, QCheckBox, QComboBox, QDialog, QFormLayout, QGroupBox,\n QHBoxLayout, QLabel, QLineEdit, QMainWindow, QPlainTextEdit, QTextEdit,\n QPushButton, QSizePolicy, QVBoxLayout, QWidget, QProgressBar)\nfrom shapely.geometry import Polygon\nfrom skimage import io\n\nimport second.core.box_np_ops as box_np_ops\nimport second.core.preprocess as prep\nimport second.kittiviewer.control_panel as panel\nfrom second.core.anchor_generator import AnchorGeneratorStride\nfrom second.core.box_coders import GroundBox3dCoder\nfrom second.core.point_cloud.point_cloud_ops import points_to_voxel\nfrom second.core.region_similarity import (\n DistanceSimilarity, NearestIouSimilarity, RotateIouSimilarity)\nfrom second.core.sample_ops import DataBaseSamplerV2\nfrom second.core.target_assigner import TargetAssigner\nfrom second.data import kitti_common as kitti\nfrom second.kittiviewer.glwidget import KittiGLViewWidget\nfrom second.protos import pipeline_pb2\nfrom second.utils import bbox_plot\nfrom second.utils.bbox_plot import GLColor\nfrom second.utils.eval import get_coco_eval_result, get_official_eval_result\nfrom second.pytorch.inference import TorchInferenceContext\nfrom second.utils.progress_bar import list_bar\n\"\"\"\nfrom wavedata.tools.obj_detection import obj_utils\nfrom avod.core.anchor_generators import grid_anchor_3d_generator\n\"\"\"\n\n\nclass KittiDrawControl(panel.ControlPanel):\n def __init__(self, title, parent=None):\n super().__init__(column_nums=[2, 1, 1, 2], tab_num=4, parent=parent)\n self.setWindowTitle(title)\n with self.tab(0, \"common\"):\n with self.column(0):\n self.add_listedit(\"UsedClass\", str)\n self.add_fspinbox(\"PointSize\", 0.01, 0.5, 0.01, 0.05)\n self.add_fspinbox(\"PointAlpha\", 0.0, 1.0, 0.05, 0.5)\n self.add_colorbutton(\"PointColor\",\n bbox_plot.gl_color(GLColor.Gray))\n self.add_fspinbox(\"GTPointSize\", 0.01, 0.5, 0.01, 0.2)\n self.add_fspinbox(\"GTPointAlpha\", 0.0, 1.0, 0.05, 0.5)\n self.add_colorbutton(\"GTPointColor\",\n bbox_plot.gl_color(GLColor.Purple))\n self.add_checkbox(\"WithReflectivity\")\n self.add_checkbox(\"DrawGTBoxes\")\n self.add_checkbox(\"DrawGTLabels\")\n self.add_colorbutton(\"GTBoxColor\",\n bbox_plot.gl_color(GLColor.Green))\n self.add_fspinbox(\"GTBoxAlpha\", 0.0, 1.0, 0.05, 0.5)\n self.add_checkbox(\"DrawDTBoxes\")\n \n self.add_checkbox(\"DrawDTLabels\")\n self.add_checkbox(\"DTScoreAsAlpha\")\n self.add_fspinbox(\"DTScoreThreshold\", 0.0, 1.0, 0.01, 0.3)\n self.add_colorbutton(\"DTBoxColor\",\n bbox_plot.gl_color(GLColor.Blue))\n self.add_fspinbox(\"DTBoxAlpha\", 0.0, 1.0, 0.05, 0.5)\n self.add_fspinbox(\"DTBoxLineWidth\", 0.25, 10.0, 0.25, 1.0)\n with self.column(1):\n self.add_arrayedit(\"CoorsRange\", np.float64,\n [-40, -40, -2, 40, 40, 4], [6])\n self.add_arrayedit(\"VoxelSize\", np.float64, [0.2, 0.2, 0.4],\n [3])\n 
self.add_checkbox(\"DrawVoxels\")\n self.add_colorbutton(\"PosVoxelColor\",\n bbox_plot.gl_color(GLColor.Yellow))\n self.add_fspinbox(\"PosVoxelAlpha\", 0.0, 1.0, 0.05, 0.5)\n self.add_colorbutton(\"NegVoxelColor\",\n bbox_plot.gl_color(GLColor.Purple))\n self.add_fspinbox(\"NegVoxelAlpha\", 0.0, 1.0, 0.05, 0.5)\n self.add_checkbox(\"DrawPositiveVoxelsOnly\")\n self.add_checkbox(\"RemoveOutsidePoint\")\n with self.tab(1, \"inference\"):\n with self.column(0):\n self.add_checkbox(\"TensorflowInference\")\n with self.tab(2, \"anchors\"):\n with self.column(0):\n self.add_checkbox(\"DrawAnchors\")\n self.add_arrayedit(\"AnchorSize\", np.float64, [1.6, 3.9, 1.56],\n [3])\n self.add_arrayedit(\"AnchorOffset\", np.float64,\n [0, -39.8, -1.0], [3])\n self.add_arrayedit(\"AnchorStride\", np.float64, [0.4, 0.4, 0.0],\n [3])\n self.add_fspinbox(\"MatchThreshold\", 0.0, 1.0, 0.1)\n self.add_fspinbox(\"UnMatchThreshold\", 0.0, 1.0, 0.1)\n self.add_combobox(\"IoUMethod\", [\"RotateIoU\", \"NearestIoU\"])\n with self.tab(3, \"sample and augmentation\"):\n with self.column(0):\n self.add_checkbox(\"EnableSample\")\n self.add_jsonedit(\"SampleGroups\")\n self.add_arrayedit(\"SampleGlobleRotRange\", np.float64, [0.78, 2.35],\n [2])\n with self.column(1):\n self.add_checkbox(\"EnableAugmentation\")\n self.add_checkbox(\"GroupNoisePerObject\")\n\n\nclass Settings:\n def __init__(self, cfg_path):\n self._cfg_path = cfg_path\n self._settings = {}\n self._setting_defaultvalue = {}\n if not Path(self._cfg_path).exists():\n with open(self._cfg_path, 'w') as f:\n f.write(json.dumps(self._settings, indent=2, sort_keys=True))\n else:\n with open(self._cfg_path, 'r') as f:\n self._settings = json.loads(f.read())\n\n def set(self, name, value):\n self._settings[name] = value\n with open(self._cfg_path, 'w') as f:\n f.write(json.dumps(self._settings, indent=2, sort_keys=True))\n\n def get(self, name, default_value=None):\n if name in self._settings:\n return self._settings[name]\n if default_value is None:\n raise ValueError(\"name not exist\")\n return default_value\n\n def save(self, path):\n with open(path, 'w') as f:\n f.write(json.dumps(self._settings, indent=2, sort_keys=True))\n\n def load(self, path):\n with open(self._cfg_path, 'r') as f:\n self._settings = json.loads(f.read())\n\n\ndef _riou3d_shapely(rbboxes1, rbboxes2):\n N, K = rbboxes1.shape[0], rbboxes2.shape[0]\n corners1 = box_np_ops.center_to_corner_box2d(\n rbboxes1[:, :2], rbboxes1[:, 3:5], rbboxes1[:, 6])\n corners2 = box_np_ops.center_to_corner_box2d(\n rbboxes2[:, :2], rbboxes2[:, 3:5], rbboxes2[:, 6])\n iou = np.zeros([N, K], dtype=np.float32)\n for i in range(N):\n for j in range(K):\n iw = (min(rbboxes1[i, 2] + rbboxes1[i, 5],\n rbboxes2[j, 2] + rbboxes2[j, 5]) - max(\n rbboxes1[i, 2], rbboxes2[j, 2]))\n if iw > 0:\n p1 = Polygon(corners1[i])\n p2 = Polygon(corners2[j])\n inc = p1.intersection(p2).area * iw\n # inc = p1.intersection(p2).area\n if inc > 0:\n iou[i, j] = inc / (p1.area * rbboxes1[i, 5] +\n p2.area * rbboxes2[j, 5] - inc)\n # iou[i, j] = inc / (p1.area + p2.area - inc)\n\n return iou\n\n\ndef kitti_anno_to_corners(info, annos=None):\n rect = info['calib/R0_rect']\n P2 = info['calib/P2']\n Tr_velo_to_cam = info['calib/Tr_velo_to_cam']\n if annos is None:\n annos = info['annos']\n dims = annos['dimensions']\n loc = annos['location']\n rots = annos['rotation_y']\n scores = None\n if 'score' in annos:\n scores = annos['score']\n boxes_camera = np.concatenate([loc, dims, rots[..., np.newaxis]], axis=1)\n boxes_lidar = 
box_np_ops.box_camera_to_lidar(boxes_camera, rect,\n Tr_velo_to_cam)\n boxes_corners = box_np_ops.center_to_corner_box3d(\n boxes_lidar[:, :3],\n boxes_lidar[:, 3:6],\n boxes_lidar[:, 6],\n origin=[0.5, 0.5, 0],\n axis=2)\n return boxes_corners, scores, boxes_lidar\n\n\nclass MatPlotLibView(FigureCanvas):\n def __init__(self, parent=None, rect=[5, 4], dpi=100):\n # super().__init__()\n self.fig = Figure(figsize=(rect[0], rect[1]), dpi=dpi)\n self.ax = self.fig.add_subplot(1, 1, 1)\n # self.ax.axis('off')\n FigureCanvas.__init__(self, self.fig)\n self.setParent(parent)\n #self.axes.set_ylim([-1,1])\n #self.axes.set_xlim([0,31.4159*2])\n FigureCanvas.setSizePolicy(self, QSizePolicy.Expanding,\n QSizePolicy.Expanding)\n FigureCanvas.updateGeometry(self)\n self.draw()\n\n def reset_plot(self):\n self.fig.clf()\n self.ax = self.fig.add_subplot(1, 1, 1)\n\n\nclass MatPlotLibViewTab(FigureCanvas):\n def __init__(self, rect=[5, 4], dpi=100, parent=None):\n # super().__init__()\n self.fig = Figure(figsize=(rect[0], rect[1]), dpi=dpi)\n self.ax = self.fig.add_subplot(1, 1, 1)\n # self.ax.axis('off')\n FigureCanvas.__init__(self, self.fig)\n self.setParent(parent)\n #self.axes.set_ylim([-1,1])\n #self.axes.set_xlim([0,31.4159*2])\n FigureCanvas.setSizePolicy(self, QSizePolicy.Expanding,\n QSizePolicy.Expanding)\n FigureCanvas.updateGeometry(self)\n self.draw()\n\n def reset_plot(self):\n self.fig.clf()\n self.ax = self.fig.add_subplot(1, 1, 1)\n\n\nclass MatPlotLibWidget(QWidget):\n def __init__(self, parent=None, rect=[5, 4], dpi=100):\n super().__init__(parent)\n self.w_plot = MatPlotLibView(self, rect, dpi)\n self.w_plt_toolbar = NavigationToolbar(self.w_plot, self)\n plt_layout = QVBoxLayout()\n plt_layout.addWidget(self.w_plot)\n plt_layout.addWidget(self.w_plt_toolbar)\n self.setLayout(plt_layout)\n\n def reset_plot(self):\n return self.w_plot.reset_plot()\n\n @property\n def axis(self):\n return self.w_plot.ax\n\n\nclass KittiPointCloudView(KittiGLViewWidget):\n def __init__(self,\n config,\n parent=None,\n voxel_size=None,\n coors_range=None,\n max_voxels=50000,\n max_num_points=35):\n super().__init__(parent=parent)\n if voxel_size is None:\n voxel_size = [0.2, 0.2, 0.4]\n if coors_range is None:\n coors_range = [0, -40, -3, 70.4, 40, 1]\n self.w_config = config\n self._voxel_size = voxel_size\n self._coors_range = coors_range\n self._max_voxels = max_voxels\n self._max_num_points = max_num_points\n bk_color = (0.8, 0.8, 0.8, 1.0)\n bk_color = list([int(v * 255) for v in bk_color])\n # self.setBackgroundColor(*bk_color)\n # self.w_gl_widget.setBackgroundColor('w')\n self.mousePressed.connect(self.on_mousePressed)\n self.setCameraPosition(distance=20, azimuth=-180, elevation=30)\n\n def on_mousePressed(self, pos):\n pass\n\n def reset_camera(self):\n self.set_camera_position(\n center=(5, 0, 0), distance=20, azimuth=-180, elevation=30)\n self.update()\n\n def draw_frustum(self, bboxes, rect, Trv2c, P2):\n # Y = C(R @ (rect @ Trv2c @ X) + T)\n # uv = [Y0/Y2, Y1/Y2]\n frustums = []\n C, R, T = box_np_ops.projection_matrix_to_CRT_kitti(P2)\n frustums = box_np_ops.get_frustum_v2(bboxes, C)\n frustums -= T\n # frustums = np.linalg.inv(R) @ frustums.T\n frustums = np.einsum('ij, akj->aki', np.linalg.inv(R), frustums)\n frustums = box_np_ops.camera_to_lidar(frustums, rect, Trv2c)\n self.boxes3d('frustums', frustums, colors=GLColor.Write, alpha=0.5)\n\n def draw_cropped_frustum(self, bboxes, rect, Trv2c, P2):\n # Y = C(R @ (rect @ Trv2c @ X) + T)\n # uv = [Y0/Y2, Y1/Y2]\n self.boxes3d(\n 'cropped_frustums',\n prep.random_crop_frustum(bboxes, rect, Trv2c, 
P2),\n colors=GLColor.Write,\n alpha=0.5)\n\n def draw_anchors(self,\n gt_boxes_lidar,\n points=None,\n image_idx=0,\n gt_names=None):\n # print(gt_names)\n voxel_size = np.array(self._voxel_size, dtype=np.float32)\n # voxel_size = np.array([0.2, 0.2, 0.4], dtype=np.float32)\n coors_range = np.array(self._coors_range, dtype=np.float32)\n # coors_range = np.array([0, -40, -3, 70.4, 40, 1], dtype=np.float32)\n grid_size = (coors_range[3:] - coors_range[:3]) / voxel_size\n grid_size = np.round(grid_size).astype(np.int64)\n # print(grid_size)\n bv_range = coors_range[[0, 1, 3, 4]]\n anchor_generator = AnchorGeneratorStride(\n # sizes=[0.6, 0.8, 1.73, 0.6, 1.76, 1.73],\n sizes=[0.6, 1.76, 1.73],\n anchor_strides=[0.4, 0.4, 0.0],\n anchor_offsets=[0.2, -39.8, -1.465],\n rotations=[0, 1.5707963267948966],\n match_threshold=0.5,\n unmatch_threshold=0.35,\n )\n anchor_generator1 = AnchorGeneratorStride(\n # sizes=[0.6, 0.8, 1.73, 0.6, 1.76, 1.73],\n sizes=[0.6, 0.8, 1.73],\n anchor_strides=[0.4, 0.4, 0.0],\n anchor_offsets=[0.2, -39.8, -1.465],\n rotations=[0, 1.5707963267948966],\n match_threshold=0.5,\n unmatch_threshold=0.35,\n )\n anchor_generator2 = AnchorGeneratorStride(\n # sizes=[0.6, 0.8, 1.73, 0.6, 1.76, 1.73],\n sizes=[1.6, 3.9, 1.56],\n anchor_strides=[0.4, 0.4, 0.0],\n anchor_offsets=[0.2, -39.8, -1.55442884],\n rotations=[0, 1.5707963267948966],\n # rotations=[0],\n match_threshold=0.6,\n unmatch_threshold=0.45,\n )\n anchor_generators = [anchor_generator2]\n box_coder = GroundBox3dCoder()\n # similarity_calc = DistanceSimilarity(1.0)\n similarity_calc = NearestIouSimilarity()\n target_assigner = TargetAssigner(box_coder, anchor_generators,\n similarity_calc)\n # anchors = box_np_ops.create_anchors_v2(\n # bv_range, grid_size[:2] // 2, sizes=anchor_dims)\n # matched_thresholds = [0.45, 0.45, 0.6]\n # unmatched_thresholds = [0.3, 0.3, 0.45]\n\n t = time.time()\n feature_map_size = grid_size[:2] // 2\n feature_map_size = [*feature_map_size, 1][::-1]\n print(feature_map_size)\n # \"\"\"\n ret = target_assigner.generate_anchors(feature_map_size)\n anchors = ret[\"anchors\"]\n anchors = anchors.reshape([-1, 7])\n anchors_bv = box_np_ops.rbbox2d_to_near_bbox(\n anchors[:, [0, 1, 3, 4, 6]])\n matched_thresholds = ret[\"matched_thresholds\"]\n unmatched_thresholds = ret[\"unmatched_thresholds\"]\n print(f\"num_anchors_ {len(anchors)}\")\n if points is not None:\n voxels, coors, num_points = points_to_voxel(\n points,\n self._voxel_size,\n # self._coors_range,\n coors_range,\n self._max_num_points,\n reverse_index=True,\n max_voxels=self._max_voxels)\n\n # print(np.min(coors, 0), np.max(coors, 0))\n dense_voxel_map = box_np_ops.sparse_sum_for_anchors_mask(\n coors, tuple(grid_size[::-1][1:]))\n dense_voxel_map = dense_voxel_map.cumsum(0)\n dense_voxel_map = dense_voxel_map.cumsum(1)\n anchors_mask = box_np_ops.fused_get_anchors_area(\n dense_voxel_map, anchors_bv, voxel_size, coors_range,\n grid_size) > 1\n print(np.sum(anchors_mask), anchors_mask.shape)\n class_names = [\n 'Vehicle', \"Pedestrian\", \"Cyclist\", 'Others', 'Others_moving', \"Others_stationary\", 'Vehicle'\n ]\n gt_classes = np.array(\n [class_names.index(n) + 1 for n in gt_names], dtype=np.int32)\n t = time.time()\n target_dict = target_assigner.assign(\n anchors,\n gt_boxes_lidar,\n anchors_mask,\n gt_classes=gt_classes,\n matched_thresholds=matched_thresholds,\n unmatched_thresholds=unmatched_thresholds)\n labels = target_dict[\"labels\"]\n reg_targets = target_dict[\"bbox_targets\"]\n reg_weights = 
target_dict[\"bbox_outside_weights\"]\n # print(labels[labels > 0])\n # decoded_reg_targets = box_np_ops.second_box_decode(reg_targets, anchors)\n # print(decoded_reg_targets.reshape(-1, 7)[labels > 0])\n print(\"target time\", (time.time() - t))\n print(f\"num_pos={np.sum(labels > 0)}\")\n colors = np.zeros([anchors.shape[0], 4])\n ignored_color = bbox_plot.gl_color(GLColor.Gray, 0.5)\n pos_color = bbox_plot.gl_color(GLColor.Cyan, 0.5)\n\n colors[labels == -1] = ignored_color\n colors[labels > 0] = pos_color\n cared_anchors_mask = np.logical_and(labels != 0, anchors_mask)\n colors = colors[cared_anchors_mask]\n anchors_not_neg = box_np_ops.rbbox3d_to_corners(anchors)[\n cared_anchors_mask]\n self.boxes3d(\"anchors\", anchors_not_neg, colors=colors)\n\n\n def draw_bounding_box(self):\n bbox = box_np_ops.minmax_to_corner_3d(np.array([self.w_config.get(\"CoorsRange\")]))\n self.boxes3d(\"bound\", bbox, GLColor.Green)\n\n def draw_voxels(self, points, gt_boxes=None):\n pos_color = self.w_config.get(\"PosVoxelColor\")[:3]\n pos_color = (*pos_color, self.w_config.get(\"PosVoxelAlpha\"))\n neg_color = self.w_config.get(\"NegVoxelColor\")[:3]\n neg_color = (*neg_color, self.w_config.get(\"NegVoxelAlpha\"))\n\n voxel_size = np.array(self.w_config.get(\"VoxelSize\"), dtype=np.float32)\n coors_range = np.array(\n self.w_config.get(\"CoorsRange\"), dtype=np.float32)\n voxels, coors, num_points = points_to_voxel(\n points,\n voxel_size,\n coors_range,\n self._max_num_points,\n reverse_index=True,\n max_voxels=self._max_voxels)\n # print(\"num_voxels\", num_points.shape[0])\n \"\"\"\n total_num_points = 0\n for i in range(self._max_num_points):\n num = np.sum(num_points.astype(np.int64) == i)\n total_num_points += num * i\n if num > 0:\n print(f\"num={i} have {num} voxels\")\n print(\"total_num_points\", points.shape[0], total_num_points)\n \"\"\"\n grid_size = (coors_range[3:] - coors_range[:3]) / voxel_size\n grid_size = np.round(grid_size).astype(np.int64)\n\n shift = coors_range[:3]\n voxel_origins = coors[:, ::-1] * voxel_size + shift\n voxel_maxs = voxel_origins + voxel_size\n voxel_boxes = np.concatenate([voxel_origins, voxel_maxs], axis=1)\n voxel_box_corners = box_np_ops.minmax_to_corner_3d(voxel_boxes)\n pos_only = self.w_config.get(\"DrawPositiveVoxelsOnly\")\n if gt_boxes is not None:\n labels = box_np_ops.assign_label_to_voxel(\n gt_boxes, coors, voxel_size, coors_range).astype(np.bool)\n if pos_only:\n voxel_box_corners = voxel_box_corners[labels]\n colors = np.zeros([voxel_box_corners.shape[0], 4])\n if pos_only:\n colors[:] = pos_color\n else:\n colors[np.logical_not(labels)] = neg_color\n colors[labels] = pos_color\n else:\n if not pos_only:\n colors = np.zeros([voxel_box_corners.shape[0], 4])\n colors[:] = neg_color\n else:\n voxel_box_corners = np.zeros((0, 8, 3))\n colors = np.zeros((0, 4))\n self.boxes3d(\"voxels\", voxel_box_corners, colors)\n\n\nclass KittiViewer(QMainWindow):\n def __init__(self):\n super().__init__()\n self.title = 'KittiViewer'\n self.bbox_window = [10, 10, 1600, 900]\n self.sstream = sysio.StringIO()\n self.json_setting = Settings(str(Path.home() / \".kittiviewerrc\"))\n self.kitti_infos = None\n self.detection_annos = None\n self.image_idxes = None\n self.root_path = None\n self.current_idx = 0\n self.dt_image_idxes = None\n self.current_image = None\n self.init_ui()\n self.kitti_info = None\n self.points = None\n self.gt_boxes = None\n self.gt_names = None\n self.difficulty = None\n self.group_ids = None\n self.inference_ctx = None\n\n def 
init_ui(self):\n\n self.setWindowTitle(self.title)\n self.setGeometry(*self.bbox_window)\n # self.statusBar().showMessage('Message in statusbar.')\n control_panel_layout = QVBoxLayout()\n root_path = self.json_setting.get(\"kitti_root_path\", \"\")\n self.w_root_path = QLineEdit(root_path)\n iamge_idx = self.json_setting.get(\"image_idx\", \"0\")\n self.w_imgidx = QLineEdit(iamge_idx)\n info_path = self.json_setting.get(\"latest_info_path\", \"\")\n self.w_info_path = QLineEdit(info_path)\n det_path = self.json_setting.get(\"latest_det_path\", \"\")\n self.w_det_path = QLineEdit(det_path)\n # self.w_cmd = QLineEdit()\n # self.w_cmd.returnPressed.connect(self.on_CmdReturnPressed)\n self.w_load = QPushButton('load info')\n self.w_load.clicked.connect(self.on_loadButtonPressed)\n self.w_load_det = QPushButton('load detection')\n self.w_load_det.clicked.connect(self.on_loadDetPressed)\n self.w_config = KittiDrawControl('ctrl')\n config = self.json_setting.get(\"config\", \"\")\n if config != \"\":\n self.w_config.loads(config)\n self.w_config.configChanged.connect(self.on_configchanged)\n self.w_plot = QPushButton('plot')\n self.w_plot.clicked.connect(self.on_plotButtonPressed)\n\n self.w_show_panel = QPushButton('control panel')\n self.w_show_panel.clicked.connect(self.on_panel_clicked)\n\n center_widget = QWidget(self)\n self.w_output = QTextEdit()\n self.w_config_gbox = QGroupBox(\"Read Config\")\n layout = QFormLayout()\n layout.addRow(QLabel(\"root path:\"), self.w_root_path)\n layout.addRow(QLabel(\"info path:\"), self.w_info_path)\n layout.addRow(QLabel(\"image idx:\"), self.w_imgidx)\n layout.addRow(QLabel(\"det path:\"), self.w_det_path)\n self.w_config_gbox.setLayout(layout)\n self.w_plt = MatPlotLibView()\n self.w_plt_toolbar = NavigationToolbar(self.w_plt, center_widget)\n # self.w_plt.ax.set_axis_off()\n # self.w_plt.ax.set_yticklabels([])\n # self.w_plt.ax.set_xticklabels([])\n plt_layout = QVBoxLayout()\n plt_layout.addWidget(self.w_plt)\n plt_layout.addWidget(self.w_plt_toolbar)\n\n control_panel_layout.addWidget(self.w_config_gbox)\n # control_panel_layout.addWidget(self.w_info_path)\n h_layout = QHBoxLayout()\n h_layout.addWidget(self.w_load)\n h_layout.addWidget(self.w_load_det)\n control_panel_layout.addLayout(h_layout)\n\n h_layout = QHBoxLayout()\n h_layout.addWidget(self.w_plot)\n control_panel_layout.addLayout(h_layout)\n control_panel_layout.addWidget(self.w_show_panel)\n\n vcfg_path = self.json_setting.get(\"latest_vxnet_cfg_path\", \"\")\n self.w_vconfig_path = QLineEdit(vcfg_path)\n vckpt_path = self.json_setting.get(\"latest_vxnet_ckpt_path\", \"\")\n self.w_vckpt_path = QLineEdit(vckpt_path)\n layout = QFormLayout()\n layout.addRow(QLabel(\"config path:\"), self.w_vconfig_path)\n layout.addRow(QLabel(\"ckpt path:\"), self.w_vckpt_path)\n control_panel_layout.addLayout(layout)\n self.w_build_net = QPushButton('Build Network')\n self.w_build_net.clicked.connect(self.on_BuildVxNetPressed)\n\n self.w_load_ckpt = QPushButton('load Network checkpoint')\n self.w_load_ckpt.clicked.connect(self.on_loadVxNetCkptPressed)\n h_layout = QHBoxLayout()\n h_layout.addWidget(self.w_build_net)\n h_layout.addWidget(self.w_load_ckpt)\n control_panel_layout.addLayout(h_layout)\n self.w_inference = QPushButton('Inference Network')\n self.w_inference.clicked.connect(self.on_InferenceVxNetPressed)\n control_panel_layout.addWidget(self.w_inference)\n self.w_load_infer = QPushButton('Load and Inference Network')\n self.w_load_infer.clicked.connect(self.on_LoadInferenceVxNetPressed)\n 
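# 'Load and Inference Network' chains the three actions above in one click:\n # build the network, restore its checkpoint, then run inference on the\n # currently loaded frame (see on_LoadInferenceVxNetPressed below).\n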
control_panel_layout.addWidget(self.w_load_infer)\n # self.w_eval_net = QPushButton('Evaluation VoxelNet')\n # self.w_eval_net.clicked.connect(self.on_EvalVxNetPressed)\n # control_panel_layout.addWidget(self.w_eval_net)\n layout = QFormLayout()\n self.w_cb_gt_curcls = QCheckBox(\"Indexed by GroundTruth Class\")\n self.w_cb_gt_curcls.setChecked(True)\n self.w_cb_gt_curcls.stateChanged.connect(\n self.on_gt_checkbox_statechanged)\n\n self.gt_combobox = QComboBox()\n self.gt_combobox.addItem(\"All\")\n for cls_name in kitti.get_classes():\n self.gt_combobox.addItem(cls_name)\n self._current_gt_cls_ids = None\n self._current_gt_cls_idx = 0\n self.gt_combobox.currentTextChanged.connect(\n self.on_gt_combobox_changed)\n layout.addRow(self.w_cb_gt_curcls, self.gt_combobox)\n\n self.w_cb_dt_curcls = QCheckBox(\"Indexed by Detection Class\")\n self.w_cb_dt_curcls.setChecked(False)\n self.w_cb_dt_curcls.stateChanged.connect(\n self.on_dt_checkbox_statechanged)\n\n self.dt_combobox = QComboBox()\n self.dt_combobox.addItem(\"All\")\n self._current_dt_cls_ids = None\n self._current_dt_cls_idx = 0\n self.dt_combobox.currentTextChanged.connect(\n self.on_dt_combobox_changed)\n layout.addRow(self.w_cb_dt_curcls, self.dt_combobox)\n\n control_panel_layout.addLayout(layout)\n self.w_next = QPushButton('next')\n self.w_next.clicked.connect(\n partial(self.on_nextOrPrevPressed, prev=False))\n self.w_prev = QPushButton('prev')\n self.w_prev.clicked.connect(\n partial(self.on_nextOrPrevPressed, prev=True))\n\n layout = QHBoxLayout()\n layout.addWidget(self.w_prev)\n layout.addWidget(self.w_next)\n control_panel_layout.addLayout(layout)\n\n self.w_next = QPushButton('next current class')\n self.w_next.clicked.connect(\n partial(self.on_nextOrPrevCurClsPressed, prev=False))\n self.w_prev = QPushButton('prev current class')\n self.w_prev.clicked.connect(\n partial(self.on_nextOrPrevCurClsPressed, prev=True))\n\n layout = QHBoxLayout()\n layout.addWidget(self.w_prev)\n layout.addWidget(self.w_next)\n control_panel_layout.addLayout(layout)\n\n control_panel_layout.addLayout(plt_layout)\n save_image_path = self.json_setting.get(\"save_image_path\", \"\")\n self.w_image_save_path = QLineEdit(save_image_path)\n # self.w_cmd = QLineEdit()\n # self.w_cmd.returnPressed.connect(self.on_CmdReturnPressed)\n self.w_save_image = QPushButton('save image')\n self.w_save_image.clicked.connect(self.on_saveimg_clicked)\n control_panel_layout.addWidget(self.w_image_save_path)\n control_panel_layout.addWidget(self.w_save_image)\n # control_panel_layout.addWidget(self.w_cmd)\n control_panel_layout.addWidget(self.w_output)\n self.center_layout = QHBoxLayout()\n\n self.w_pc_viewer = KittiPointCloudView(\n self.w_config, coors_range=self.w_config.get(\"CoorsRange\"))\n\n self.center_layout.addWidget(self.w_pc_viewer)\n self.center_layout.addLayout(control_panel_layout)\n self.center_layout.setStretch(0, 2)\n self.center_layout.setStretch(1, 1)\n center_widget.setLayout(self.center_layout)\n self.setCentralWidget(center_widget)\n self.show()\n\n def on_panel_clicked(self):\n if self.w_config.isHidden():\n self.w_config.show()\n else:\n self.w_config.hide()\n\n def on_saveimg_clicked(self):\n self.save_image(self.current_image)\n\n def on_gt_checkbox_statechanged(self):\n self.w_cb_gt_curcls.setChecked(True)\n self.w_cb_dt_curcls.setChecked(False)\n\n def on_dt_checkbox_statechanged(self):\n self.w_cb_gt_curcls.setChecked(False)\n self.w_cb_dt_curcls.setChecked(True)\n\n def on_gt_combobox_changed(self):\n self._current_gt_cls_idx = 0\n 
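# Changing the ground-truth class resets the per-class cursor and reloads\n # the infos, so 'next/prev current class' iterates the newly chosen class.\n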
self.on_loadButtonPressed()\n\n def on_dt_combobox_changed(self):\n self._current_dt_cls_idx = 0\n annos = kitti.filter_empty_annos(self.detection_annos)\n if self.dt_image_idxes is not None and annos is not None:\n current_class = self.dt_combobox.currentText()\n if current_class == \"All\":\n self._current_dt_cls_ids = self.dt_image_idxes\n else:\n self._current_dt_cls_ids = [\n anno[\"image_idx\"][0] for anno in annos\n if current_class in anno[\"name\"]\n ]\n\n def message(self, value, *arg, color=\"Black\"):\n colorHtml = f\"<font color=\\\"{color}\\\">\"\n endHtml = \"</font><br>\"\n msg = self.print_str(value, *arg)\n self.w_output.insertHtml(colorHtml + msg + endHtml)\n self.w_output.verticalScrollBar().setValue(\n self.w_output.verticalScrollBar().maximum())\n\n def error(self, value, *arg):\n time_str = datetime.datetime.now().strftime(\"[%H:%M:%S]\")\n return self.message(time_str, value, *arg, color=\"Red\")\n\n def info(self, value, *arg):\n time_str = datetime.datetime.now().strftime(\"[%H:%M:%S]\")\n return self.message(time_str, value, *arg, color=\"Black\")\n\n def warning(self, value, *arg):\n time_str = datetime.datetime.now().strftime(\"[%H:%M:%S]\")\n return self.message(time_str, value, *arg, color=\"Yellow\")\n\n def save_image(self, image):\n\n img_path = self.w_image_save_path.text()\n self.json_setting.set(\"save_image_path\", img_path)\n if self.current_image is not None:\n io.imsave(img_path, image)\n # p = self.w_pc_viewer.grab()\n p = self.w_pc_viewer.grabFrameBuffer()\n\n # p = QtGui.QPixmap.grabWindow(self.w_pc_viewer)\n pc_img_path = str(\n Path(img_path).parent / (str(Path(img_path).stem) + \"_pc.jpg\"))\n # p.save(pc_img_path, 'jpg')\n p.save(pc_img_path, 'jpg')\n self.info(\"image saved to\", img_path)\n\n def print_str(self, value, *arg):\n #self.strprint.flush()\n self.sstream.truncate(0)\n self.sstream.seek(0)\n print(value, *arg, file=self.sstream)\n return self.sstream.getvalue()\n\n def on_nextOrPrevPressed(self, prev):\n if prev is True:\n self.current_idx = max(self.current_idx - 1, 0)\n else:\n info_len = len(self.image_idxes)\n self.current_idx = min(self.current_idx + 1, info_len - 1)\n image_idx = self.image_idxes[self.current_idx]\n self.w_imgidx.setText(str(image_idx))\n self.plot_all(image_idx)\n\n def on_nextOrPrevCurClsPressed(self, prev):\n if self.w_cb_dt_curcls.isChecked():\n if prev is True:\n self._current_dt_cls_idx = max(self._current_dt_cls_idx - 1, 0)\n else:\n info_len = len(self._current_dt_cls_ids)\n self._current_dt_cls_idx = min(self._current_dt_cls_idx + 1,\n info_len - 1)\n image_idx = self._current_dt_cls_ids[self._current_dt_cls_idx]\n self.info(\"current dt image idx:\", image_idx)\n elif self.w_cb_gt_curcls.isChecked():\n if prev is True:\n self._current_gt_cls_idx = max(self._current_gt_cls_idx - 1, 0)\n else:\n info_len = len(self._current_gt_cls_ids)\n self._current_gt_cls_idx = min(self._current_gt_cls_idx + 1,\n info_len - 1)\n image_idx = self._current_gt_cls_ids[self._current_gt_cls_idx]\n self.info(\"current gt image idx:\", image_idx)\n self.plot_all(image_idx)\n\n def on_CmdReturnPressed(self):\n cmd = self.print_str(self.cmd.text())\n self.output.insertPlainText(cmd)\n\n def on_loadButtonPressed(self):\n self.root_path = Path(self.w_root_path.text())\n if not (self.root_path / \"training\").exists():\n self.error(\"ERROR: your root path is incorrect.\")\n return\n self.json_setting.set(\"kitti_root_path\", str(self.root_path))\n info_path = self.w_info_path.text()\n if info_path == '':\n info_path = 
self.root_path / 'kitti_infos_val.pkl'\n else:\n info_path = Path(info_path)\n if not info_path.exists():\n self.error(\"ERROR: info file not exist\")\n return\n self.json_setting.set(\"latest_info_path\", str(info_path))\n with open(info_path, 'rb') as f:\n self.kitti_infos = pickle.load(f)\n db_infos_path = Path(self.root_path) / \"neolix_dbinfos_train.pkl\"\n if db_infos_path.exists():\n with open(db_infos_path, 'rb') as f:\n self.db_infos = pickle.load(f)\n global_rot_range = self.w_config.get(\"SampleGlobleRotRange\")\n groups = self.w_config.get(\"SampleGroups\")\n self.info(\"init database sampler with group:\")\n self.info(groups)\n self.db_sampler = DataBaseSamplerV2(self.db_infos, groups, global_rot_range=global_rot_range)\n self.info(\"load db_infos.\")\n self.image_idxes = [info['image_idx'] for info in self.kitti_infos]\n self.info(\"load\", len(self.kitti_infos), \"infos.\")\n current_class = self.gt_combobox.currentText()\n if current_class == \"All\":\n self._current_gt_cls_ids = self.image_idxes\n else:\n self._current_gt_cls_ids = [\n info[\"image_idx\"] for info in self.kitti_infos\n if current_class in info[\"annos\"][\"name\"]\n ]\n self._current_gt_cls_idx = 0\n\n def on_loadDetPressed(self):\n det_path = self.w_det_path.text()\n if Path(det_path).is_file():\n with open(det_path, \"rb\") as f:\n dt_annos = pickle.load(f)\n else:\n dt_annos = kitti.get_label_annos(det_path)\n if len(dt_annos) == 0:\n self.warning(\"detection path contain nothing.\")\n return\n self.detection_annos = dt_annos\n self.info(f\"load {len(dt_annos)} detections.\")\n self.json_setting.set(\"latest_det_path\", det_path)\n annos = kitti.filter_empty_annos(self.detection_annos)\n self.dt_image_idxes = [anno[\"image_idx\"][0] for anno in annos]\n # get class in dt\n available_cls = []\n for anno in self.detection_annos:\n for name in anno[\"name\"]:\n if name not in available_cls:\n available_cls.append(name)\n\n self.dt_combobox.clear()\n self.dt_combobox.addItem(\"All\")\n for cls_name in available_cls:\n self.dt_combobox.addItem(cls_name)\n\n current_class = self.dt_combobox.currentText()\n if current_class == \"All\":\n self._current_dt_cls_ids = self.dt_image_idxes\n else:\n self._current_dt_cls_ids = [\n anno[\"image_idx\"][0] for anno in annos\n if anno[\"name\"] == current_class\n ]\n self._current_dt_cls_idx = 0\n \"\"\"\n if self.kitti_infos is not None:\n t = time.time()\n gt_annos = [info[\"annos\"] for info in self.kitti_infos]\n self.message(get_official_eval_result(gt_annos, dt_annos, 0))\n self.message(f\"eval use time: {time.time() - t:.4f}\")\n \"\"\"\n\n def sample_to_current_data(self):\n if self.kitti_info is None:\n self.error(\"you must load infos and choose a existing image idx first.\")\n return\n\n sampled_difficulty = []\n # class_names = [\"Car\"]\n rect = self.kitti_info['calib/R0_rect']\n P2 = self.kitti_info['calib/P2']\n Trv2c = self.kitti_info['calib/Tr_velo_to_cam']\n num_features = 4\n if 'pointcloud_num_features' in self.kitti_info:\n num_features = self.kitti_info['pointcloud_num_features']\n\n # class_names = self.w_config.get(\"UsedClass\")\n # class_names_group = [[\"trailer\", \"tractor\"]]\n\n if self.db_sampler is not None:\n # gt_boxes_mask = np.array(\n # [n in class_names for n in self.gt_names], dtype=np.bool_)\n gt_boxes_mask = np.ones((self.gt_names.shape[0],), np.bool_)\n sampled_dict = self.db_sampler.sample_all(\n self.root_path,\n self.gt_boxes,\n self.gt_names,\n num_features,\n False,\n gt_group_ids=self.group_ids,\n rect=rect,\n 
Trv2c=Trv2c,\n P2=P2)\n if sampled_dict is not None:\n sampled_gt_names = sampled_dict[\"gt_names\"]\n sampled_gt_boxes = sampled_dict[\"gt_boxes\"]\n sampled_points = sampled_dict[\"points\"]\n sampled_gt_masks = sampled_dict[\"gt_masks\"]\n sampled_difficulty = sampled_dict[\"difficulty\"]\n # gt_names = gt_names[gt_boxes_mask].tolist()\n self.gt_names = np.concatenate(\n [self.gt_names, sampled_gt_names], axis=0)\n # gt_names += [s[\"name\"] for s in sampled]\n self.gt_boxes = np.concatenate(\n [self.gt_boxes, sampled_gt_boxes])\n gt_boxes_mask = np.concatenate(\n [gt_boxes_mask, sampled_gt_masks], axis=0)\n self.difficulty = np.concatenate(\n [self.difficulty, sampled_difficulty], axis=0)\n self.points = np.concatenate(\n [sampled_points, self.points], axis=0)\n sampled_group_ids = sampled_dict[\"group_ids\"]\n if self.group_ids is not None:\n self.group_ids = np.concatenate(\n [self.group_ids, sampled_group_ids])\n\n '''\n prep.noise_per_object_(\n self.gt_boxes,\n self.points,\n gt_boxes_mask,\n rotation_perturb=[-1.57, 1.57],\n center_noise_std=[1.0, 1.0, 1.0],\n num_try=50)'''\n # should remove unrelated objects after noise per object\n self.gt_boxes = self.gt_boxes[gt_boxes_mask]\n self.gt_names = self.gt_names[gt_boxes_mask]\n self.difficulty = self.difficulty[gt_boxes_mask]\n if self.group_ids is not None:\n self.group_ids = self.group_ids[gt_boxes_mask]\n else:\n self.error(\"you enable sample but not provide a database\")\n\n def data_augmentation(self):\n if self.kitti_info is None:\n self.error(\"you must load infos and choose a existing image idx first.\")\n return\n\n seed = np.random.randint(5000000)\n np.random.seed(seed)\n # seed = 1798767\n self.info(f\"prep random seed: {seed}\")\n t = time.time()\n group_ids = None\n if self.w_config.get(\"GroupNoisePerObject\"):\n group_ids = self.group_ids\n prep.noise_per_object_v3_(\n self.gt_boxes,\n self.points,\n # rotation_perturb=0.0,\n # center_noise_std=0,\n global_random_rot_range=[np.pi / 4, np.pi / 4 * 3],\n # global_random_rot_range=[0, 6.28],\n group_ids=group_ids,\n num_try=100)\n self.info(\"noise time\", time.time() - t)\n # self.gt_boxes, self.points = prep.random_flip(\n # self.gt_boxes, self.points)\n # self.gt_boxes, self.points = prep.global_rotation(\n # self.gt_boxes, self.points)\n # self.gt_boxes[:, 6] = box_np_ops.limit_angles(self.gt_boxes[:, 6])\n # self.gt_boxes, self.points = prep.global_scaling(\n # self.gt_boxes, self.points)\n # mask = prep.filter_gt_box_outside_range(\n # self.gt_boxes, [0, -40, 70.4, 40])\n # self.gt_boxes = self.gt_boxes[mask]\n\n def draw_gt_in_image(self):\n if self.kitti_info is None:\n self.error(\"you must load infos and choose a existing image idx first.\")\n return\n if self.gt_boxes is None:\n return\n rect = self.kitti_info['calib/R0_rect']\n P2 = self.kitti_info['calib/P2']\n Trv2c = self.kitti_info['calib/Tr_velo_to_cam']\n gt_boxes_camera = box_np_ops.box_lidar_to_camera(\n self.gt_boxes, rect, Trv2c)\n boxes_3d = box_np_ops.center_to_corner_box3d(gt_boxes_camera[:, :3],\n gt_boxes_camera[:, 3:6],\n gt_boxes_camera[:, 6])\n boxes_3d = boxes_3d.reshape((-1, 3))\n boxes_3d_p2 = box_np_ops.project_to_image(boxes_3d, P2)\n boxes_3d_p2 = boxes_3d_p2.reshape([-1, 8, 2])\n if self.current_image is not None:\n bbox_plot.draw_3d_bbox_in_ax(\n self.w_plt.ax, boxes_3d_p2, colors='b')\n\n def draw_detection(self, detection_anno, label_color=GLColor.Blue):\n if self.kitti_info is None:\n self.error(\"you must load infos and choose a existing image idx first.\")\n return\n\n 
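# Detections below DTScoreThreshold are dropped before drawing; the rest are\n # converted to lidar-frame corner boxes and, when ground truth is available,\n # labeled with their best rotated-3D IoU against the GT boxes.\n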
dt_box_color = self.w_config.get(\"DTBoxColor\")[:3]\n dt_box_color = (*dt_box_color, self.w_config.get(\"DTBoxAlpha\"))\n\n rect = self.kitti_info['calib/R0_rect']\n P2 = self.kitti_info['calib/P2']\n Trv2c = self.kitti_info['calib/Tr_velo_to_cam']\n # detection_anno = kitti.remove_low_height(detection_anno, 25)\n detection_anno = kitti.remove_low_score(detection_anno, self.w_config.get(\"DTScoreThreshold\"))\n \n dt_bboxes = detection_anno[\"bbox\"]\n \n dt_boxes_corners, scores, dt_box_lidar = kitti_anno_to_corners(\n self.kitti_info, detection_anno)\n if self.gt_boxes is not None:\n iou = _riou3d_shapely(self.gt_boxes, dt_box_lidar)\n if iou.shape[0] != 0:\n dt_to_gt_box_iou = iou.max(0)\n else:\n dt_to_gt_box_iou = np.zeros([0, 0])\n num_dt = dt_box_lidar.shape[0]\n dt_boxes_corners_cam = box_np_ops.lidar_to_camera(\n dt_boxes_corners, rect, Trv2c)\n dt_boxes_corners_cam = dt_boxes_corners_cam.reshape((-1, 3))\n dt_boxes_corners_cam_p2 = box_np_ops.project_to_image(\n dt_boxes_corners_cam, P2)\n dt_boxes_corners_cam_p2 = dt_boxes_corners_cam_p2.reshape([-1, 8, 2])\n dt_labels = detection_anno[\"name\"]\n\n dt_scores_text = None\n if scores is not None:\n if self.gt_boxes is not None:\n dt_scores_text = [\n f'score={s:.2f}, iou={i:.2f}'\n for s, i in zip(scores, dt_to_gt_box_iou)\n ]\n else:\n dt_scores_text = [\n f'score={s:.2f}, z={z:.2f}'\n for s, z in zip(scores, dt_box_lidar[:, 2])\n ]\n if self.w_config.get(\"DrawDTLabels\"):\n self.w_pc_viewer.labels(\"dt_boxes/labels\",\n dt_boxes_corners[:, 1, :], dt_scores_text,\n label_color, 15)\n dt_box_color = np.tile(np.array(dt_box_color)[np.newaxis, ...], [num_dt, 1])\n if self.w_config.get(\"DTScoreAsAlpha\") and scores is not None:\n dt_box_color = np.concatenate([dt_box_color[:, :3], scores[..., np.newaxis]], axis=1)\n self.w_pc_viewer.boxes3d(\"dt_boxes\", dt_boxes_corners, dt_box_color,\n self.w_config.get(\"DTBoxLineWidth\"), 1.0)\n\n def plot_gt_boxes_in_pointcloud(self):\n if self.kitti_info is None:\n self.error(\"you must load infos and choose a existing image idx first.\")\n return\n if 'annos' in self.kitti_info:\n gt_box_color = self.w_config.get(\"GTBoxColor\")[:3]\n gt_box_color = (*gt_box_color, self.w_config.get(\"GTBoxAlpha\"))\n diff = self.difficulty.tolist()\n diff_to_name = {-1: \"unk\", 0: \"easy\", 1: \"moderate\", 2: \"hard\"}\n diff_names = [diff_to_name[d] for d in diff]\n label_idx = list(range(self.gt_names.shape[0]))\n labels_ = [\n f'{i}:{l}, {d}'\n for i, l, d in zip(label_idx, self.gt_names, diff_names)\n ]\n boxes_corners = box_np_ops.center_to_corner_box3d(\n self.gt_boxes[:, :3],\n self.gt_boxes[:, 3:6],\n self.gt_boxes[:, 6],\n origin=[0.5, 0.5, 0],\n axis=2)\n # print(self.gt_boxes[:, 6])\n # print(self.gt_boxes[:, :3])\n self.w_pc_viewer.boxes3d(\"gt_boxes\", boxes_corners, gt_box_color,\n 3.0, 1.0)\n if self.w_config.get(\"DrawGTLabels\"):\n self.w_pc_viewer.labels(\"gt_boxes/labels\", boxes_corners[:, 0, :],\n labels_, GLColor.Green, 15)\n\n def plot_pointcloud(self):\n if self.kitti_info is None:\n self.error(\"you must load infos and choose a existing image idx first.\")\n return\n\n point_color = self.w_config.get(\"PointColor\")[:3]\n point_color = (*point_color, self.w_config.get(\"PointAlpha\"))\n point_color = np.tile(np.array(point_color), [self.points.shape[0], 1])\n\n # self.w_pc_viewer.reset_camera()\n point_size = np.full(\n [self.points.shape[0]],\n self.w_config.get(\"PointSize\"),\n dtype=np.float32)\n # self.w_pc_viewer.draw_point_cloud(self.points, color=points_rgb, 
with_reflectivity=False, size=0.1)\n self.w_pc_viewer.draw_bounding_box()\n idx = self.image_idxes.index(self.kitti_info[\"image_idx\"])\n if 'annos' in self.kitti_info:\n # poses = np.zeros([self.gt_boxes.shape[0], 3])\n # self.w_pc_viewer.circles(\n # \"circles\", poses, np.linalg.norm(\n # self.gt_boxes[:, :3], axis=-1))\n # self.w_pc_viewer.draw_anchors_v1(\n # self.gt_boxes, self.points, gt_names=gt_names)\n # self.w_pc_viewer.draw_frustum(bboxes, rect, Trv2c, P2)\n # self.w_pc_viewer.draw_cropped_frustum(bboxes, rect, Trv2c, P2)\n gt_point_mask = box_np_ops.points_in_rbbox(self.points,\n self.gt_boxes).any(1)\n point_size[gt_point_mask] = self.w_config.get(\"GTPointSize\")\n gt_point_color = self.w_config.get(\"GTPointColor\")\n gt_point_color = (*gt_point_color[:3],\n self.w_config.get(\"GTPointAlpha\"))\n point_color[gt_point_mask] = gt_point_color\n self.w_pc_viewer.remove(\"dt_boxes/labels\")\n self.w_pc_viewer.remove(\"dt_boxes\")\n if self.detection_annos is not None and self.w_config.get(\"DrawDTBoxes\"):\n detection_anno = self.detection_annos[idx]\n self.draw_detection(detection_anno)\n if self.w_config.get(\"WithReflectivity\"):\n if self.points.shape[1] < 4:\n self.error(\"Your pointcloud don't contain reflectivity.\")\n else:\n point_color = np.concatenate(\n [point_color[:, :3], self.points[:, 3:4] * 0.8 + 0.2],\n axis=1)\n\n self.w_pc_viewer.scatter(\n \"pointcloud\", self.points[:, :3], point_color, size=point_size)\n\n def load_info(self, image_idx):\n if self.kitti_infos is None:\n self.error(\"you must load infos first.\")\n return\n\n if image_idx not in self.image_idxes:\n self.error(f\"index{image_idx} not exist.\")\n return False\n self.json_setting.set(\"image_idx\", str(image_idx))\n idx = self.image_idxes.index(image_idx)\n self.kitti_info = self.kitti_infos[idx]\n if \"timestamp\" in self.kitti_info:\n self.message(\"timestamp\", self.kitti_info[\"timestamp\"])\n image = None\n if 'img_path' in self.kitti_info:\n img_path = self.kitti_info['img_path']\n if img_path != \"\":\n image = io.imread(str(self.root_path / img_path))\n self.current_image = image\n\n else:\n self.current_image = None\n else:\n self.current_image = None\n v_path = str(self.root_path / self.kitti_info['velodyne_path'])\n num_features = 4\n if 'pointcloud_num_features' in self.kitti_info:\n num_features = self.kitti_info['pointcloud_num_features']\n\n points = np.fromfile(\n v_path, dtype=np.float32, count=-1).reshape([-1, num_features])\n self.points = points\n rect = self.kitti_info['calib/R0_rect']\n P2 = self.kitti_info['calib/P2']\n Trv2c = self.kitti_info['calib/Tr_velo_to_cam']\n image_shape = None\n if 'img_shape' in self.kitti_info:\n image_shape = self.kitti_info['img_shape']\n # self.info(\"num_points before remove:\", self.points.shape[0])\n if self.w_config.get(\"RemoveOutsidePoint\"):\n self.points = box_np_ops.remove_outside_points(\n self.points, rect, Trv2c, P2, image_shape)\n # self.info(\"num_points after remove:\", self.points.shape[0])\n img_path = self.w_image_save_path.text()\n img_path = str(Path(img_path).parent / f\"{image_idx}.jpg\")\n self.w_image_save_path.setText(img_path)\n self.json_setting.set(\"save_image_path\", img_path)\n\n if 'annos' in self.kitti_info:\n annos = self.kitti_info['annos']\n # annos = kitti.filter_kitti_anno(annos,\n # self.w_config.get(\"UsedClass\"))\n labels = annos['name']\n num_obj = len([n for n in annos['name'] if n != 'DontCare'])\n # print(annos[\"group_ids\"].shape)\n dims = annos['dimensions'][:num_obj]\n loc = 
annos['location'][:num_obj]\n rots = annos['rotation_y'][:num_obj]\n self.difficulty = annos[\"difficulty\"][:num_obj]\n self.gt_names = labels[:num_obj]\n gt_boxes_camera = np.concatenate(\n [loc, dims, rots[..., np.newaxis]], axis=1)\n self.gt_boxes = box_np_ops.box_camera_to_lidar(\n gt_boxes_camera, rect, Trv2c)\n \n if 'group_ids' in annos:\n self.group_ids = annos['group_ids'][:num_obj]\n else:\n self.gt_boxes = None\n self.gt_names = None\n self.difficulty = None\n self.group_ids = None\n if self.w_config.get(\"EnableSample\"):\n self.sample_to_current_data()\n if self.w_config.get(\"EnableAugmentation\"):\n self.data_augmentation()\n\n def plot_image(self):\n if self.kitti_info is None:\n self.error(\"you need to load the info first before plot image\")\n return False\n if self.current_image is not None:\n self.w_plt.ax.imshow(self.current_image)\n if 'annos' in self.kitti_info:\n annos = self.kitti_info['annos']\n annos = kitti.filter_kitti_anno(annos,\n self.w_config.get(\"UsedClass\"))\n labels = annos['name']\n num_obj = len([n for n in annos['name'] if n != 'DontCare'])\n bbox_plot.draw_bbox_in_ax(\n self.w_plt.ax,\n annos['bbox'],\n edgecolors=['g'] * num_obj + ['b'] * num_obj,\n labels=[f'{i}: {labels[i]}' for i in range(len(labels))])\n\n def plot_all(self, image_idx):\n self.w_plt.reset_plot()\n self.load_info(image_idx)\n self.plot_image()\n self.draw_gt_in_image()\n self.w_plt.draw() # this isn't supported in ubuntu.\n self.plot_pointcloud()\n if self.w_config.get(\"DrawGTBoxes\"):\n self.plot_gt_boxes_in_pointcloud()\n if self.w_config.get(\"DrawVoxels\"):\n self.w_pc_viewer.draw_voxels(self.points, self.gt_boxes)\n\n return True\n\n def on_plotButtonPressed(self):\n if self.kitti_infos is None:\n self.error(\"you must load Kitti Infos first.\")\n return\n image_idx = int(self.w_imgidx.text())\n if self.plot_all(image_idx):\n self.current_idx = self.image_idxes.index(image_idx)\n\n def closeEvent(self, event):\n config_str = self.w_config.dumps()\n self.json_setting.set(\"config\", config_str)\n return super().closeEvent(event)\n\n def on_configchanged(self, msg):\n # self.warning(msg.name, msg.value)\n # save config to file\n idx = self.image_idxes.index(self.kitti_info[\"image_idx\"])\n config_str = self.w_config.dumps()\n self.json_setting.set(\"config\", config_str)\n pc_redraw_msgs = [\"PointSize\", \"PointAlpha\", \"GTPointSize\"]\n pc_redraw_msgs += [\"GTPointAlpha\", \"WithReflectivity\"]\n pc_redraw_msgs += [\"PointColor\", \"GTPointColor\"]\n box_redraw = [\"GTBoxColor\", \"GTBoxAlpha\"]\n dt_redraw = [\"DTBoxColor\", \"DTBoxAlpha\", \"DrawDTLabels\", \"DTScoreAsAlpha\", \"DTScoreThreshold\", \"DTBoxLineWidth\"]\n\n vx_redraw_msgs = [\"DrawPositiveVoxelsOnly\", \"DrawVoxels\"]\n vx_redraw_msgs += [\"PosVoxelColor\", \"PosVoxelAlpha\"]\n vx_redraw_msgs += [\"NegVoxelColor\", \"NegVoxelAlpha\"]\n all_redraw_msgs = [\"RemoveOutsidePoint\"]\n if msg.name in vx_redraw_msgs:\n if self.w_config.get(\"DrawVoxels\"):\n self.w_pc_viewer.draw_voxels(self.points, self.gt_boxes)\n else:\n self.w_pc_viewer.remove(\"voxels\")\n elif msg.name in pc_redraw_msgs:\n self.plot_pointcloud()\n elif msg.name in all_redraw_msgs:\n self.on_plotButtonPressed()\n elif msg.name in box_redraw:\n self.plot_gt_boxes_in_pointcloud()\n elif msg.name in dt_redraw:\n if self.detection_annos is not None and self.w_config.get(\"DrawDTBoxes\"):\n detection_anno = self.detection_annos[idx]\n self.draw_detection(detection_anno)\n\n def on_loadVxNetCkptPressed(self):\n ckpt_path = 
Path(self.w_vckpt_path.text())\n self.json_setting.set(\"latest_vxnet_ckpt_path\",\n self.w_vckpt_path.text())\n self.inference_ctx.restore(ckpt_path)\n # self.w_load_ckpt.setText(self.w_load_ckpt.text() + f\": {ckpt_path.stem}\")\n self.info(\"load VoxelNet ckpt succeed.\")\n\n def on_BuildVxNetPressed(self):\n self.inference_ctx = TorchInferenceContext()\n vconfig_path = Path(self.w_vconfig_path.text())\n self.inference_ctx.build(vconfig_path)\n self.json_setting.set(\"latest_vxnet_cfg_path\", str(vconfig_path))\n self.info(\"Build VoxelNet ckpt succeed.\")\n # self.w_load_config.setText(self.w_load_config.text() + f\": {vconfig_path.stem}\")\n\n def on_InferenceVxNetPressed(self):\n t = time.time()\n inputs = self.inference_ctx.get_inference_input_dict(\n self.kitti_info, self.points)\n self.info(\"input preparation time:\", time.time() - t)\n t = time.time()\n with self.inference_ctx.ctx():\n det_annos = self.inference_ctx.inference(inputs)\n self.info(\"detection time:\", time.time() - t)\n self.draw_detection(det_annos[0])\n\n def on_LoadInferenceVxNetPressed(self):\n self.on_BuildVxNetPressed()\n self.on_loadVxNetCkptPressed()\n self.on_InferenceVxNetPressed()\n\n def on_EvalVxNetPressed(self):\n if \"annos\" not in self.kitti_infos[0]:\n self.error(\"ERROR: infos don't contain gt label.\")\n t = time.time()\n det_annos = []\n input_cfg = self.inference_ctx.config.eval_input_reader\n model_cfg = self.inference_ctx.config.model.second\n\n class_names = list(input_cfg.class_names)\n num_features = model_cfg.num_point_features\n with self.inference_ctx.ctx():\n for info in list_bar(self.kitti_infos):\n v_path = self.root_path / info['velodyne_path']\n # v_path = v_path.parent.parent / (\n # v_path.parent.stem + \"_reduced\") / v_path.name\n points = np.fromfile(\n str(v_path), dtype=np.float32,\n count=-1).reshape([-1, num_features])\n rect = info['calib/R0_rect']\n P2 = info['calib/P2']\n Trv2c = info['calib/Tr_velo_to_cam']\n image_shape = info['img_shape']\n if self.w_config.get(\"RemoveOutsidePoint\"):\n points = box_np_ops.remove_outside_points(\n points, rect, Trv2c, P2, image_shape)\n inputs = self.inference_ctx.get_inference_input_dict(\n info, points)\n det_annos += self.inference_ctx.inference(inputs)\n self.info(\"total detection time:\", time.time() - t)\n gt_annos = [i[\"annos\"] for i in self.kitti_infos]\n self.info(get_official_eval_result(gt_annos, det_annos, class_names))\n\n @staticmethod\n def get_simpify_labels(labels):\n label_map = {\n \"Vehicle\": \"V\",\n \"Pedestrian\": \"P\",\n \"Cyclist\": \"C\",\n \"Others\": \"O\",\n \"Others_moving\": \"Om\",\n \"Others_stationary\": \"Os\",\n \"Vehicle\": \"V\"\n }\n label_count = {\n \"Vehicle\": 0,\n \"Pedestrian\": 0,\n \"Cyclist\": 0,\n \"Others\": 0,\n \"Others_moving\": 0,\n \"Others_stationary\": 0,\n \"Vehicle\": 0\n }\n ret = []\n for i, name in enumerate(labels):\n count = 0\n if name in label_count:\n count = label_count[name]\n label_count[name] += 1\n else:\n label_count[name] = 0\n ret.append(f\"{label_map[name]}{count}\")\n return ret\n\n @staticmethod\n def get_false_pos_neg(gt_boxes, dt_boxes, labels, fp_thresh=0.1):\n iou = _riou3d_shapely(gt_boxes, dt_boxes)\n ret = np.full([len(gt_boxes)], 2, dtype=np.int64)\n assigned_dt = np.zeros([len(dt_boxes)], dtype=np.bool_)\n label_thresh_map = {\n \"Car\": 0.7,\n \"Pedestrian\": 0.5,\n \"Cyclist\": 0.5,\n \"car\": 0.7,\n \"tractor\": 0.7,\n \"trailer\": 0.7,\n }\n tp_thresh = np.array([label_thresh_map[n] for n in labels])\n if len(gt_boxes) != 0 and 
len(dt_boxes) != 0:\n iou_max_dt_for_gt = iou.max(1)\n dt_iou_max_dt_for_gt = iou.argmax(1)\n ret[iou_max_dt_for_gt >= tp_thresh] = 0\n ret[np.logical_and(iou_max_dt_for_gt < tp_thresh,\n iou_max_dt_for_gt > fp_thresh)] = 1 # FP\n assigned_dt_inds = dt_iou_max_dt_for_gt\n assigned_dt_inds = assigned_dt_inds[iou_max_dt_for_gt >= fp_thresh]\n assigned_dt[assigned_dt_inds] = True\n return ret, assigned_dt\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = KittiViewer()\n sys.exit(app.exec_())\n",
"# -*- coding: utf-8 -*-\nimport os\nimport pathlib\nimport pickle\nimport shutil\nimport time\nfrom functools import partial\nimport sys\nsys.path.append('../')\nfrom pathlib import Path\nimport fire\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport os\nprint(torch.__version__)\nprint(os.environ['PYTHONPATH'])\nfrom google.protobuf import text_format\nfrom tensorboardX import SummaryWriter\n\nimport torchplus\nimport second.data.kitti_common as kitti\nfrom second.builder_pruned import target_assigner_builder, voxel_builder\nfrom second.data.preprocess import merge_second_batch\nfrom second.protos import pipeline_pb2\nfrom second.pytorch.builder_pruned import (box_coder_builder, input_reader_builder,\n lr_scheduler_builder, optimizer_builder,\n second_builder)\nfrom second.utils.eval import get_coco_eval_result, get_official_eval_result\nfrom second.utils.progress_bar import ProgressBar\n\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n# torch.cuda.set_device(1)\n\n\ndef get_paddings_indicator(actual_num, max_num, axis=0):\n \"\"\"\n Create boolean mask by actually number of a padded tensor.\n :param actual_num:\n :param max_num:\n :param axis:\n :return: [type]: [description]\n \"\"\"\n actual_num = torch.unsqueeze(actual_num, axis+1)\n max_num_shape = [1] * len(actual_num.shape)\n max_num_shape[axis+1] = -1\n max_num = torch.arange(max_num, dtype=torch.int, device=actual_num.device).view(max_num_shape)\n # tiled_actual_num : [N, M, 1]\n # tiled_actual_num : [[3,3,3,3,3], [4,4,4,4,4], [2,2,2,2,2]]\n # title_max_num : [[0,1,2,3,4], [0,1,2,3,4], [0,1,2,3,4]]\n paddings_indicator = actual_num.int() > max_num\n # paddings_indicator shape : [batch_size, max_num]\n return paddings_indicator\n\ndef _get_pos_neg_loss(cls_loss, labels):\n # cls_loss: [N, num_anchors, num_class]\n # labels: [N, num_anchors]\n batch_size = cls_loss.shape[0]\n if cls_loss.shape[-1] == 1 or len(cls_loss.shape) == 2:\n cls_pos_loss = (labels > 0).type_as(cls_loss) * cls_loss.view(\n batch_size, -1)\n cls_neg_loss = (labels == 0).type_as(cls_loss) * cls_loss.view(\n batch_size, -1)\n cls_pos_loss = cls_pos_loss.sum() / batch_size\n cls_neg_loss = cls_neg_loss.sum() / batch_size\n else:\n cls_pos_loss = cls_loss[..., 1:].sum() / batch_size\n cls_neg_loss = cls_loss[..., 0].sum() / batch_size\n return cls_pos_loss, cls_neg_loss\n\n\ndef _flat_nested_json_dict(json_dict, flatted, sep=\".\", start=\"\"):\n for k, v in json_dict.items():\n if isinstance(v, dict):\n _flat_nested_json_dict(v, flatted, sep, start + sep + k)\n else:\n flatted[start + sep + k] = v\n\n\ndef flat_nested_json_dict(json_dict, sep=\".\") -> dict:\n \"\"\"flat a nested json-like dict. 
this function make shadow copy.\n \"\"\"\n flatted = {}\n for k, v in json_dict.items():\n if isinstance(v, dict):\n _flat_nested_json_dict(v, flatted, sep, k)\n else:\n flatted[k] = v\n return flatted\n\n\ndef example_convert_to_torch(example, dtype=torch.float32, device=None) -> dict:\n # device = device or torch.device(\"cuda:0\")\n example_torch = {}\n # float_names = [\"voxels\", \"anchors\", \"reg_targets\", \"reg_weights\", \"bev_map\", \"rect\", \"Trv2c\", \"P2\"]\n float_names = [\"voxels\", \"anchors\", \"reg_targets\", \"reg_weights\", \"bev_map\"]\n\n\n for k, v in example.items():\n if k in float_names:\n example_torch[k] = torch.as_tensor(v, dtype=dtype).cuda()\n elif k in [\"coordinates\", \"labels\", \"num_points\"]:\n example_torch[k] = torch.as_tensor(v, dtype=torch.int32).cuda()\n elif k in [\"anchors_mask\"]:\n example_torch[k] = torch.as_tensor(v, dtype=torch.uint8).cuda()\n # torch.uint8 is now deprecated, please use a dtype torch.bool instead\n else:\n example_torch[k] = v\n return example_torch\n\n\ndef train(config_path,\n model_dir,\n result_path=None,\n create_folder=False,\n display_step=50,\n summary_step=5,\n pickle_result=True,\n percent = 0):\n \"\"\"train a VoxelNet model specified by a config file.\n \"\"\"\n if create_folder:\n if pathlib.Path(model_dir).exists():\n model_dir = torchplus.train.create_folder(model_dir)\n\n model_dir = pathlib.Path(model_dir)\n model_dir.mkdir(parents=True, exist_ok=True)\n eval_checkpoint_dir = model_dir / 'eval_checkpoints'\n eval_checkpoint_dir.mkdir(parents=True, exist_ok=True)\n if result_path is None:\n result_path = model_dir / 'results'\n config_file_bkp = \"pipeline.config\"\n config = pipeline_pb2.TrainEvalPipelineConfig()\n with open(config_path, \"r\") as f:\n proto_str = f.read()\n text_format.Merge(proto_str, config)\n shutil.copyfile(config_path, str(model_dir / config_file_bkp))\n input_cfg = config.train_input_reader\n eval_input_cfg = config.eval_input_reader\n model_cfg = config.model.second\n train_cfg = config.train_config\n\n class_names = list(input_cfg.class_names)\n #########################\n # Build Voxel Generator\n #########################\n voxel_generator = voxel_builder.build(model_cfg.voxel_generator)\n #########################\n # Build Target Assigner\n #########################\n bv_range = voxel_generator.point_cloud_range[[0, 1, 3, 4]]\n box_coder = box_coder_builder.build(model_cfg.box_coder)\n target_assigner_cfg = model_cfg.target_assigner\n target_assigner = target_assigner_builder.build(target_assigner_cfg, bv_range, box_coder)\n ######################\n # Build NetWork\n ######################\n center_limit_range = model_cfg.post_center_limit_range\n # net = second_builder.build(model_cfg, voxel_generator, target_assigner)\n net = second_builder.build(model_cfg, voxel_generator, target_assigner, input_cfg.batch_size)\n # print(\"We'll use\", torch.cuda.device_count(), \"GPUs!\")\n # os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'\n # devices_ids = [0, 1, 2, 3]\n # net = nn.DataParallel(net, device_ids=devices_ids)\n net = net.cuda()\n # net_train = torch.nn.DataParallel(net).cuda()\n # for n, p in net.named_parameters():\n # print(n, p.shape)\n ######################\n # Build Optimizer\n ######################\n # we need global_step to create lr_scheduler, so restore net first.\n #pretrained_dict = torch.load(\"/nfs/nas/model/songhongli/neolix_3cls/voxelnet-419516.tckpt\")\n # pretrained_dict = 
torch.load(\"/nfs/nas/model/songhongli/neolix_shanghai_part1_init_net/voxelnet-1119644.tckpt\")\n # model_dict = net.state_dict()\n # print(model_dict.items())\n # pretrained_dict = {k : v for k, v in pretrained_dict.items() if k in model_dict}\n # model_dict.update(pretrained_dict)\n # model_dict['global_step'] = torch.Tensor([0]).to(pretrained_dict['global_step'].device)\n # net.load_state_dict(model_dict)\n # for param in net.parameters():\n # param.requires_grad = False\n # for param in net.rpn.conv_box.parameters():\n # param.requires_grad = True\n # for param in net.rpn.conv_cls.parameters():\n # param.requires_grad = True\n # for param in net.rpn.conv_dir_cls.parameters():\n # param.requires_grad = True\n # for i in net.named_parameters():\n # print(i)\n torchplus.train.try_restore_latest_checkpoints(model_dir, [net])\n # torchplus.train.try_restore_latest_checkpoints_multi_gpus(model_dir, [net.module])\n net.zero_global_step()\n gstep = net.get_global_step() - 1\n # gstep = net.module.get_global_step() - 1\n\n optimizer_cfg = train_cfg.optimizer\n if train_cfg.enable_mixed_precision:\n net.half()\n net.metrics_to_float()\n net.convert_norm_to_float(net)\n\n optimizer = optimizer_builder.build(optimizer_cfg, net.parameters())\n # optimizer = optimizer_builder.build(optimizer_cfg, filter(lambda p: p.requires_grad, net.parameters()))\n if train_cfg.enable_mixed_precision:\n loss_scale = train_cfg.loss_scale_factor\n mixed_optimizer = torchplus.train.MixedPrecisionWrapper(optimizer, loss_scale)\n else:\n mixed_optimizer = optimizer\n # must restore optimizer AFTER using MixedPrecisionWrapper\n torchplus.train.try_restore_latest_checkpoints(model_dir, [mixed_optimizer]) # restore pretrained model\n # torchplus.train.try_restore_latest_checkpoints_multi_gpus(model_dir, [mixed_optimizer]) # restore pretrained model\n\n # gstep = -1 # restore pretrained model\n lr_scheduler = lr_scheduler_builder.build(optimizer_cfg, optimizer, gstep)\n if train_cfg.enable_mixed_precision:\n float_dtype = torch.float16\n else:\n float_dtype = torch.float32\n ######################\n # Prepare Input\n ######################\n\n pruned_flag = False\n use_ragular = True\n if pruned_flag:\n total = 0\n for m in net.modules():\n if isinstance(m, nn.BatchNorm2d):\n total += m.weight.data.shape[0]\n\n bn = torch.zeros(total)\n index = 0\n for m in net.modules():\n if isinstance(m, nn.BatchNorm2d):\n size = m.weight.data.shape[0]\n bn[index:(index+size)] = m.weight.data.abs().clone()\n index += size\n\n y, i = torch.sort(bn)\n thre_index = int(total * percent)\n thre = y[thre_index].cpu().detach().numpy().tolist()\n print(\"thresh = :\", thre)\n pruned = 0\n cfg = []\n cfg_mask = []\n cfg_idx = []\n\n for k, m in enumerate(net.rpn.modules()):\n if isinstance(m, nn.BatchNorm2d):\n weight_copy = m.weight.data.abs().clone()\n mask = weight_copy.gt(thre).float().cuda()\n if use_ragular:\n sum_mask = torch.sum(mask)\n final = 8\n for i in range(7):\n if sum_mask <= ragular[i + 1] and sum_mask > ragular[i]:\n final = ragular[i + 1]\n a, b = torch.sort(weight_copy)\n th = a[-final]\n mask = weight_copy.ge(th).float().cuda()\n pruned = pruned + mask.shape[0] - torch.sum(mask)\n m.weight.data.mul_(mask)\n m.bias.data.mul_(mask)\n cfg.append(int(torch.sum(mask)))\n cfg_mask.append(mask.clone())\n cfg_idx.append(np.where(mask.cpu().detach().numpy() > 0))\n print('layer index: {:d} \\t total channel: {:d} \\t remaining channel: {:d}'.\n format(k, mask.shape[0], int(torch.sum(mask))))\n net_pruned = 
second_builder.build(model_cfg, voxel_generator, target_assigner, input_cfg.batch_size, cfg)\n net_pruned.cuda()\n layer_id_in_cfg = 0\n start_mask = torch.ones(64)\n end_mask = cfg_mask[layer_id_in_cfg]\n flag = False\n\n\n\n for [m0, m1] in zip(net.rpn.modules(), net_pruned.rpn.modules()):\n if isinstance(m0, nn.BatchNorm2d):\n idx1 = np.squeeze(np.argwhere(np.asarray(end_mask.cpu().numpy())))\n if idx1.size == 1:\n idx1 = np.resize(idx1,(1,))\n m1.weight.data = m0.weight.data[idx1.tolist()].clone()\n m1.bias.data = m0.bias.data[idx1.tolist()].clone()\n m1.running_mean = m0.running_mean[idx1.tolist()].clone()\n m1.running_var = m0.running_var[idx1.tolist()].clone()\n layer_id_in_cfg += 1\n if not flag:\n start_mask = end_mask.clone()\n else:\n start_mask = cfg_mask[layer_id_in_cfg - 2]\n if layer_id_in_cfg < len(cfg_mask): # do not change in Final FC\n end_mask = cfg_mask[layer_id_in_cfg]\n elif isinstance(m0, nn.Conv2d):\n if layer_id_in_cfg >= len(cfg_mask):\n break\n idx0 = np.squeeze(np.argwhere(np.asarray(start_mask.cpu().numpy())))\n idx1 = np.squeeze(np.argwhere(np.asarray(end_mask.cpu().numpy())))\n if idx0.size == 1:\n idx0 = np.resize(idx0, (1,))\n if idx1.size == 1:\n idx1 = np.resize(idx1, (1,))\n w0 = m0.weight.data.clone()\n w1 = m0.weight.data[:, idx0.tolist(), :, :].clone()\n w1 = w1[idx1.tolist(), :, :, :].clone()\n m1.weight.data = w1.clone()\n flag = False\n elif isinstance(m0, nn.ConvTranspose2d):\n idx0 = np.squeeze(np.argwhere(np.asarray(start_mask.cpu().numpy())))\n idx1 = np.squeeze(np.argwhere(np.asarray(end_mask.cpu().numpy())))\n if idx0.size == 1:\n idx0 = np.resize(idx0, (1,))\n if idx1.size == 1:\n idx1 = np.resize(idx1, (1,))\n w0 = m0.weight.data.clone()\n w1 = m0.weight.data[:, idx1.tolist(), :, :].clone()\n w1 = w1[idx0.tolist(), :, :, :].clone()\n m1.weight.data = w1.clone()\n flag = True\n\n cut_channel = np.where(torch.cat((cfg_mask[4], cfg_mask[11], cfg_mask[18]), dim = 0).cpu().detach().numpy().reshape(-1) > 0.5)\n net_pruned.rpn.conv_cls.weight.data = net.rpn.conv_cls.weight.data[:, np.array(cut_channel).reshape(-1), :, :].clone()\n net_pruned.rpn.conv_box.weight.data = net.rpn.conv_box.weight.data[:, np.array(cut_channel).reshape(-1), :, :].clone()\n net_pruned.rpn.conv_dir_cls.weight.data = net.rpn.conv_dir_cls.weight.data[:, np.array(cut_channel).reshape(-1), :, :].clone()\n net_pruned.rpn.conv_cls.bias.data = net.rpn.conv_cls.bias.data.clone()\n net_pruned.rpn.conv_box.bias.data = net.rpn.conv_box.bias.data.clone()\n net_pruned.rpn.conv_dir_cls.bias.data = net.rpn.conv_dir_cls.bias.data.clone()\n net.rpn = net_pruned.rpn\n\n dataset = input_reader_builder.build(\n input_cfg,\n model_cfg,\n training=True,\n voxel_generator=voxel_generator,\n target_assigner=target_assigner)\n eval_dataset = input_reader_builder.build(\n eval_input_cfg,\n model_cfg,\n training=False,\n voxel_generator=voxel_generator,\n target_assigner=target_assigner)\n\n def _worker_init_fn(worker_id):\n time_seed = np.array(time.time(), dtype=np.int32)\n np.random.seed(time_seed + worker_id)\n print(f\"WORKER {worker_id} seed:\", np.random.get_state()[1][0])\n\n dataloader = torch.utils.data.DataLoader(\n dataset,\n batch_size=input_cfg.batch_size,\n shuffle=True,\n num_workers=input_cfg.num_workers,\n pin_memory=False,\n collate_fn=merge_second_batch,\n worker_init_fn=_worker_init_fn)\n eval_dataloader = torch.utils.data.DataLoader(\n eval_dataset,\n batch_size=eval_input_cfg.batch_size,\n shuffle=False,\n num_workers=eval_input_cfg.num_workers,\n pin_memory=False,\n 
collate_fn=merge_second_batch)\n data_iter = iter(dataloader)\n\n ######################\n # Training\n ######################\n log_path = model_dir / 'log.txt'\n logf = open(log_path, 'a')\n logf.write(proto_str)\n logf.write(\"\\n\")\n summary_dir = model_dir / 'summary'\n summary_dir.mkdir(parents=True, exist_ok=True)\n writer = SummaryWriter(str(summary_dir))\n\n total_step_elapsed = 0\n # remain_steps = train_cfg.steps - net.get_global_step()\n remain_steps = train_cfg.steps - net.get_global_step()\n t = time.time()\n ckpt_start_time = t\n\n total_loop = train_cfg.steps // train_cfg.steps_per_eval + 1\n # total_loop = remain_steps // train_cfg.steps_per_eval + 1\n clear_metrics_every_epoch = train_cfg.clear_metrics_every_epoch\n\n if train_cfg.steps % train_cfg.steps_per_eval == 0:\n total_loop -= 1\n mixed_optimizer.zero_grad()\n try:\n for _ in range(total_loop):\n if total_step_elapsed + train_cfg.steps_per_eval > train_cfg.steps:\n steps = train_cfg.steps % train_cfg.steps_per_eval\n else:\n steps = train_cfg.steps_per_eval\n for step in range(steps):\n lr_scheduler.step()\n try:\n example = next(data_iter)\n except StopIteration:\n print(\"end epoch\")\n if clear_metrics_every_epoch:\n net.clear_metrics()\n data_iter = iter(dataloader)\n example = next(data_iter)\n example_torch = example_convert_to_torch(example, float_dtype)\n # [\"voxels\", \"num_points\", \"coordinates\", \"anchors\", \"anchors_mask\", \"labels\", \"reg_targets\", \"reg_weights\", \"pc_idx\"]\n batch_size = example[\"anchors\"].shape[0]\n\n example_tuple = list(example_torch.values())\n example_tuple[8] = torch.from_numpy(example_tuple[8]) # pc_idx\n\n # example_tuple[12] = torch.from_numpy(example_tuple[12]) # image_shape\n\n assert 9 == len(example_tuple), \"something write with training input size!\"\n\n # ret_dict = net(example_torch)\n\n # Training Input form example\n pillar_x = example_tuple[0][:,:,0].unsqueeze(0).unsqueeze(0)\n pillar_y = example_tuple[0][:,:,1].unsqueeze(0).unsqueeze(0)\n pillar_z = example_tuple[0][:,:,2].unsqueeze(0).unsqueeze(0)\n pillar_i = example_tuple[0][:,:,3].unsqueeze(0).unsqueeze(0)\n num_points_per_pillar = example_tuple[1].float().unsqueeze(0)\n\n ################################################################\n # Find distance of x, y, z from pillar center\n # assume config_file xyres_16.proto\n coors_x = example_tuple[2][:, 3].float()\n coors_y = example_tuple[2][:, 2].float()\n # self.x_offset = self.vx / 2 + pc_range[0]\n # self.y_offset = self.vy / 2 + pc_range[1]\n # this assumes xyres 20\n # x_sub = coors_x.unsqueeze(1) * 0.16 + 0.1\n # y_sub = coors_y.unsqueeze(1) * 0.16 + -39.9\n ################################################################\n\n # assumes xyres_16\n x_sub = coors_x.unsqueeze(1) * 0.16 - 22.96#+ 0.08 #annotate by shl: * voxel_size + min(x) + voxel_size/2\n y_sub = coors_y.unsqueeze(1) * 0.16 - 22.96#- 19.76\n ones = torch.ones([1, 100], dtype=torch.float32, device=pillar_x.device)\n x_sub_shaped = torch.mm(x_sub, ones).unsqueeze(0).unsqueeze(0)\n y_sub_shaped = torch.mm(y_sub, ones).unsqueeze(0).unsqueeze(0)\n\n num_points_for_a_pillar = pillar_x.size()[3]\n mask = get_paddings_indicator(num_points_per_pillar, num_points_for_a_pillar, axis=0)\n mask = mask.permute(0, 2, 1)\n mask = mask.unsqueeze(1)\n mask = mask.type_as(pillar_x)\n\n coors = example_tuple[2]\n anchors = example_tuple[3]\n labels = example_tuple[5]\n reg_targets = example_tuple[6]\n\n input = [pillar_x, pillar_y, pillar_z, pillar_i, num_points_per_pillar,\n x_sub_shaped, 
y_sub_shaped, mask, coors, anchors, labels, reg_targets]\n\n ret_dict = net(input)\n sr = 0\n if sr > 0:\n for idx , m in enumerate(net.rpn.modules()):\n if isinstance(m, nn.BatchNorm2d):\n bn_module = m\n bn_module.weight.grad.data.add_(sr * torch.sign(bn_module.weight.data))\n assert 10 == len(ret_dict), \"something write with training output size!\"\n\n cls_preds = ret_dict[5]\n loss = ret_dict[0].mean()\n cls_loss_reduced = ret_dict[7].mean()\n loc_loss_reduced = ret_dict[8].mean()\n cls_pos_loss = ret_dict[3]\n cls_neg_loss = ret_dict[4]\n loc_loss = ret_dict[2]\n cls_loss = ret_dict[1]\n dir_loss_reduced = ret_dict[6]\n cared = ret_dict[9]\n labels = example_tuple[5]\n if train_cfg.enable_mixed_precision:\n loss *= loss_scale\n loss.backward()\n torch.nn.utils.clip_grad_norm_(net.parameters(), 10.0)\n mixed_optimizer.step()\n mixed_optimizer.zero_grad()\n net.update_global_step()\n net_metrics = net.update_metrics(cls_loss_reduced,\n loc_loss_reduced, cls_preds,\n labels, cared)\n\n step_time = (time.time() - t)\n t = time.time()\n metrics = {}\n num_pos = int((labels > 0)[0].float().sum().cpu().numpy())\n num_neg = int((labels == 0)[0].float().sum().cpu().numpy())\n # if 'anchors_mask' not in example_torch:\n # num_anchors = example_torch['anchors'].shape[1]\n # else:\n # num_anchors = int(example_torch['anchors_mask'][0].sum())\n num_anchors = int(example_tuple[4][0].sum())\n global_step = net.get_global_step()\n if global_step % display_step == 0:\n loc_loss_elem = [\n float(loc_loss[:, :, i].sum().detach().cpu().numpy() /\n batch_size) for i in range(loc_loss.shape[-1])\n ]\n metrics[\"step\"] = global_step\n metrics[\"steptime\"] = step_time\n metrics.update(net_metrics)\n metrics[\"loss\"] = {}\n metrics[\"loss\"][\"loc_elem\"] = loc_loss_elem\n metrics[\"loss\"][\"cls_pos_rt\"] = float(cls_pos_loss.detach().cpu().numpy())\n metrics[\"loss\"][\"cls_neg_rt\"] = float(cls_neg_loss.detach().cpu().numpy())\n # if unlabeled_training:\n # metrics[\"loss\"][\"diff_rt\"] = float(\n # diff_loc_loss_reduced.detach().cpu().numpy())\n if model_cfg.use_direction_classifier:\n metrics[\"loss\"][\"dir_rt\"] = float(dir_loss_reduced.detach().cpu().numpy())\n\n\n metrics[\"num_vox\"] = int(example_tuple[0].shape[0])\n metrics[\"num_pos\"] = int(num_pos)\n metrics[\"num_neg\"] = int(num_neg)\n metrics[\"num_anchors\"] = int(num_anchors)\n metrics[\"lr\"] = float(mixed_optimizer.param_groups[0]['lr'])\n metrics[\"pc_idx\"] = example_tuple[8][0]\n flatted_metrics = flat_nested_json_dict(metrics)\n flatted_summarys = flat_nested_json_dict(metrics, \"/\")\n for k, v in flatted_summarys.items():\n if isinstance(v, (list, tuple)):\n v = {str(i): e for i, e in enumerate(v)}\n writer.add_scalars(k, v, global_step)\n else:\n writer.add_scalar(k, v, global_step)\n metrics_str_list = []\n for k, v in flatted_metrics.items():\n if isinstance(v, float):\n metrics_str_list.append(f\"{k}={v:.3}\")\n elif isinstance(v, (list, tuple)):\n if v and isinstance(v[0], float):\n v_str = ', '.join([f\"{e:.3}\" for e in v])\n metrics_str_list.append(f\"{k}=[{v_str}]\")\n else:\n metrics_str_list.append(f\"{k}={v}\")\n else:\n metrics_str_list.append(f\"{k}={v}\")\n log_str = ', '.join(metrics_str_list)\n print(log_str, file=logf)\n print(log_str)\n ckpt_elasped_time = time.time() - ckpt_start_time\n if ckpt_elasped_time > train_cfg.save_checkpoints_secs:\n torchplus.train.save_models(model_dir, [net, optimizer], net.get_global_step())\n ckpt_start_time = time.time()\n\n total_step_elapsed += steps\n 
torchplus.train.save_models(model_dir, [net, optimizer], net.get_global_step())\n\n # Ensure that all evaluation points are saved forever\n torchplus.train.save_models(eval_checkpoint_dir, [net, optimizer], net.get_global_step(), max_to_keep=100)\n\n except Exception as e:\n # torchplus.train.save_models(model_dir, [net.module, optimizer], net.module.get_global_step())\n torchplus.train.save_models(model_dir, [net, optimizer], net.get_global_step())\n logf.close()\n raise e\n # save model before exit\n torchplus.train.save_models(model_dir, [net, optimizer], net.get_global_step())\n rpn_input = torch.ones([3, 64, 288, 288], dtype=torch.float32, device=pillar_x.device)\n torch.onnx.export(net_pruned.rpn, rpn_input, \"rpn_pruned.onnx\", verbose=True, input_names=[\"rpn_input_features\"], output_names=[\"box\", \"cls\", \"dir\"])\n logf.close()\n\n\ndef _predict_kitti_to_file(net,\n example,\n result_save_path,\n class_names,\n center_limit_range=None,\n lidar_input=False):\n # batch_image_shape = example['image_shape']\n # batch_imgidx = example['image_idx']\n\n\n ############################################\n ## copy from predict_kitti_to_anno\n # eval example : [0: 'voxels', 1: 'num_points', 2: 'coordinates', 3: 'rect'\n # 4: 'Trv2c', 5: 'P2', 6: 'anchors', 7: 'anchors_mask'\n # 8: 'image_idx', 9: 'image_shape']\n\n\n batch_image_shape = example[9]\n\n batch_imgidx = example[8]\n\n pillar_x = example[0][:, :, 0].unsqueeze(0).unsqueeze(0)\n pillar_y = example[0][:, :, 1].unsqueeze(0).unsqueeze(0)\n pillar_z = example[0][:, :, 2].unsqueeze(0).unsqueeze(0)\n pillar_i = example[0][:, :, 3].unsqueeze(0).unsqueeze(0)\n num_points_per_pillar = example[1].float().unsqueeze(0)\n\n # Find distance of x, y, and z from pillar center\n # assuming xyres_16.proto\n coors_x = example[2][:, 3].float()\n coors_y = example[2][:, 2].float()\n x_sub = coors_x.unsqueeze(1) * 0.16 + 0.1\n y_sub = coors_y.unsqueeze(1) * 0.16 + -39.9\n ones = torch.ones([1, 100], dtype=torch.float32, device=pillar_x.device)\n x_sub_shaped = torch.mm(x_sub, ones).unsqueeze(0).unsqueeze(0)\n y_sub_shaped = torch.mm(y_sub, ones).unsqueeze(0).unsqueeze(0)\n\n num_points_for_a_pillar = pillar_x.size()[3]\n mask = get_paddings_indicator(num_points_per_pillar, num_points_for_a_pillar, axis=0)\n mask = mask.permute(0, 2, 1)\n mask = mask.unsqueeze(1)\n mask = mask.type_as(pillar_x)\n\n coors = example[2]\n anchors = example[6]\n anchors_mask = example[7]\n anchors_mask = torch.as_tensor(anchors_mask, dtype=torch.uint8, device=pillar_x.device)\n anchors_mask = anchors_mask.byte()\n rect = example[3]\n Trv2c = example[4]\n P2 = example[5]\n image_idx = example[8]\n\n input = [pillar_x, pillar_y, pillar_z, pillar_i,\n num_points_per_pillar, x_sub_shaped, y_sub_shaped,\n mask, coors, anchors, anchors_mask, rect, Trv2c, P2, image_idx]\n ######################################################\n\n predictions_dicts = net(input)\n # t = time.time()\n for i, preds_dict in enumerate(predictions_dicts):\n image_shape = batch_image_shape[i]\n img_idx = preds_dict[\"image_idx\"]\n if preds_dict[\"bbox\"] is not None:\n box_2d_preds = preds_dict[\"bbox\"].data.cpu().numpy()\n box_preds = preds_dict[\"box3d_camera\"].data.cpu().numpy()\n scores = preds_dict[\"scores\"].data.cpu().numpy()\n box_preds_lidar = preds_dict[\"box3d_lidar\"].data.cpu().numpy()\n # write pred to file\n box_preds = box_preds[:, [0, 1, 2, 4, 5, 3, 6]] # lhw->hwl(label file format)\n label_preds = preds_dict[\"label_preds\"].data.cpu().numpy()\n # label_preds = 
np.zeros([box_2d_preds.shape[0]], dtype=np.int32)\n result_lines = []\n for box, box_lidar, bbox, score, label in zip(\n box_preds, box_preds_lidar, box_2d_preds, scores,\n label_preds):\n if not lidar_input:\n if bbox[0] > image_shape[1] or bbox[1] > image_shape[0]:\n continue\n if bbox[2] < 0 or bbox[3] < 0:\n continue\n # print(img_shape)\n if center_limit_range is not None:\n limit_range = np.array(center_limit_range)\n if (np.any(box_lidar[:3] < limit_range[:3])\n or np.any(box_lidar[:3] > limit_range[3:])):\n continue\n bbox[2:] = np.minimum(bbox[2:], image_shape[::-1])\n bbox[:2] = np.maximum(bbox[:2], [0, 0])\n result_dict = {\n 'name': class_names[int(label)],\n 'alpha': -np.arctan2(-box_lidar[1], box_lidar[0]) + box[6],\n 'bbox': bbox,\n 'location': box[:3],\n 'dimensions': box[3:6],\n 'rotation_y': box[6],\n 'score': score,\n }\n result_line = kitti.kitti_result_line(result_dict)\n result_lines.append(result_line)\n else:\n result_lines = []\n result_file = f\"{result_save_path}/{kitti.get_image_index_str(img_idx)}.txt\"\n result_str = '\\n'.join(result_lines)\n with open(result_file, 'w') as f:\n f.write(result_str)\n\ndataid = -1\n# aka = 0\ndef predict_kitti_to_anno(net,\n example,\n class_names,\n center_limit_range=None,\n lidar_input=False,\n global_set=None):\n\n # eval example : [0: 'voxels', 1: 'num_points', 2: 'coordinates', 3: 'rect'\n # 4: 'Trv2c', 5: 'P2', 6: 'anchors', 7: 'anchors_mask'\n # 8: 'image_idx', 9: 'image_shape']\n\n # eval example [0: 'voxels', 1: 'num_points', 2: 'coordinate', 3: 'anchors',\n # 4: 'anchor_mask', 5: 'pc_idx']\n\n # batch_image_shape = example[9]\n\n # batch_imgidx = example[8]\n\n pillar_x = example[0][:, :, 0].unsqueeze(0).unsqueeze(0)\n pillar_y = example[0][:, :, 1].unsqueeze(0).unsqueeze(0)\n pillar_z = example[0][:, :, 2].unsqueeze(0).unsqueeze(0)\n pillar_i = example[0][:, :, 3].unsqueeze(0).unsqueeze(0)\n num_points_per_pillar = example[1].float().unsqueeze(0)\n\n # Find distance of x, y, and z from pillar center\n # assuming xyres_16.proto\n coors_x = example[2][:, 3].float()\n coors_y = example[2][:, 2].float()\n x_sub = coors_x.unsqueeze(1) * 0.16 -22.96 #+ 0.08#-22.96#+ 0.08#-22.96#-19.76\n y_sub = coors_y.unsqueeze(1) * 0.16 -22.96#- 19.76 #-22.96#-19.76#-22.96#-19.76\n ones = torch.ones([1, 100], dtype=torch.float32, device=pillar_x.device)\n # global aka\n # aka+= 1\n # print(aka)\n # from IPython import embed\n \n # embed()\n x_sub_shaped = torch.mm(x_sub, ones).unsqueeze(0).unsqueeze(0)\n y_sub_shaped = torch.mm(y_sub, ones).unsqueeze(0).unsqueeze(0)\n\n num_points_for_a_pillar = pillar_x.size()[3]\n mask = get_paddings_indicator(num_points_per_pillar, num_points_for_a_pillar, axis=0)\n mask = mask.permute(0, 2, 1)\n mask = mask.unsqueeze(1)\n mask = mask.type_as(pillar_x)\n\n coors = example[2]\n anchors = example[3]\n anchors_mask = example[4]\n anchors_mask = torch.as_tensor(anchors_mask, dtype=torch.uint8, device=pillar_x.device)\n anchors_mask = anchors_mask.byte()\n # rect = example[3]\n # Trv2c = example[4]\n # P2 = example[5]\n pc_idx = example[5]\n\n input = [pillar_x, pillar_y, pillar_z, pillar_i,\n num_points_per_pillar, x_sub_shaped, y_sub_shaped,\n mask, coors, anchors, anchors_mask, pc_idx]\n predictions_dicts = net(input)\n # lidar_box, final_score, label_preds, pc_idx\n\n annos = []\n for i, preds_dict in enumerate(predictions_dicts):\n # image_shape = batch_image_shape[i]\n pc_idx = preds_dict[3]\n\n if preds_dict[0] is not None: # bbox list\n # box_2d_preds = preds_dict[0].detach().cpu().numpy() # 
bbox\n # box_preds = preds_dict[1].detach().cpu().numpy() # bbox3d_camera\n scores = preds_dict[1].detach().cpu().numpy() # scores\n box_preds_lidar = preds_dict[0].detach().cpu().numpy() # box3d_lidar\n # write pred to file\n label_preds = preds_dict[2].detach().cpu().numpy() # label_preds\n\n anno = kitti.get_start_result_anno()\n num_example = 0\n content = ''\n for box_lidar, score, label in zip(\n box_preds_lidar, scores, label_preds):\n # if not lidar_input:\n # if bbox[0] > image_shape[1] or bbox[1] > image_shape[0]:\n # continue\n # if bbox[2] < 0 or bbox[3] < 0:\n # continue\n # print(img_shape)\n if center_limit_range is not None:\n limit_range = np.array(center_limit_range)\n if (np.any(box_lidar[:3] < limit_range[:3])\n or np.any(box_lidar[:3] > limit_range[3:])):\n continue\n # image_shape = [image_shape[0], image_shape[1]]\n # bbox[2:] = np.minimum(bbox[2:], image_shape[::-1])\n # bbox[:2] = np.maximum(bbox[:2], [0, 0])\n content += str(label) + \" 0.0 0 0.0 0.0 0.0 0.0 0.0 \" + str(box_lidar[5]) + \" \" + str(box_lidar[3]) + \" \"\\\n + str(box_lidar[4]) + \" \" + str(box_lidar[0]) + \" \" + str(box_lidar[1]) + \" \" + str(box_lidar[2]) + \" \" + str(box_lidar[6]) + \" \" + str(score) + \"\\n\"\n anno[\"name\"].append(class_names[int(label)])\n anno[\"truncated\"].append(0.0)\n anno[\"occluded\"].append(0)\n anno[\"alpha\"].append(-np.arctan2(-box_lidar[1], box_lidar[0]) +\n box_lidar[6])\n anno[\"bbox\"].append(np.array([0, 0, 0, 0]))\n anno[\"dimensions\"].append([box_lidar[4], box_lidar[5], box_lidar[3]]) # annotate by shl\n # anno[\"dimensions\"].append(box_lidar[3:6])\n anno[\"location\"].append(box_lidar[:3])\n anno[\"rotation_y\"].append(box_lidar[6])\n if global_set is not None:\n for i in range(100000):\n if score in global_set:\n score -= 1 / 100000\n else:\n global_set.add(score)\n break\n anno[\"score\"].append(score)\n\n num_example += 1\n content = content.strip()\n global dataid\n dataid += 1\n # print(\"content\", content)\n with open(\"./pre_test/%06d.txt\" % pc_idx, 'w') as f:\n f.write(content)\n if num_example != 0:\n anno = {n: np.stack(v) for n, v in anno.items()}\n annos.append(anno)\n else:\n annos.append(kitti.empty_result_anno())\n else:\n annos.append(kitti.empty_result_anno())\n num_example = annos[-1][\"name\"].shape[0]\n annos[-1][\"pc_idx\"] = np.array(\n [pc_idx] * num_example, dtype=np.int64)\n return annos\n\nragular = [8, 16, 32, 64, 128, 256, 512, 1024]\ndef evaluate(config_path,\n model_dir,\n result_path=None,\n predict_test=False,\n ckpt_path=None,\n ref_detfile=None,\n pickle_result=True,\n read_predict_pkl_path=None):\n\n model_dir = str(Path(model_dir).resolve())\n if predict_test:\n result_name = 'predict_test'\n else:\n result_name = 'eval_results'\n if result_path is None:\n model_dir = Path(model_dir)\n result_path = model_dir / result_name\n else:\n result_path = pathlib.Path(result_path)\n\n if isinstance(config_path, str):\n config = pipeline_pb2.TrainEvalPipelineConfig()\n with open(config_path, \"r\") as f:\n proto_str = f.read()\n text_format.Merge(proto_str, config)\n else:\n config = config_path\n\n input_cfg = config.eval_input_reader\n model_cfg = config.model.second\n train_cfg = config.train_config\n class_names = list(input_cfg.class_names)\n center_limit_range = model_cfg.post_center_limit_range\n #########################\n # Build Voxel Generator\n #########################\n voxel_generator = voxel_builder.build(model_cfg.voxel_generator)\n bv_range = voxel_generator.point_cloud_range[[0, 1, 3, 4]]\n box_coder = 
box_coder_builder.build(model_cfg.box_coder)\n target_assigner_cfg = model_cfg.target_assigner\n target_assigner = target_assigner_builder.build(target_assigner_cfg,\n bv_range, box_coder)\n\n net = second_builder.build(model_cfg, voxel_generator, target_assigner, input_cfg.batch_size)\n net.cuda()\n if train_cfg.enable_mixed_precision:\n net.half()\n net.metrics_to_float()\n net.convert_norm_to_float(net)\n\n if ckpt_path is None:\n torchplus.train.try_restore_latest_checkpoints(model_dir, [net])\n else:\n torchplus.train.restore(ckpt_path, net)\n\n eval_dataset = input_reader_builder.build(\n input_cfg,\n model_cfg,\n training=False,\n voxel_generator=voxel_generator,\n target_assigner=target_assigner)\n # print(net.rpn.deconv1[1].weight.data)\n pruned_flag = True\n use_ragular = True\n if pruned_flag:\n percent = 0.5\n total = 0\n for m in net.modules():\n if isinstance(m, nn.BatchNorm2d):\n total += m.weight.data.shape[0]\n\n bn = torch.zeros(total)\n index = 0\n for m in net.modules():\n if isinstance(m, nn.BatchNorm2d):\n size = m.weight.data.shape[0]\n bn[index:(index+size)] = m.weight.data.abs().clone()\n index += size\n\n y, i = torch.sort(bn)\n thre_index = int(total * percent)\n thre = y[thre_index].cpu().detach().numpy().tolist()\n print(\"thresh = :\", thre)\n pruned = 0\n cfg = []\n cfg_mask = []\n cfg_idx = []\n for k, m in enumerate(net.rpn.modules()):\n if isinstance(m, nn.BatchNorm2d):\n weight_copy = m.weight.data.abs().clone()\n mask = weight_copy.gt(thre).float().cuda()\n if use_ragular:\n sum_mask = torch.sum(mask)\n final = 8\n for i in range(7):\n if sum_mask <= ragular[i + 1] and sum_mask > ragular[i]:\n final = ragular[i + 1]\n a, b = torch.sort(weight_copy)\n th = a[-final]\n mask = weight_copy.ge(th).float().cuda()\n pruned = pruned + mask.shape[0] - torch.sum(mask)\n m.weight.data.mul_(mask)\n m.bias.data.mul_(mask)\n cfg.append(int(torch.sum(mask)))\n cfg_mask.append(mask.clone())\n cfg_idx.append(np.where(mask.cpu().detach().numpy() > 0))\n print('layer index: {:d} \\t total channel: {:d} \\t remaining channel: {:d}'.\n format(k, mask.shape[0], int(torch.sum(mask))))\n net_pruned = second_builder.build(model_cfg, voxel_generator, target_assigner, input_cfg.batch_size, cfg)\n net_pruned.cuda()\n layer_id_in_cfg = 0\n start_mask = torch.ones(64)\n end_mask = cfg_mask[layer_id_in_cfg]\n flag = False\n for [m0, m1] in zip(net.rpn.modules(), net_pruned.rpn.modules()):\n if isinstance(m0, nn.BatchNorm2d):\n idx1 = np.squeeze(np.argwhere(np.asarray(end_mask.cpu().numpy())))\n if idx1.size == 1:\n idx1 = np.resize(idx1,(1,))\n m1.weight.data = m0.weight.data[idx1.tolist()].clone()\n m1.bias.data = m0.bias.data[idx1.tolist()].clone()\n m1.running_mean = m0.running_mean[idx1.tolist()].clone()\n m1.running_var = m0.running_var[idx1.tolist()].clone()\n layer_id_in_cfg += 1\n if not flag:\n start_mask = end_mask.clone()\n else:\n start_mask = cfg_mask[layer_id_in_cfg - 2]\n if layer_id_in_cfg < len(cfg_mask): # do not change in Final FC\n end_mask = cfg_mask[layer_id_in_cfg]\n elif isinstance(m0, nn.Conv2d):\n if layer_id_in_cfg >= len(cfg_mask):\n break\n idx0 = np.squeeze(np.argwhere(np.asarray(start_mask.cpu().numpy())))\n idx1 = np.squeeze(np.argwhere(np.asarray(end_mask.cpu().numpy())))\n if idx0.size == 1:\n idx0 = np.resize(idx0, (1,))\n if idx1.size == 1:\n idx1 = np.resize(idx1, (1,))\n w0 = m0.weight.data.clone()\n w1 = m0.weight.data[:, idx0.tolist(), :, :].clone()\n w1 = w1[idx1.tolist(), :, :, :].clone()\n m1.weight.data = w1.clone()\n flag = False\n elif 
isinstance(m0, nn.ConvTranspose2d):\n idx0 = np.squeeze(np.argwhere(np.asarray(start_mask.cpu().numpy())))\n idx1 = np.squeeze(np.argwhere(np.asarray(end_mask.cpu().numpy())))\n if idx0.size == 1:\n idx0 = np.resize(idx0, (1,))\n if idx1.size == 1:\n idx1 = np.resize(idx1, (1,))\n w0 = m0.weight.data.clone()\n w1 = m0.weight.data[:, idx1.tolist(), :, :].clone()\n w1 = w1[idx0.tolist(), :, :, :].clone()\n m1.weight.data = w1.clone()\n flag = True\n cut_channel = np.where(torch.cat((cfg_mask[4], cfg_mask[11], cfg_mask[18]), dim = 0).cpu().detach().numpy().reshape(-1) > 0.5)\n net_pruned.rpn.conv_cls.weight.data = net.rpn.conv_cls.weight.data[:, np.array(cut_channel).reshape(-1), :, :].clone()\n net_pruned.rpn.conv_box.weight.data = net.rpn.conv_box.weight.data[:, np.array(cut_channel).reshape(-1), :, :].clone()\n net_pruned.rpn.conv_dir_cls.weight.data = net.rpn.conv_dir_cls.weight.data[:, np.array(cut_channel).reshape(-1), :, :].clone()\n net_pruned.rpn.conv_cls.bias.data = net.rpn.conv_cls.bias.data.clone()\n net_pruned.rpn.conv_box.bias.data = net.rpn.conv_box.bias.data.clone()\n net_pruned.rpn.conv_dir_cls.bias.data = net.rpn.conv_dir_cls.bias.data.clone()\n net.rpn = net_pruned.rpn\n del(net_pruned)\n \n rpn_input = torch.ones([1, 64, 288, 288], dtype=torch.float32).cuda()\n if read_predict_pkl_path is not None:\n gt_annos = [info[\"annos\"] for info in eval_dataset.dataset.kitti_infos]\n result_path_step = result_path / f\"step_{net.get_global_step()}\"\n result_path_step.mkdir(parents=True, exist_ok=True)\n with open(read_predict_pkl_path + \"result.pkl\", 'rb') as f:\n dt_annos = pickle.load(f)\n # with open(\"/nfs/nas/model/songhongli/neolix_shanghai_3026/eval_results/step_264000/\" + \"result.pkl\", 'rb') as f:\n # dt_annos = pickle.load(f)\n result = get_official_eval_result(gt_annos, dt_annos, class_names)\n print(result)\n else:\n eval_dataloader = torch.utils.data.DataLoader(\n eval_dataset,\n batch_size=input_cfg.batch_size,\n shuffle=False,\n num_workers=input_cfg.num_workers,\n pin_memory=False,\n collate_fn=merge_second_batch)\n\n if train_cfg.enable_mixed_precision:\n float_dtype = torch.float16\n else:\n float_dtype = torch.float32\n\n net.eval()\n # from IPython import embed\n # embed()\n result_path_step = result_path / f\"step_{net.get_global_step()}\"\n result_path_step.mkdir(parents=True, exist_ok=True)\n t = time.time()\n dt_annos = []\n global_set = None\n print(\"Generate output labels...\")\n bar = ProgressBar()\n bar.start(len(eval_dataset) // input_cfg.batch_size + 1)\n\n for example in iter(eval_dataloader):\n # eval example [0: 'voxels', 1: 'num_points', 2: 'coordinates', 3: 'rect'\n # 4: 'Trv2c', 5: 'P2', 6: 'anchors', 7: 'anchors_mask'\n # 8: 'image_idx', 9: 'image_shape']\n\n # eval example [0: 'voxels', 1: 'num_points', 2: 'coordinate', 3: 'anchors',\n # 4: 'anchor_mask', 5: 'pc_idx']\n example = example_convert_to_torch(example, float_dtype)\n # eval example [0: 'voxels', 1: 'num_points', 2: 'coordinate', 3: 'anchors',\n # 4: 'anchor_mask', 5: 'pc_idx']\n\n example_tuple = list(example.values())\n example_tuple[5] = torch.from_numpy(example_tuple[5])\n # example_tuple[9] = torch.from_numpy(example_tuple[9])\n\n if (example_tuple[3].size()[0] != input_cfg.batch_size):\n continue\n\n if pickle_result:\n dt_annos += predict_kitti_to_anno(\n net, example_tuple, class_names, center_limit_range,\n model_cfg.lidar_input, global_set)\n with open(result_path_step / \"result.pkl\", 'wb') as f:\n pickle.dump(dt_annos, f)\n\n else:\n _predict_kitti_to_file(net, 
example_tuple, result_path_step, class_names,\n center_limit_range, model_cfg.lidar_input)\n bar.print_bar()\n\n sec_per_example = len(eval_dataset) / (time.time() - t)\n print(f'generate label finished({sec_per_example:.2f}/s). start eval:')\n\n print(f\"avg forward time per example: {net.avg_forward_time:.3f}\")\n print(f\"avg postprocess time per example: {net.avg_postprocess_time:.3f}\")\n if not predict_test:\n gt_annos = [info[\"annos\"] for info in eval_dataset.dataset.kitti_infos]\n # if (len(gt_annos)%2 != 0):\n # del gt_annos[-1]\n if not pickle_result:\n dt_annos = kitti.get_label_annos(result_path_step)\n result = get_official_eval_result(gt_annos, dt_annos, class_names)\n print(result)\n # result = get_coco_eval_result(gt_annos, dt_annos, class_names)\n # print(result)\n # if pickle_result:\n # with open(result_path_step / \"result.pkl\", 'wb') as f:\n # pickle.dump(dt_annos, f)\n\n\ndef export_onnx(net, example, class_names,\n center_limit_range=None, lidar_input=False, global_set=None):\n\n # eval example [0: 'voxels', 1: 'num_points', 2: 'coordinate', 3: 'anchors',\n # 4: 'anchor_mask', 5: 'pc_idx']\n pillar_x = example[0][:,:,0].unsqueeze(0).unsqueeze(0)\n pillar_y = example[0][:,:,1].unsqueeze(0).unsqueeze(0)\n pillar_z = example[0][:,:,2].unsqueeze(0).unsqueeze(0)\n pillar_i = example[0][:,:,3].unsqueeze(0).unsqueeze(0)\n num_points_per_pillar = example[1].float().unsqueeze(0)\n\n # Find distance of x, y, and z from pillar center\n # assuming xyres_16.proto\n coors_x = example[2][:, 3].float()\n coors_y = example[2][:, 2].float()\n x_sub = coors_x.unsqueeze(1) * 0.16 - 22.96\n y_sub = coors_y.unsqueeze(1) * 0.16 - 22.96\n ones = torch.ones([1, 100],dtype=torch.float32, device=pillar_x.device)\n x_sub_shaped = torch.mm(x_sub, ones).unsqueeze(0).unsqueeze(0)\n y_sub_shaped = torch.mm(y_sub, ones).unsqueeze(0).unsqueeze(0)\n\n num_points_for_a_pillar = pillar_x.size()[3]\n mask = get_paddings_indicator(num_points_per_pillar, num_points_for_a_pillar, axis=0)\n mask = mask.permute(0, 2, 1)\n mask = mask.unsqueeze(1)\n mask = mask.type_as(pillar_x)\n\n coors = example[2]\n\n #######################################################\n anchors = example[3]\n anchors_mask = example[4]\n anchors_mask = torch.as_tensor(anchors_mask, dtype=torch.uint8, device=pillar_x.device)\n anchors_mask = anchors_mask.byte()\n # rect = example[3]\n # Trv2c = example[4]\n # P2 = example[5]\n pc_idx = example[5]\n\n # voxels:10715\n # spatial_features torch.Size([1, 64, 288, 288])\n\n\n #######################################################\n\n print(pillar_x.size())\n print(pillar_y.size())\n print(pillar_z.size())\n print(pillar_i.size())\n print(num_points_per_pillar.size())\n print(x_sub_shaped.size())\n print(y_sub_shaped.size())\n print(mask.size())\n\n print(coors.size())\n print(anchors.size())\n print(anchors_mask.size())\n # print(rect.size())\n # print(Trv2c.size())\n # print(P2.size())\n\n\n input_names = [\"pillar_x\", \"pillar_y\", \"pillar_z\", \"pillar_i\",\n \"num_points_per_pillar\", \"x_sub_shaped\", \"y_sub_shaped\", \"mask\", \"coors\"]\n\n # Weird Convolution\n pillar_x = torch.ones([1, 1, 10000, 100], dtype=torch.float32, device=pillar_x.device)\n pillar_y = torch.ones([1, 1, 10000, 100], dtype=torch.float32, device=pillar_x.device)\n pillar_z = torch.ones([1, 1, 10000, 100], dtype=torch.float32, device=pillar_x.device)\n pillar_i = torch.ones([1, 1, 10000, 100], dtype=torch.float32, device=pillar_x.device)\n num_points_per_pillar = torch.ones([1, 10000], dtype=torch.float32, 
device=pillar_x.device)\n x_sub_shaped = torch.ones([1, 1, 10000, 100], dtype=torch.float32, device=pillar_x.device)\n y_sub_shaped = torch.ones([1, 1, 10000, 100], dtype=torch.float32, device=pillar_x.device)\n mask = torch.ones([1, 1, 10000, 100], dtype=torch.float32, device=pillar_x.device)\n\n device = torch.device(\"cuda:0\")\n coors_numpy = np.loadtxt('./onnx_predict_outputs/coors_batch_size_3.txt', dtype=np.int32)\n coors = torch.from_numpy(coors_numpy)\n coors = coors.to(device)\n\n # De-Convolution\n # pillar_x = torch.ones([1, 100, 8599, 1],dtype=torch.float32, device=pillar_x.device )\n # pillar_y = torch.ones([1, 100, 8599, 1],dtype=torch.float32, device=pillar_x.device )\n # pillar_z = torch.ones([1, 100, 8599, 1],dtype=torch.float32, device=pillar_x.device )\n # pillar_i = torch.ones([1, 100, 8599, 1],dtype=torch.float32, device=pillar_x.device )\n # num_points_per_pillar = torch.ones([1, 8599],dtype=torch.float32, device=pillar_x.device )\n # x_sub_shaped = torch.ones([1, 100,8599, 1],dtype=torch.float32, device=pillar_x.device )\n # y_sub_shaped = torch.ones([1, 100,8599, 1],dtype=torch.float32, device=pillar_x.device )\n # mask = torch.ones([1, 100, 8599, 1],dtype=torch.float32, device=pillar_x.device )\n\n # example1 = [pillar_x, pillar_y, pillar_z, pillar_i,\n # num_points_per_pillar, x_sub_shaped, y_sub_shaped, mask]\n ##########################################################################\n example1 = [pillar_x, pillar_y, pillar_z, pillar_i,\n num_points_per_pillar, x_sub_shaped, y_sub_shaped,\n mask]\n ##########################################################################\n\n # print('-------------- network readable visual --------------')\n # example2 = [torch.ones([10000, 64], dtype=torch.float32, device=pillar_x.device), coors]\n # torch.onnx.export(net.middle_feature_extractor, example2, \"scatter.onnx\")\n # # torch.onnx.export(net, example1, \"net.onnx\", verbose=True, input_names=input_names,\n # # output_names=[\"box\", \"cls\", \"dir\"])\n # print('net.onnx transfer success ...')\n\n print('-------------- network readable visual --------------')\n torch.onnx.export(net.voxel_feature_extractor, example1, \"pfe.onnx\", verbose=True, input_names=input_names,\n output_names=[\"rpn_input_features\"])\n print('pfe.onnx transfer success ...')\n\n rpn_input = torch.ones([1, 64, 288, 288], dtype=torch.float32, device=pillar_x.device)\n torch.onnx.export(net.rpn, rpn_input, \"rpn.onnx\", verbose=True, input_names=[\"rpn_input_features\"],\n output_names=[\"box\", \"cls\", \"dir\"])\n print('rpn.onnx transfer success ...')\n\n return 0\n\n\ndef onnx_model_generate(config_path,\n model_dir,\n result_path=None,\n predict_test=False,\n ckpt_path=None\n ):\n model_dir = pathlib.Path(model_dir)\n if predict_test:\n result_name = 'predict_test'\n else:\n result_name = 'eval_results'\n if result_path is None:\n result_path = model_dir / result_name\n else:\n result_path = pathlib.Path(result_path)\n config = pipeline_pb2.TrainEvalPipelineConfig()\n with open(config_path, \"r\") as f:\n proto_str = f.read()\n text_format.Merge(proto_str, config)\n\n input_cfg = config.eval_input_reader\n model_cfg = config.model.second\n train_cfg = config.train_config\n class_names = list(input_cfg.class_names)\n center_limit_range = model_cfg.post_center_limit_range\n\n ##########################\n ## Build Voxel Generator\n ##########################\n voxel_generator = voxel_builder.build(model_cfg.voxel_generator)\n bv_range = voxel_generator.point_cloud_range[[0, 1, 3, 4]]\n 
box_coder = box_coder_builder.build(model_cfg.box_coder)\n target_assigner_cfg = model_cfg.target_assigner\n target_assigner = target_assigner_builder.build(target_assigner_cfg,\n bv_range, box_coder)\n\n\n net = second_builder.build(model_cfg, voxel_generator, target_assigner, 1)\n net.cuda()\n if train_cfg.enable_mixed_precision:\n net.half()\n net.metrics_to_float()\n net.convert_norm_to_float(net)\n\n if ckpt_path is None:\n torchplus.train.try_restore_latest_checkpoints(model_dir, [net])\n else:\n torchplus.train.restore(ckpt_path, net)\n\n eval_dataset = input_reader_builder.build(\n input_cfg,\n model_cfg,\n training=False,\n voxel_generator=voxel_generator,\n target_assigner=target_assigner)\n eval_dataloader = torch.utils.data.DataLoader(\n eval_dataset,\n batch_size=1,\n shuffle=False,\n num_workers=1,\n pin_memory=False,\n collate_fn=merge_second_batch)\n\n\n if train_cfg.enable_mixed_precision:\n float_dtype = torch.float16\n else:\n float_dtype = torch.float32\n\n net.eval()\n result_path_step = result_path / f\"step_{net.get_global_step()}\"\n result_path_step.mkdir(parents=True, exist_ok=True)\n\n dt_annos = []\n global_set = None\n print(\"Generate output labels...\")\n bar = ProgressBar()\n bar.start(len(eval_dataset) // input_cfg.batch_size + 1)\n\n for example in iter(eval_dataloader):\n example = example_convert_to_torch(example, float_dtype)\n example_tuple = list(example.values())\n # batch_image_shape = example_tuple[8]\n example_tuple[5] = torch.from_numpy(example_tuple[5])\n # example_tuple[9] = torch.from_numpy(example_tuple[9])\n # torch.onnx.export(net, example_tuple, \"./tst_onnxnet.onnx\", verbose = False)\n\n dt_annos = export_onnx(\n net, example_tuple, class_names, center_limit_range,\n model_cfg.lidar_input, global_set)\n return 0\n bar.print_bar()\n\nif __name__ == '__main__':\n fire.Fire()\n\n"
]
| [
[
"numpy.concatenate",
"numpy.logical_not",
"numpy.array",
"numpy.zeros",
"numpy.random.seed",
"numpy.sum",
"matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.__init__",
"numpy.ones",
"numpy.round",
"numpy.logical_and",
"matplotlib.backends.backend_qt5agg.NavigationToolbar2QT",
"numpy.random.randint",
"numpy.fromfile",
"matplotlib.figure.Figure",
"matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.updateGeometry",
"numpy.linalg.inv",
"matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.setSizePolicy"
],
[
"torch.cat",
"numpy.minimum",
"torch.ones",
"numpy.resize",
"torch.sum",
"torch.unsqueeze",
"torch.utils.data.DataLoader",
"torch.as_tensor",
"torch.zeros",
"torch.device",
"numpy.array",
"torch.mm",
"numpy.loadtxt",
"numpy.stack",
"numpy.arctan2",
"torch.sort",
"torch.arange",
"numpy.random.seed",
"torch.sign",
"torch.from_numpy",
"numpy.any",
"torch.onnx.export",
"numpy.random.get_state",
"numpy.maximum"
]
]
|
yolande-prog/pytorch-for-information-extraction | [
"caa3c4d9b3f3a0ddc329819c5638826810c717ec"
]
| [
"code/modules/extraction/coordinates.py"
]
| [
"import math\nimport os\n\nimport numpy as np\n\nimport utils\n\n\ndef denormalise_point(normalised_point, lengh):\n if math.floor(normalised_point * lengh) <= 0:\n return math.ceil(normalised_point * lengh)\n if math.ceil(normalised_point * lengh) >= lengh:\n return math.floor(normalised_point * lengh)\n else:\n return round(normalised_point * lengh)\n\n\ndef compute_relative_coordinates(annotation_folder, ocr_parameters_url='ocr_parameters.json'):\n def is_annotation_file(annotation_file):\n return annotation_file.__contains__('.json') and not annotation_file == ocr_parameters_url\n\n annotation_files = [annotation_file for annotation_file in os.listdir(annotation_folder) if\n is_annotation_file(annotation_file)]\n annotations = [utils.load_json(os.path.join(annotation_folder, annotation_file)) for annotation_file in\n annotation_files]\n ocr_parameters = utils.load_json(os.path.join(annotation_folder, ocr_parameters_url))\n ocr_coordinates = dict()\n for annotation in annotations:\n width = annotation['imageWidth']\n height = annotation['imageHeight']\n for shape in annotation['shapes']:\n if not ocr_coordinates.__contains__(shape['label']): ocr_coordinates[shape['label']] = []\n if shape['shape_type'] == 'rectangle':\n (x1, y1), (x2, y2) = shape['points']\n x1, y1, x2, y2 = x1 / width, y1 / height, x2 / width, y2 / height\n ocr_coordinates[shape['label']].append([x1, y1, x2, y2])\n for field in ocr_coordinates.keys():\n if not ocr_parameters.__contains__(field): ocr_parameters[field] = dict()\n ocr_parameters[field]['cords'] = np.average(ocr_coordinates[field], axis=0).tolist()\n utils.dump_json(ocr_parameters, os.path.join(annotation_folder, ocr_parameters_url))\n return ocr_coordinates\n\n\nif __name__ == '__main__':\n pass\n"
]
| [
[
"numpy.average"
]
]
|
DVA-Project-Sp22/dva-project | [
"5c3aeb5306e78b3cdd96759a25c3f4096673e289"
]
| [
"api/api-generateplaylist/lambda_function.py"
]
| [
"import json\nimport awswrangler as wr\nimport numpy as np\n\n\ndef lambda_handler(event, context):\n # TODO implement\n\n def song_recommender(user_input, rec_count=10):\n t = tuple(user_input)\n query = f\"select distinct cluster_id from clusteredsongs where track_id IN {t}\"\n\n user_input_idx = wr.athena.read_sql_query(\n sql=query, database=\"millionsongdataset\"\n ).values.tolist()\n\n dist_matrix = wr.athena.read_sql_query(\n sql=\"select * from distances\", database=\"millionsongdataset\"\n ).to_numpy()\n\n user_songs = []\n\n for j in user_input_idx:\n user_song_dist = dist_matrix[j].flatten(order=\"C\")\n user_songs.append(user_song_dist)\n\n user_songs_mean = np.mean(np.array(user_songs), axis=0)\n\n ind = np.argpartition(user_songs_mean, -10)[-10:]\n\n cluster_density = wr.athena.read_sql_query(\n sql=\"select cluster_id, count(*) as count from clusteredsongs group by cluster_id\",\n database=\"millionsongdataset\",\n )\n\n vals = 1\n for i in ind:\n if int(cluster_density[cluster_density[\"cluster_id\"] == i][\"count\"]) < 10:\n vals += 1\n else:\n break\n\n selected_clusters = list(ind[0:vals])\n\n tsc = tuple(selected_clusters)\n\n if len(tsc) == 1:\n result_query = f\"\"\"\n SELECT track_id\n , song_title\n , artist_name\n , spotify_id\n , 0 as duration\n , loudness\n , tempo\n , artist_familiarity\n , 0 as artist_hotness\n FROM clusteredsongs\n WHERE cluster_id = {tsc[0]}\n \"\"\"\n else:\n result_query = f\"\"\"\n SELECT track_id\n , song_title\n , artist_name\n , spotify_id\n , 0 as duration\n , loudness\n , tempo\n , artist_familiarity\n , 0 as artist_hotness\n FROM clusteredsongs\n WHERE cluster_id in {tsc}\n \"\"\"\n\n result = wr.athena.read_sql_query(\n sql=result_query, database=\"millionsongdataset\"\n ).head(10)\n\n return result.to_json(orient=\"records\")\n\n dl = json.loads(event[\"body\"])[\"dislikedSongs\"]\n\n # dl = [\"TRSVZFO128F4294EAA\",\"TRSVZGB128EF34463A\",\"TRSVZGB128EF34463A\"]\n\n parsed = json.loads(song_recommender(dl))\n x = {\"request\": event, \"songs\": parsed, \"disliked\": dl}\n return {\n \"statusCode\": 200,\n \"body\": json.dumps(x),\n }\n\n"
]
| [
[
"numpy.array",
"numpy.argpartition"
]
]
|
NeilGirdhar/tikzify | [
"5de296c118188e532788234971de387f9fe1416e"
]
| [
"tikzify/node_graph/constraints.py"
]
| [
"from __future__ import annotations\n\nimport itertools as it\nfrom typing import Any, Iterable, List, Optional, Reversible, Sequence, Tuple, Union\n\nimport numpy as np\n\n__all__ = ['Constraints']\n\n\nclass Constraints:\n \"\"\"\n This class transforms a set of constraints into two-dimensional positions\n that best satisfy those constraints.\n \"\"\"\n\n def __init__(self, labels: Sequence[str]):\n self.labels = labels\n self.a = np.zeros((0, 2 * len(labels)))\n self.b = np.zeros((0,))\n self.solution: Optional[np.ndarray[Any, Any]] = None\n\n # Properties --------------------------------------------------------------\n @property\n def num_labels(self) -> int:\n return len(self.labels)\n\n @property\n def num_constraints(self) -> int:\n return self.b.shape[0]\n\n # New methods -------------------------------------------------------------\n def add_constraint(self, a: np.ndarray[Any, Any], b: float) -> None:\n self.a = np.vstack((self.a, a.reshape((1, 2 * self.num_labels))))\n self.b = np.hstack((self.b, b))\n\n def blank(self) -> np.ndarray[Any, Any]:\n return np.zeros((2 * self.num_labels,))\n\n def index(self, coord: str, label: str) -> int:\n return self.labels.index(label) + (self.num_labels if coord == 'y' else 0)\n\n def set_value(self, coord: str, c: str, value: float = 0.0) -> None:\n a = self.blank()\n a[self.index(coord, c)] = 1.0\n self.add_constraint(a, value)\n\n def set_delta(self, coord: str, *args: str, delta: Union[float, Iterable[float]] = 0.0) -> None:\n if len(args) <= 1:\n raise ValueError\n deltas: Iterable[float]\n if isinstance(delta, float):\n deltas = it.repeat(delta)\n else:\n deltas = delta\n for x, y, this_delta in zip(args, args[1:], deltas):\n a = self.blank()\n a[self.index(coord, x)] = 1.0\n a[self.index(coord, y)] = -1.0\n self.add_constraint(a, this_delta)\n\n def set_deltas(self,\n *args: str,\n delta: Union[Tuple[float, float],\n List[Tuple[float, float]]] = (0.0, 0.0)) -> None:\n deltas: Sequence[Tuple[float, float]]\n if isinstance(delta, tuple):\n deltas = [delta for _ in range(len(args))]\n else:\n deltas = delta\n self.set_delta_x(*args, delta_x=[x for x, y in deltas])\n self.set_delta_y(*args, delta_y=[y for x, y in deltas])\n\n def set_x(self, c: str, x: float) -> None:\n self.set_value('x', c, x)\n\n def set_y(self, c: str, y: float) -> None:\n self.set_value('y', c, y)\n\n def set_delta_x(self, *args: str, delta_x: Union[Reversible[float], float] = 0.0) -> None:\n \"\"\"\n Stack each of the args horizontally, spaced by delta_x, from left to\n right.\n \"\"\"\n if not isinstance(delta_x, float):\n delta_x = list(reversed(delta_x))\n self.set_delta('x', *reversed(args), delta=delta_x)\n\n def set_delta_y(self, *args: str, delta_y: Union[Sequence[float], float] = 0.0) -> None:\n \"\"\"\n Stack each of the args vertically, spaced by delta_y, from top to\n bottom.\n \"\"\"\n self.set_delta('y', *args, delta=delta_y)\n\n def set_location(self, c: str, location: Tuple[float, float]) -> None:\n \"Set c.x, c.y = location.\"\n if len(location) != 2:\n raise ValueError\n self.set_x(c, location[0])\n self.set_y(c, location[1])\n\n def set_between(self,\n coord: str,\n *items: str,\n ratios: Optional[Sequence[float]] = None) -> None:\n \"\"\"\n Space n items with spacing according to (n-1) ratios.\n E.g., ratios=[0, 1] puts item[1] at item[0]\n fraction = (d-c)/(d-c + (e-d)) => (d-c) = (e-c) * fraction\n \"\"\"\n if ratios is None:\n ratios = [1] * (len(items) - 1)\n for c, d, e, r1, r2 in zip(items, items[1:], items[2:], ratios, ratios[1:]):\n fraction 
= r1 / (r1 + r2)\n a = self.blank()\n if c is not None:\n a[self.index(coord, c)] += fraction - 1.0\n if d is not None:\n a[self.index(coord, d)] += 1.0\n if e is not None:\n a[self.index(coord, e)] -= fraction\n self.add_constraint(a, 0.0)\n\n def set_x_between(self, *items: str, ratios: Optional[Sequence[float]] = None) -> None:\n self.set_between('x', *items, ratios=ratios)\n\n def set_y_between(self, *items: str, ratios: Optional[Sequence[float]] = None) -> None:\n self.set_between('y', *items, ratios=ratios)\n\n def set_deltas_equal(self, c: str, d: str, e: str, f: str) -> None:\n self.set_delta_x_equal(c, d, e, f)\n self.set_delta_y_equal(c, d, e, f)\n\n def set_delta_equal(self, coord: str, c: str, d: str, e: str, f: str) -> None:\n \"Set c - d = e - f.\"\n a = self.blank()\n if c is not None:\n a[self.index(coord, c)] += 1.0\n if d is not None:\n a[self.index(coord, d)] -= 1.0\n if e is not None:\n a[self.index(coord, e)] -= 1.0\n if f is not None:\n a[self.index(coord, f)] += 1.0\n self.add_constraint(a, 0.0)\n\n def set_delta_x_equal(self, c: str, d: str, e: str, f: str) -> None:\n self.set_delta_equal('x', c, d, e, f)\n\n def set_delta_y_equal(self, c: str, d: str, e: str, f: str) -> None:\n self.set_delta_equal('y', c, d, e, f)\n\n def set_slope(self, slope: float, *args: str) -> None:\n if len(args) < 2:\n raise ValueError\n for c, d in zip(args, args[1:]):\n a = self.blank()\n a[self.index('x', c)] += slope\n a[self.index('x', d)] -= slope\n a[self.index('y', c)] -= 1.0\n a[self.index('y', d)] += 1.0\n self.add_constraint(a, 0.0)\n\n def solve(self, decimals: int = 6) -> None:\n solution, _, rank, _ = np.linalg.lstsq(self.a, self.b, rcond=None) # type: ignore\n # solution = np.linalg.solve(self.a, self.b)\n if rank != 2 * self.num_labels:\n raise Constraints.InsufficientConstraints(\n f\"Only {rank} constraints provided for a problem that needs \"\n + f\"{2 * self.num_labels}\")\n solution = np.around(solution, decimals=decimals)\n self.solution = np.where(np.signbit(solution) & (solution == 0.0), -solution, solution)\n\n def solved(self, c: str) -> np.ndarray[Any, Any]:\n if self.solution is None:\n raise ValueError\n return self.solution[[self.index('x', c), self.index('y', c)]]\n\n # Exceptions --------------------------------------------------------------\n class InsufficientConstraints(Exception):\n pass\n"
]
| [
[
"numpy.zeros",
"numpy.linalg.lstsq",
"numpy.hstack",
"numpy.around",
"numpy.signbit"
]
]
|
dbstein/pybie2d | [
"1c2d6c05f6dbb4f1ab4476d3824f4dde20f90d58"
]
| [
"pybie2d/kernels/high_level/stokes.py"
]
| [
"\"\"\"\nThis submodule provides higher-level wrappers for the Stokes Kernel Functions\n\"\"\"\n\nimport numpy as np\nimport numexpr as ne\nimport numba\nimport warnings\n\nfrom ...backend_defaults import get_backend\nfrom ... import have_fmm\nif have_fmm:\n from ... import FMM\n\nfrom ..low_level.stokes import Stokes_Kernel_Apply, Stokes_Kernel_Form\n\n################################################################################\n# Applies\n\ndef check_and_convert(x, bdy):\n \"\"\"\n utility function to convert sources between linear/stacked forms\n \"\"\"\n if x is not None and len(x.shape) == 1:\n return x.reshape(2, bdy.N)\n else:\n return x\n\ndef Stokes_Layer_Apply(source, target=None, forces=None, dipstr=None,\n backend='fly', out_type='flat'):\n \"\"\"\n Stokes Layer Apply\n\n Parameters:\n source, required, Boundary, source\n target, optional, PointSet, target\n forces, optional, float(2, ns), forces\n dipstr, optional, float(2, ns), dipole strength\n weights, optional, float(ns), weights\n backend, optional, str, 'fly', 'numba', 'FMM'\n out_type, optional, str, 'flat' or 'stacked'\n\n forces/dipstr can also be given as float(2*ns)\n\n If source is not target, then this function assumes that source and\n target have no coincident points\n If source is target, this function computes a naive quadrature,\n ignoring the i=j term in the sum\n \"\"\"\n forces = check_and_convert(forces, source)\n dipstr = check_and_convert(dipstr, source)\n dipvec = None if dipstr is None else source.get_stacked_normal(T=True)\n if target is None:\n target = source\n backend = get_backend(source.N, target.N, backend)\n out = Stokes_Kernel_Apply(\n source = source.get_stacked_boundary(T=True),\n target = target.get_stacked_boundary(T=True),\n forces = forces,\n dipstr = dipstr,\n dipvec = dipvec,\n weights = source.weights,\n backend = backend,\n )\n if out_type == 'flat':\n return out.reshape(2*target.N)\n else:\n return out\n\ndef Stokes_Layer_Singular_Apply(source, forces=None, dipstr=None,\n backend='fly'):\n \"\"\"\n Stokes Layer Singular Apply\n\n Parameters:\n source, required, Boundary, source\n forces, optional, float(2, ns), forces\n dipstr, optional, float(2, ns), dipole strength\n weights, optional, float(ns), weights\n backend, optional, str, 'fly', 'numba', 'FMM'\n\n forces/dipstr can also be given as float(2*ns)\n \"\"\"\n forces = check_and_convert(forces, source)\n dipstr = check_and_convert(dipstr, source)\n uALP = np.zeros([2, source.N], dtype=float)\n if dipstr is not None:\n # evaluate the DLP\n uDLP = Stokes_Layer_Apply(source, dipstr=dipstr, backend=backend)\n tx = source.tangent_x\n ty = source.tangent_y\n scale = -0.5*source.curvature*source.weights/np.pi\n s01 = scale*tx*ty\n uDLP[0] += (scale*tx*tx*dipstr[0] + s01*dipstr[1])\n uDLP[1] += (s01*dipstr[0] + scale*ty*ty*dipstr[1])\n ne.evaluate('uALP+uDLP', out=uALP)\n if forces is not None:\n # form the SLP Matrix\n # because this is singular, this depends on the type of layer itself\n # and the SLP formation must be implemented in that class!\n backend = get_backend(source.N, source.N, backend)\n uSLP = source.Stokes_SLP_Self_Apply(forces, backend=backend)\n ne.evaluate('uALP+uSLP', out=uALP)\n if out_type == 'flat':\n return uALP.reshape(2*source.N)\n else:\n return uALP\n\n################################################################################\n# Formations\n\ndef Stokes_Layer_Form(source, target=None, ifforce=False, fweight=None,\n ifdipole=False, dpweight=None):\n \"\"\"\n Stokes Layer Evaluation (potential and 
gradient in 2D)\n\n Parameters:\n source, required, Boundary, source\n target, optional, Boundary, target\n ifforce, optional, bool, include effect of force (SLP)\n fweight, optional, float, scalar weight for the SLP portion\n ifdipole, optional, bool, include effect of dipole (DLP)\n dpweight, optional, float, scalar weight for the DLP portion\n\n If source is not target, then this function assumes that source and\n target have no coincident points\n If source is target, this function computes a naive quadrature,\n ignoring the i=j term in the sum\n \"\"\"\n dipvec = None if ifdipole is None else source.get_stacked_normal(T=True)\n if target is None:\n target = source\n return Stokes_Kernel_Form(\n source = source.get_stacked_boundary(T=True),\n target = target.get_stacked_boundary(T=True),\n ifforce = ifforce,\n fweight = fweight,\n ifdipole = ifdipole,\n dpweight = dpweight,\n dipvec = dipvec,\n weights = source.weights,\n )\n\ndef Stokes_Layer_Singular_Form(source, ifforce=False, fweight=None,\n ifdipole=False, dpweight=None):\n \"\"\"\n Stokes Layer Singular Form\n\n Parameters:\n source, required, Boundary, source\n ifforce, optional, bool, include effect of force (SLP)\n fweight, optional, float, scalar weight for the SLP portion\n ifdipole, optional, bool, include effect of dipole (DLP)\n dpweight, optional, float, scalar weight for the DLP portion\n \"\"\"\n sn = source.N\n ALP = np.zeros([2*sn, 2*sn], dtype=float)\n if ifdipole:\n # form the DLP Matrix\n DLP = Stokes_Layer_Form(source, ifdipole=True)\n # fix the diagonal\n scale = -0.5*source.curvature*source.weights/np.pi\n tx = source.tangent_x\n ty = source.tangent_y\n s01 = scale*tx*ty\n np.fill_diagonal(DLP[:sn, :sn], scale*tx*tx)\n np.fill_diagonal(DLP[sn:, :sn], s01)\n np.fill_diagonal(DLP[:sn, sn:], s01)\n np.fill_diagonal(DLP[sn:, sn:], scale*ty*ty)\n # weight, if necessary, and add to ALP\n if dpweight is None:\n ne.evaluate('ALP + DLP', out=ALP)\n else:\n ne.evaluate('ALP + DLP*dpweight', out=ALP)\n if ifforce:\n # form the SLP Matrix\n # because this is singular, this depends on the type of layer itself\n # and the SLP formation must be implemented in that class!\n SLP = source.Stokes_SLP_Self_Form()\n # weight, if necessary, and add to ALP\n if fweight is None:\n ne.evaluate('ALP + SLP', out=ALP)\n else:\n ne.evaluate('ALP + SLP*fweight', out=ALP)\n return ALP\n"
]
| [
[
"numpy.fill_diagonal",
"numpy.zeros"
]
]
|
gngdb/aistplusplus_api | [
"26935370d8e719d2e2fe49bd6b46463efd22c573"
]
| [
"aist_plusplus/utils.py"
]
| [
"# coding=utf-8\n# Copyright 2020 The Google AI Perception Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Utils for AIST++ Dataset.\"\"\"\nimport os\n\nimport ffmpeg\nimport numpy as np\n\n\ndef ffmpeg_video_read(video_path, fps=None):\n \"\"\"Video reader based on FFMPEG.\n\n This function supports setting fps for video reading. It is critical\n as AIST++ Dataset are constructed under exact 60 fps, while some of\n the AIST dance videos are not percisely 60 fps.\n\n Args:\n video_path: A video file.\n fps: Use specific fps for video reading. (optional)\n Returns:\n A `np.array` with the shape of [seq_len, height, width, 3]\n \"\"\"\n assert os.path.exists(video_path), f'{video_path} does not exist!'\n try:\n probe = ffmpeg.probe(video_path)\n except ffmpeg.Error as e:\n print('stdout:', e.stdout.decode('utf8'))\n print('stderr:', e.stderr.decode('utf8'))\n raise e\n video_info = next(stream for stream in probe['streams']\n if stream['codec_type'] == 'video')\n width = int(video_info['width'])\n height = int(video_info['height'])\n stream = ffmpeg.input(video_path)\n if fps:\n stream = ffmpeg.filter(stream, 'fps', fps=fps, round='up')\n stream = ffmpeg.output(stream, 'pipe:', format='rawvideo', pix_fmt='rgb24')\n out, _ = ffmpeg.run(stream, capture_stdout=True)\n out = np.frombuffer(out, np.uint8).reshape([-1, height, width, 3])\n return out.copy()\n\n\ndef ffmpeg_video_write(data, video_path, fps=25):\n \"\"\"Video writer based on FFMPEG.\n\n Args:\n data: A `np.array` with the shape of [seq_len, height, width, 3]\n video_path: A video file.\n fps: Use specific fps for video writing. (optional)\n \"\"\"\n assert len(data.shape) == 4, f'input shape is not valid! Got {data.shape}!'\n _, height, width, _ = data.shape\n os.makedirs(os.path.dirname(video_path), exist_ok=True)\n writer = (\n ffmpeg\n .input('pipe:', framerate=fps, format='rawvideo',\n pix_fmt='rgb24', s='{}x{}'.format(width, height))\n .output(video_path, pix_fmt='yuv420p')\n .overwrite_output()\n .run_async(pipe_stdin=True)\n )\n for frame in data:\n writer.stdin.write(frame.astype(np.uint8).tobytes())\n writer.stdin.close()\n\n"
]
| [
[
"numpy.frombuffer"
]
]
|
yutiansut/xalpha | [
"d5f161cc3c7ef648c93f111a80dea988d593a167"
]
| [
"xalpha/trade.py"
]
| [
"# -*- coding: utf-8 -*-\n\"\"\"\nmodule for trade class\n\"\"\"\nimport datetime as dt\n\nimport pandas as pd\nfrom pyecharts.charts import Line, Bar\nfrom pyecharts.options import AxisOpts, DataZoomOpts\n\nimport xalpha.remain as rm\nfrom xalpha.cons import convert_date, xirr, myround, yesterdayobj, line_opts\nfrom xalpha.exceptions import ParserFailure, TradeBehaviorError\n\n\ndef xirrcal(cftable, trades, date, guess):\n \"\"\"\n calculate the xirr rate\n\n :param cftable: cftable (pd.Dateframe) with date and cash column\n :param trades: list [trade1, ...], every item is an trade object,\n whose shares would be sold out virtually\n :param date: string of date or datetime object,\n the date when virtually all holding positions being sold\n :param guess: floating number, a guess at the xirr rate solution to be used\n as a starting point for the numerical solution\n :returns: the IRR as a single floating number\n \"\"\"\n date = convert_date(date)\n partcftb = cftable[cftable[\"date\"] <= date]\n if len(partcftb) == 0:\n return 0\n cashflow = [(row[\"date\"], row[\"cash\"]) for i, row in partcftb.iterrows()]\n rede = 0\n for fund in trades:\n rede += fund.aim.shuhui(\n fund.briefdailyreport(date).get(\"currentshare\", 0),\n date,\n fund.remtable[fund.remtable[\"date\"] <= date].iloc[-1].rem,\n )[1]\n cashflow.append((date, rede))\n return xirr(cashflow, guess)\n\n\ndef bottleneck(cftable):\n \"\"\"\n find the max total input in the history given cftable with cash column\n\n :param cftable: pd.DataFrame of cftable\n \"\"\"\n if len(cftable) == 0:\n return 0\n # cftable = cftable.reset_index(drop=True) # unnecessary as iloc use natural rows instead of default index\n inputl = [-sum(cftable.iloc[:i].cash) for i in range(1, len(cftable) + 1)]\n return myround(max(inputl))\n\n\ndef turnoverrate(cftable, end=yesterdayobj()):\n \"\"\"\n calculate the annualized turnoverrate\n\n :param cftable: pd.DataFrame of cftable\n :param end: str or obj of datetime for the end date of the estimation\n \"\"\"\n if len(cftable) == 0:\n return 0\n end = convert_date(end)\n start = cftable.iloc[0].date\n tradeamount = sum(abs(cftable.loc[:, \"cash\"]))\n turnover = tradeamount / bottleneck(cftable) / 2.0\n if (end - start).days <= 0:\n return 0\n return turnover * 365 / (end - start).days\n\n\ndef vtradevolume(cftable, freq=\"D\"):\n \"\"\"\n aid function on visualization of trade summary\n\n :param cftable: cftable (pandas.DataFrame) with at least date and cash columns\n :param freq: one character string, frequency label, now supporting D for date,\n W for week and M for month, namely the trade volume is shown based on the time unit\n :returns: the Bar object\n \"\"\"\n ### WARN: datazoom and time conflict, sliding till 1970..., need further look into pyeacharts\n ### very unsatisfied about current visualize effect, and it seems the order of add and set option matters a lot\n if freq == \"D\":\n datedata = [d.to_pydatetime() for d in cftable[\"date\"]]\n selldata = [\n [row[\"date\"].to_pydatetime(), row[\"cash\"]]\n for _, row in cftable.iterrows()\n if row[\"cash\"] > 0\n ]\n buydata = [\n [row[\"date\"].to_pydatetime(), row[\"cash\"]]\n for _, row in cftable.iterrows()\n if row[\"cash\"] < 0\n ]\n elif freq == \"W\":\n cfmerge = cftable.groupby([cftable[\"date\"].dt.year, cftable[\"date\"].dt.week])[\n \"cash\"\n ].sum()\n datedata = [\n dt.datetime.strptime(str(a) + \"4\", \"(%Y, %W)%w\")\n for a, _ in cfmerge.iteritems()\n ]\n selldata = [\n [dt.datetime.strptime(str(a) + \"4\", \"(%Y, %W)%w\"), b]\n for 
a, b in cfmerge.iteritems()\n if b > 0\n ]\n buydata = [\n [dt.datetime.strptime(str(a) + \"4\", \"(%Y, %W)%w\"), b]\n for a, b in cfmerge.iteritems()\n if b < 0\n ]\n elif freq == \"M\":\n cfmerge = cftable.groupby([cftable[\"date\"].dt.year, cftable[\"date\"].dt.month])[\n \"cash\"\n ].sum()\n datedata = [\n dt.datetime.strptime(str(a) + \"15\", \"(%Y, %m)%d\")\n for a, _ in cfmerge.iteritems()\n ]\n selldata = [\n [dt.datetime.strptime(str(a) + \"15\", \"(%Y, %m)%d\"), b]\n for a, b in cfmerge.iteritems()\n if b > 0\n ]\n buydata = [\n [dt.datetime.strptime(str(a) + \"15\", \"(%Y, %m)%d\"), b]\n for a, b in cfmerge.iteritems()\n if b < 0\n ]\n else:\n raise ParserFailure(\"no such freq tag supporting\")\n\n bar = Bar()\n bar.add_xaxis(datedata)\n bar.add_yaxis(series_name=\"卖出\", yaxis_data=selldata, category_gap=\"90%\")\n bar.add_yaxis(series_name=\"买入\", yaxis_data=buydata, category_gap=\"90%\")\n\n bar.set_global_opts(\n xaxis_opts=AxisOpts(type_=\"time\"),\n datazoom_opts=[DataZoomOpts(range_start=99, range_end=100)],\n )\n\n return bar.render_notebook()\n\n\nclass trade:\n \"\"\"\n Trade class with fundinfo obj as input and its main attrs are cftable and remtable:\n\n 1. cftable: pd.Dataframe, 现金流量表,每行为不同变更日期,三列分别为 date,cash, share,标记对于某个投资标的\n 现金的进出和份额的变化情况,所有的份额数据为交易当时的不复权数据。基金份额折算通过流量表中一次性的份额增减体现。\n\n 2. remtable:pd.Dataframe, 持仓情况表,每行为不同变更日期,两列分别为 date 和 rem, rem 数据结构是一个嵌套的列表,\n 包含了不同时间买入仓位的剩余情况,详情参见 remain 模块。这一表格如非必需,避免任何直接调用。\n\n :param infoobj: info object as the trading aim\n :param status: status table, obtained from record class\n \"\"\"\n\n def __init__(self, infoobj, status):\n self.aim = infoobj\n code = self.aim.code\n self.cftable = pd.DataFrame([], columns=[\"date\", \"cash\", \"share\"])\n self.remtable = pd.DataFrame([], columns=[\"date\", \"rem\"])\n self.status = status.loc[:, [\"date\", code]]\n self._arrange()\n\n def _arrange(self):\n while 1:\n try:\n self._addrow()\n except Exception as e:\n if e.args[0] == \"no other info to be add into cashflow table\":\n break\n else:\n raise e\n\n def _addrow(self):\n \"\"\"\n Return cashflow table with one more line or raise an exception if there is no more line to add\n The same logic also applies to rem table\n 关于对于一个基金多个操作存在于同一交易日的说明:无法处理历史买入第一笔同时是分红日的情形, 事实上也不存在这种情形。无法处理一日多笔买卖的情形。\n 同一日既有卖也有买不现实,多笔买入只能在 csv 上合并记录,由此可能引起份额计算 0.01 的误差。可以处理分红日买入卖出的情形。\n 分级份额折算日封闭无法买入,所以程序直接忽略当天的买卖。因此不会出现多个操作共存的情形。\n \"\"\"\n # the design on data remtable is disaster, it is very dangerous though works now\n\n code = self.aim.code\n if len(self.cftable) == 0:\n if len(self.status[self.status[code] != 0]) == 0:\n raise Exception(\"no other info to be add into cashflow table\")\n i = 0\n while self.status.iloc[i].loc[code] == 0:\n i += 1\n value = self.status.iloc[i].loc[code]\n date = self.status.iloc[i].date\n if value > 0:\n rdate, cash, share = self.aim.shengou(value, date)\n rem = rm.buy([], share, rdate)\n else:\n raise TradeBehaviorError(\"You cannot sell first when you never buy\")\n elif len(self.cftable) > 0:\n recorddate = list(self.status.date)\n lastdate = self.cftable.iloc[-1].date + pd.Timedelta(1, unit=\"d\")\n while (lastdate not in self.aim.specialdate) and (\n (lastdate not in recorddate)\n or (\n (lastdate in recorddate)\n and (\n self.status[self.status[\"date\"] == lastdate].loc[:, code].any()\n == 0\n )\n )\n ):\n lastdate += pd.Timedelta(1, unit=\"d\")\n if (lastdate - yesterdayobj()).days >= 1:\n raise Exception(\"no other info to be add into cashflow table\")\n date = lastdate\n label = 0\n cash = 0\n share = 
0\n rem = self.remtable.iloc[-1].rem\n rdate = date\n\n if (date in recorddate) and (date not in self.aim.zhesuandate):\n # deal with buy and sell and label the fenhongzaitouru, namely one label a 0.05 in the original table to label fenhongzaitouru\n value = self.status[self.status[\"date\"] == date].iloc[0].loc[code]\n fenhongmark = round(10 * value - int(10 * value), 1)\n if fenhongmark == 0.5:\n label = 1 # fenhong reinvest\n value = round(value, 1)\n\n if value > 0: # value stands for purchase money\n rdate, dcash, dshare = self.aim.shengou(value, date)\n rem = rm.buy(rem, dshare, rdate)\n\n elif value < -0.005: # value stands for redemp share\n rdate, dcash, dshare = self.aim.shuhui(\n -value, date, self.remtable.iloc[-1].rem\n )\n _, rem = rm.sell(rem, -dshare, rdate)\n elif value >= -0.005 and value < 0:\n # value now stands for the ratio to be sold in terms of remain positions, -0.005 stand for sell 100%\n remainshare = sum(self.cftable.loc[:, \"share\"])\n ratio = -value / 0.005\n rdate, dcash, dshare = self.aim.shuhui(\n remainshare * ratio, date, self.remtable.iloc[-1].rem\n )\n _, rem = rm.sell(rem, -dshare, rdate)\n else: # in case value=0, when specialday is in record day\n rdate, dcash, dshare = date, 0, 0\n\n cash += dcash\n share += dshare\n if date in self.aim.specialdate: # deal with fenhong and xiazhe\n comment = (\n self.aim.price[self.aim.price[\"date\"] == date]\n .iloc[0]\n .loc[\"comment\"]\n )\n if isinstance(comment, float):\n if comment < 0:\n dcash2, dshare2 = (\n 0,\n sum([myround(sh * (-comment - 1)) for _, sh in rem]),\n ) # xiazhe are seperately carried out based on different purchase date\n rem = rm.trans(rem, -comment, date)\n # myround(sum(cftable.loc[:,'share'])*(-comment-1))\n elif comment > 0 and label == 0:\n dcash2, dshare2 = (\n myround(sum(self.cftable.loc[:, \"share\"]) * comment),\n 0,\n )\n rem = rm.copy(rem)\n\n elif comment > 0 and label == 1:\n dcash2, dshare2 = (\n 0,\n myround(\n sum(self.cftable.loc[:, \"share\"])\n * (\n comment\n / self.aim.price[self.aim.price[\"date\"] == date]\n .iloc[0]\n .netvalue\n )\n ),\n )\n rem = rm.buy(rem, dshare2, date)\n\n cash += dcash2\n share += dshare2\n else:\n raise ParserFailure(\"comments not recoginized\")\n\n self.cftable = self.cftable.append(\n pd.DataFrame([[rdate, cash, share]], columns=[\"date\", \"cash\", \"share\"]),\n ignore_index=True,\n )\n self.remtable = self.remtable.append(\n pd.DataFrame([[rdate, rem]], columns=[\"date\", \"rem\"]), ignore_index=True\n )\n\n def xirrrate(self, date=yesterdayobj(), guess=0.1):\n \"\"\"\n give the xirr rate for all the trade of the aim before date (virtually sold out on date)\n\n :param date: string or obj of datetime, the virtually sell-all date\n \"\"\"\n return xirrcal(self.cftable, [self], date, guess)\n\n def dailyreport(self, date=yesterdayobj()):\n \"\"\"\n breif report dict of certain date status on the fund investment\n\n :param date: string or obj of date, show info of the date given\n :returns: dict of various data on the trade positions\n \"\"\"\n date = convert_date(date)\n partcftb = self.cftable[self.cftable[\"date\"] <= date]\n value = self.aim.price[self.aim.price[\"date\"] <= date].iloc[-1].netvalue\n\n if len(partcftb) == 0:\n reportdict = {\n \"基金名称\": [self.aim.name],\n \"基金代码\": [self.aim.code],\n \"当日净值\": [value],\n \"持有份额\": [0],\n \"基金现值\": [0],\n \"基金总申购\": [0],\n \"历史最大占用\": [0],\n \"基金分红与赎回\": [0],\n \"基金收益总额\": [0],\n }\n df = pd.DataFrame(reportdict, columns=reportdict.keys())\n return df\n # totinput = 
myround(-sum(partcftb.loc[:,'cash']))\n totinput = myround(\n -sum([row[\"cash\"] for _, row in partcftb.iterrows() if row[\"cash\"] < 0])\n )\n totoutput = myround(\n sum([row[\"cash\"] for _, row in partcftb.iterrows() if row[\"cash\"] > 0])\n )\n\n currentshare = myround(sum(partcftb.loc[:, \"share\"]))\n currentcash = myround(currentshare * value)\n btnk = bottleneck(partcftb)\n turnover = turnoverrate(partcftb, date)\n ereturn = myround(currentcash + totoutput - totinput)\n if currentshare == 0:\n unitcost = 0\n else:\n unitcost = round((totinput - totoutput) / currentshare, 4)\n if btnk == 0:\n returnrate = 0\n else:\n returnrate = round((ereturn / btnk) * 100, 4)\n\n reportdict = {\n \"基金名称\": [self.aim.name],\n \"基金代码\": [self.aim.code],\n \"当日净值\": [value],\n \"单位成本\": [unitcost],\n \"持有份额\": [currentshare],\n \"基金现值\": [currentcash],\n \"基金总申购\": [totinput],\n \"历史最大占用\": [btnk],\n \"基金持有成本\": [totinput - totoutput],\n \"基金分红与赎回\": [totoutput],\n \"换手率\": [turnover],\n \"基金收益总额\": [ereturn],\n \"投资收益率\": [returnrate],\n }\n df = pd.DataFrame(reportdict, columns=reportdict.keys())\n return df\n\n def briefdailyreport(self, date=yesterdayobj()):\n \"\"\"\n quick summary of highly used attrs for trade\n\n :param date: string or object of datetime\n :returns: dict with several attrs: date, unitvalue, currentshare, currentvalue\n \"\"\"\n date = convert_date(date)\n partcftb = self.cftable[self.cftable[\"date\"] <= date]\n if len(partcftb) == 0:\n return {}\n\n unitvalue = self.aim.price[self.aim.price[\"date\"] <= date].iloc[-1].netvalue\n currentshare = myround(sum(partcftb.loc[:, \"share\"]))\n currentvalue = myround(currentshare * unitvalue)\n\n return {\n \"date\": date,\n \"unitvalue\": unitvalue,\n \"currentshare\": currentshare,\n \"currentvalue\": currentvalue,\n }\n\n def unitcost(self, date=yesterdayobj()):\n \"\"\"\n give the unitcost of fund positions\n\n :param date: string or object of datetime\n :returns: float number of unitcost\n \"\"\"\n partcftb = self.cftable[self.cftable[\"date\"] <= date]\n if len(partcftb) == 0:\n return 0\n totnetinput = myround(-sum(partcftb.loc[:, \"cash\"]))\n currentshare = self.briefdailyreport(date).get(\"currentshare\", 0)\n # totnetinput\n if currentshare > 0:\n unitcost = totnetinput / currentshare\n else:\n unitcost = 0\n return unitcost\n\n def v_tradevolume(self, freq=\"D\"):\n \"\"\"\n visualization on trade summary\n\n :param freq: string, \"D\", \"W\" and \"M\" are supported\n :returns: pyecharts.charts.bar.render_notebook()\n \"\"\"\n return vtradevolume(self.cftable, freq=freq)\n\n def v_tradecost(self, start=None, end=yesterdayobj(), vopts=None):\n \"\"\"\n visualization giving the average cost line together with netvalue line\n\n :param vopts: global option for line in pyecharts\n :returns: pyecharts.line\n \"\"\"\n funddata = []\n costdata = []\n pprice = self.aim.price[self.aim.price[\"date\"] <= end]\n if start is not None:\n pprice = pprice[pprice[\"date\"] >= start]\n for _, row in pprice.iterrows():\n date = row[\"date\"]\n funddata.append(row[\"netvalue\"])\n cost = 0\n if (date - self.cftable.iloc[0].date).days >= 0:\n cost = self.unitcost(date)\n costdata.append(cost)\n\n line = Line()\n if vopts is None:\n vopts = line_opts\n\n line.add_xaxis([d.date() for d in pprice.date])\n line.add_yaxis(series_name=\"基金净值\", y_axis=funddata, is_symbol_show=False)\n line.add_yaxis(series_name=\"持仓成本\", y_axis=costdata, is_symbol_show=False)\n line.set_global_opts(**vopts)\n return line.render_notebook()\n\n def v_totvalue(self, 
end=yesterdayobj(), vopts=None):\n \"\"\"\n visualization on the total values daily change of the aim\n \"\"\"\n partp = self.aim.price[self.aim.price[\"date\"] >= self.cftable.iloc[0].date]\n partp = partp[partp[\"date\"] <= end]\n\n date = [d.date() for d in partp.date]\n valuedata = [\n self.briefdailyreport(d).get(\"currentvalue\", 0) for d in partp.date\n ]\n\n line = Line()\n if vopts is None:\n vopts = line_opts\n\n line.add_xaxis(date)\n line.add_yaxis(series_name=\"持仓总值\", y_axis=valuedata, is_symbol_show=False)\n line.set_global_opts(**vopts)\n\n return line.render_notebook()\n\n def __repr__(self):\n return self.aim.name + \" 交易情况\"\n\n\n\"\"\"\n可视化图的合并可参考以下代码 v0.5.5\nfrom pyecharts import Overlap\noverlap = Overlap()\noverlap.add(self.v_tradecost())\noverlap.add(self.v_tradevolume(bar_category_gap='95%'), yaxis_index=1,is_add_yaxis=True)\noverlap\n\"\"\"\n"
]
| [
[
"pandas.DataFrame",
"pandas.Timedelta"
]
]
|
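The trade module above reduces return measurement to a dated cashflow list: xirrcal() collects (date, cash) pairs from the cftable, appends one virtual sell-everything redemption, and defers to xirr() from xalpha.cons, which is imported but not shown in this row. As a hedged sketch of that missing solver (Newton's method on net present value with an actual/365 year convention, not necessarily xalpha's real implementation; npv and xirr_sketch are illustrative names):

import datetime as dt

def npv(rate, cashflow):
    # Discount each dated flow back to the first date, in fractional years.
    t0 = cashflow[0][0]
    return sum(cash / (1.0 + rate) ** ((date - t0).days / 365.0)
               for date, cash in cashflow)

def xirr_sketch(cashflow, guess=0.1, tol=1e-6, maxiter=100):
    # Newton iteration with a numerical derivative; `cashflow` mixes
    # negative purchases and positive redemptions, as xirrcal() builds it.
    rate = guess
    for _ in range(maxiter):
        eps = 1e-6
        f = npv(rate, cashflow)
        fprime = (npv(rate + eps, cashflow) - f) / eps
        if abs(fprime) < 1e-12:
            break
        step = f / fprime
        rate -= step
        if abs(step) < tol:
            return rate
    return rate

# Buy 1000 on day one, virtually redeem 1100 a year later: ~10% annualized.
flows = [(dt.datetime(2020, 1, 1), -1000.0), (dt.datetime(2021, 1, 1), 1100.0)]
print(round(xirr_sketch(flows), 4))

The same cashflow convention explains bottleneck() and turnoverrate() in that file: the former is the historical peak of cumulative net investment, and the latter annualizes the total traded amount divided by that peak, halved because every round trip is counted on both the buy and sell legs.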
luyuliu/congestion_detector | [
"46f2bfb37d8c52a9803645625e5249b18b322cd6"
]
| [
"scr/training.py"
]
| [
"from keras.models import Sequential\nfrom keras.layers import Dense, Activation, normalization,core\nfrom keras import optimizers,callbacks\nimport numpy as np\nimport csv\n\n\ninput_shape = 211\nepochs = 500\ntimestamp = 1511398860\ndata_size = 9850\nbatch_size = 100\n\nclass LossHistory(callbacks.Callback):\n def on_train_begin(self, logs={}):\n self.losses = []\n\n def on_batch_end(self, batch, logs={}):\n self.losses.append(logs.get('loss'))\n\nhistory = LossHistory()\nmodel = Sequential()\nmodel.add(Dense(units=input_shape, activation='relu', use_bias=True,\n kernel_initializer='random_uniform', bias_initializer='he_normal', input_dim=input_shape))\nmodel.add(normalization.BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None))\nmodel.add(core.Dropout(0.25, noise_shape=None, seed=None))\nfor i in range(9):\n model.add(Dense(units=input_shape, activation='relu', use_bias=True,\n kernel_initializer='random_uniform', bias_initializer='he_normal'))\n model.add(normalization.BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None))\n model.add(core.Dropout(0.25, noise_shape=None, seed=None))\n\nsgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)\nmodel.compile(loss='mean_squared_error', optimizer=sgd)\n\ndelays = np.loadtxt('D:/Luyu/data/delaycsv/delay_' +\n str(timestamp) + '.csv', skiprows=1, unpack=True)\nlabels = np.loadtxt('D:/Luyu/data/labelcsv/label_' +\n str(timestamp) + '.csv', skiprows=1, unpack=True)\n\n\ni = 0\nwhile i < data_size - 1:\n timestamp += 60\n try:\n delay = np.loadtxt('D:/Luyu/data/delaycsv/delay_' +\n str(timestamp) + '.csv', skiprows=1, unpack=True)\n label = np.loadtxt('D:/Luyu/data/labelcsv/label_' +\n str(timestamp) + '.csv', skiprows=1, unpack=True)\n except:\n continue\n\n delays = np.vstack((delays, delay))\n labels = np.vstack((labels, label))\n\n i = i + 1\n if i % 100 == 0:\n print(i)\nprint(delays.shape, labels.shape, i)\nmodel.fit(delays, labels, epochs=epochs, verbose=1,callbacks=[history])\n\n\n\npath_model = \"D:\\\\Luyu\\\\data\\\\model.h5\"\npath_history=\"D:\\\\Luyu\\\\data\\\\history.csv\"\nmodel.save(path_model)\n\nnp.savetxt(path_history,history.losses,delimiter=',')\n"
]
| [
[
"numpy.savetxt",
"numpy.vstack"
]
]
|
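Two details of the training script above deserve care: the loading loop originally used a bare except: (hiding every failure, not just an absent minute file, hence the OSError fix), and growing delays/labels with np.vstack on each iteration recopies the whole array, which is quadratic in the number of samples. A hedged rework of just the loading step under those observations; load_window and its directory arguments are illustrative names, not part of the original script:

import numpy as np

def load_window(start_ts, end_ts, n_samples, delay_dir, label_dir, step=60):
    # Collect per-minute rows in Python lists and stack once at the end;
    # end_ts also guards against spinning forever on a long run of gaps.
    delays, labels = [], []
    ts = start_ts
    while len(delays) < n_samples and ts < end_ts:
        ts += step
        try:
            delay = np.loadtxt(f"{delay_dir}/delay_{ts}.csv", skiprows=1, unpack=True)
            label = np.loadtxt(f"{label_dir}/label_{ts}.csv", skiprows=1, unpack=True)
        except OSError:  # only a genuinely missing/unreadable minute file is skipped
            continue
        delays.append(delay)
        labels.append(label)
    return np.vstack(delays), np.vstack(labels)

# e.g. delays, labels = load_window(1511398860, 1511398860 + 9850 * 60, 9850,
#                                   "D:/Luyu/data/delaycsv", "D:/Luyu/data/labelcsv")

The LossHistory callback itself is the standard Keras pattern and needs no change: per-batch losses accumulate in history.losses, exactly the flat sequence that np.savetxt writes out at the end.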
katiezzzzz/PyBaMM | [
"0b0fc47125c0f078da99a58f497e0700eb25225a"
]
| [
"pybamm/expression_tree/unary_operators.py"
]
| [
"#\n# Unary operator classes and methods\n#\nimport numpy as np\nimport pybamm\nfrom scipy.sparse import issparse, csr_matrix\n\n\nclass UnaryOperator(pybamm.Symbol):\n \"\"\"A node in the expression tree representing a unary operator\n (e.g. '-', grad, div)\n\n Derived classes will specify the particular operator\n\n **Extends:** :class:`Symbol`\n\n Parameters\n ----------\n name : str\n name of the node\n child : :class:`Symbol`\n child node\n\n \"\"\"\n\n def __init__(self, name, child, domain=None, auxiliary_domains=None):\n if domain is None:\n domain = child.domain\n if auxiliary_domains is None:\n auxiliary_domains = child.auxiliary_domains\n super().__init__(\n name, children=[child], domain=domain, auxiliary_domains=auxiliary_domains\n )\n self.child = self.children[0]\n\n def __str__(self):\n \"\"\" See :meth:`pybamm.Symbol.__str__()`. \"\"\"\n return \"{}({!s})\".format(self.name, self.child)\n\n def new_copy(self):\n \"\"\" See :meth:`pybamm.Symbol.new_copy()`. \"\"\"\n new_child = self.child.new_copy()\n return self._unary_new_copy(new_child)\n\n def _unary_new_copy(self, child):\n \"\"\"Make a new copy of the unary operator, with child `child`\"\"\"\n\n return self.__class__(child)\n\n def _unary_jac(self, child_jac):\n \"\"\" Calculate the jacobian of a unary operator. \"\"\"\n raise NotImplementedError\n\n def _unary_simplify(self, simplified_child):\n \"\"\"\n Simplify a unary operator. Default behaviour is to make a new copy, with\n simplified child.\n \"\"\"\n\n return self._unary_new_copy(simplified_child)\n\n def _unary_evaluate(self, child):\n \"\"\"Perform unary operation on a child. \"\"\"\n raise NotImplementedError\n\n def evaluate(self, t=None, y=None, y_dot=None, inputs=None, known_evals=None):\n \"\"\" See :meth:`pybamm.Symbol.evaluate()`. \"\"\"\n if known_evals is not None:\n if self.id not in known_evals:\n child, known_evals = self.child.evaluate(\n t, y, y_dot, inputs, known_evals\n )\n known_evals[self.id] = self._unary_evaluate(child)\n return known_evals[self.id], known_evals\n else:\n child = self.child.evaluate(t, y, y_dot, inputs)\n return self._unary_evaluate(child)\n\n def _evaluate_for_shape(self):\n \"\"\"\n Default behaviour: unary operator has same shape as child\n See :meth:`pybamm.Symbol.evaluate_for_shape()`\n \"\"\"\n return self.children[0].evaluate_for_shape()\n\n def evaluates_on_edges(self, dimension):\n \"\"\" See :meth:`pybamm.Symbol.evaluates_on_edges()`. \"\"\"\n return self.child.evaluates_on_edges(dimension)\n\n\nclass Negate(UnaryOperator):\n \"\"\"A node in the expression tree representing a `-` negation operator\n\n **Extends:** :class:`UnaryOperator`\n \"\"\"\n\n def __init__(self, child):\n \"\"\" See :meth:`pybamm.UnaryOperator.__init__()`. \"\"\"\n super().__init__(\"-\", child)\n\n def __str__(self):\n \"\"\" See :meth:`pybamm.Symbol.__str__()`. \"\"\"\n return \"{}{!s}\".format(self.name, self.child)\n\n def _diff(self, variable):\n \"\"\" See :meth:`pybamm.Symbol._diff()`. \"\"\"\n return -self.child.diff(variable)\n\n def _unary_jac(self, child_jac):\n \"\"\" See :meth:`pybamm.UnaryOperator._unary_jac()`. \"\"\"\n return -child_jac\n\n def _unary_evaluate(self, child):\n \"\"\" See :meth:`UnaryOperator._unary_evaluate()`. \"\"\"\n return -child\n\n\nclass AbsoluteValue(UnaryOperator):\n \"\"\"A node in the expression tree representing an `abs` operator\n\n **Extends:** :class:`UnaryOperator`\n \"\"\"\n\n def __init__(self, child):\n \"\"\" See :meth:`pybamm.UnaryOperator.__init__()`. 
\"\"\"\n super().__init__(\"abs\", child)\n\n def diff(self, variable):\n \"\"\" See :meth:`pybamm.Symbol.diff()`. \"\"\"\n child = self.child.new_copy()\n return Sign(child) * child.diff(variable)\n\n def _unary_jac(self, child_jac):\n \"\"\" See :meth:`pybamm.UnaryOperator._unary_jac()`. \"\"\"\n child = self.child.new_copy()\n return Sign(child) * child_jac\n\n def _unary_evaluate(self, child):\n \"\"\" See :meth:`UnaryOperator._unary_evaluate()`. \"\"\"\n return np.abs(child)\n\n\nclass Sign(UnaryOperator):\n \"\"\"A node in the expression tree representing a `sign` operator\n\n **Extends:** :class:`UnaryOperator`\n \"\"\"\n\n def __init__(self, child):\n \"\"\" See :meth:`pybamm.UnaryOperator.__init__()`. \"\"\"\n super().__init__(\"sign\", child)\n\n def diff(self, variable):\n \"\"\" See :meth:`pybamm.Symbol.diff()`. \"\"\"\n return pybamm.Scalar(0)\n\n def _unary_jac(self, child_jac):\n \"\"\" See :meth:`pybamm.UnaryOperator._unary_jac()`. \"\"\"\n return pybamm.Scalar(0)\n\n def _unary_evaluate(self, child):\n \"\"\" See :meth:`UnaryOperator._unary_evaluate()`. \"\"\"\n if issparse(child):\n return csr_matrix.sign(child)\n else:\n return np.sign(child)\n\n\nclass Floor(UnaryOperator):\n \"\"\"A node in the expression tree representing an `floor` operator\n\n **Extends:** :class:`UnaryOperator`\n \"\"\"\n\n def __init__(self, child):\n \"\"\" See :meth:`pybamm.UnaryOperator.__init__()`. \"\"\"\n super().__init__(\"floor\", child)\n\n def diff(self, variable):\n \"\"\" See :meth:`pybamm.Symbol.diff()`. \"\"\"\n return pybamm.Scalar(0)\n\n def _unary_jac(self, child_jac):\n \"\"\" See :meth:`pybamm.UnaryOperator._unary_jac()`. \"\"\"\n return pybamm.Scalar(0)\n\n def _unary_evaluate(self, child):\n \"\"\" See :meth:`UnaryOperator._unary_evaluate()`. \"\"\"\n return np.floor(child)\n\n\nclass Ceiling(UnaryOperator):\n \"\"\"A node in the expression tree representing a `ceil` operator\n\n **Extends:** :class:`UnaryOperator`\n \"\"\"\n\n def __init__(self, child):\n \"\"\" See :meth:`pybamm.UnaryOperator.__init__()`. \"\"\"\n super().__init__(\"ceil\", child)\n\n def diff(self, variable):\n \"\"\" See :meth:`pybamm.Symbol.diff()`. \"\"\"\n return pybamm.Scalar(0)\n\n def _unary_jac(self, child_jac):\n \"\"\" See :meth:`pybamm.UnaryOperator._unary_jac()`. \"\"\"\n return pybamm.Scalar(0)\n\n def _unary_evaluate(self, child):\n \"\"\" See :meth:`UnaryOperator._unary_evaluate()`. \"\"\"\n return np.ceil(child)\n\n\nclass Index(UnaryOperator):\n \"\"\"A node in the expression tree, which stores the index that should be\n extracted from its child after the child has been evaluated.\n\n Parameters\n ----------\n child : :class:`pybamm.Symbol`\n The symbol of which to take the index\n index : int or slice\n The index (if int) or indices (if slice) to extract from the symbol\n name : str, optional\n The name of the symbol\n check_size : bool, optional\n Whether to check if the slice size exceeds the child size. 
Default is True.\n This should always be True when creating a new symbol so that the appropriate\n check is performed, but should be False for creating a new copy to avoid\n unnecessarily repeating the check.\n \"\"\"\n\n def __init__(self, child, index, name=None, check_size=True):\n self.index = index\n if index == -1:\n self.slice = slice(-1, None)\n if name is None:\n name = \"Index[-1]\"\n elif isinstance(index, int):\n self.slice = slice(index, index + 1)\n if name is None:\n name = \"Index[\" + str(index) + \"]\"\n elif isinstance(index, slice):\n self.slice = index\n if name is None:\n if index.start is None:\n name = \"Index[:{:d}]\".format(index.stop)\n else:\n name = \"Index[{:d}:{:d}]\".format(index.start, index.stop)\n else:\n raise TypeError(\"index must be integer or slice\")\n\n if check_size:\n if self.slice in (slice(0, 1), slice(-1, None)):\n pass\n elif self.slice.stop > child.size:\n raise ValueError(\"slice size exceeds child size\")\n\n super().__init__(name, child)\n\n # no domain for integer value key\n if isinstance(index, int):\n self.clear_domains()\n\n def _unary_jac(self, child_jac):\n \"\"\" See :meth:`pybamm.UnaryOperator._unary_jac()`. \"\"\"\n\n # if child.jac returns a matrix of zeros, this subsequently gives a bug\n # when trying to simplify the node Index(child_jac). Instead, search the\n # tree for StateVectors and return a matrix of zeros of the correct size\n # if none are found.\n if not self.has_symbol_of_classes(pybamm.StateVector):\n jac = csr_matrix((1, child_jac.shape[1]))\n return pybamm.Matrix(jac)\n else:\n return Index(child_jac, self.index)\n\n def set_id(self):\n \"\"\" See :meth:`pybamm.Symbol.set_id()` \"\"\"\n self._id = hash(\n (\n self.__class__,\n self.name,\n self.slice.start,\n self.slice.stop,\n self.children[0].id,\n )\n + tuple(self.domain)\n )\n\n def _unary_evaluate(self, child):\n \"\"\" See :meth:`UnaryOperator._unary_evaluate()`. \"\"\"\n return child[self.slice]\n\n def _unary_new_copy(self, child):\n \"\"\" See :meth:`UnaryOperator._unary_new_copy()`. \"\"\"\n\n return self.__class__(child, self.index, check_size=False)\n\n def _evaluate_for_shape(self):\n return self._unary_evaluate(self.children[0].evaluate_for_shape())\n\n def evaluates_on_edges(self, dimension):\n \"\"\" See :meth:`pybamm.Symbol.evaluates_on_edges()`. \"\"\"\n return False\n\n\nclass SpatialOperator(UnaryOperator):\n \"\"\"A node in the expression tree representing a unary spatial operator\n (e.g. grad, div)\n\n Derived classes will specify the particular operator\n\n This type of node will be replaced by the :class:`Discretisation`\n class with a :class:`Matrix`\n\n **Extends:** :class:`UnaryOperator`\n\n Parameters\n ----------\n\n name : str\n name of the node\n child : :class:`Symbol`\n child node\n\n \"\"\"\n\n def __init__(self, name, child, domain=None, auxiliary_domains=None):\n super().__init__(name, child, domain, auxiliary_domains)\n\n def diff(self, variable):\n \"\"\" See :meth:`pybamm.Symbol.diff()`. \"\"\"\n # We shouldn't need this\n raise NotImplementedError\n\n def _unary_simplify(self, simplified_child):\n \"\"\" See :meth:`pybamm.UnaryOperator.simplify()`. 
\"\"\"\n\n # if there are none of these nodes in the child tree, then this expression\n # does not depend on space, and therefore the spatial operator result is zero\n search_types = (pybamm.Variable, pybamm.StateVector, pybamm.SpatialVariable)\n\n # do the search, return a scalar zero node if no relevent nodes are found\n if not self.has_symbol_of_classes(search_types):\n return pybamm.Scalar(0)\n else:\n return self.__class__(simplified_child)\n\n\nclass Gradient(SpatialOperator):\n \"\"\"A node in the expression tree representing a grad operator\n\n **Extends:** :class:`SpatialOperator`\n \"\"\"\n\n def __init__(self, child):\n if child.domain == []:\n raise pybamm.DomainError(\n \"Cannot take gradient of '{}' since its domain is empty. \".format(child)\n + \"Try broadcasting the object first, e.g.\\n\\n\"\n \"\\tpybamm.grad(pybamm.PrimaryBroadcast(symbol, 'domain'))\"\n )\n if child.evaluates_on_edges(\"primary\") is True:\n raise TypeError(\n \"Cannot take gradient of '{}' since it evaluates on edges\".format(child)\n )\n super().__init__(\"grad\", child)\n\n def evaluates_on_edges(self, dimension):\n \"\"\" See :meth:`pybamm.Symbol.evaluates_on_edges()`. \"\"\"\n return True\n\n\nclass Divergence(SpatialOperator):\n \"\"\"A node in the expression tree representing a div operator\n\n **Extends:** :class:`SpatialOperator`\n \"\"\"\n\n def __init__(self, child):\n if child.domain == []:\n raise pybamm.DomainError(\n \"Cannot take divergence of '{}' since its domain is empty. \".format(\n child\n )\n + \"Try broadcasting the object first, e.g.\\n\\n\"\n \"\\tpybamm.div(pybamm.PrimaryBroadcast(symbol, 'domain'))\"\n )\n if child.evaluates_on_edges(\"primary\") is False:\n raise TypeError(\n \"Cannot take divergence of '{}' since it does not \".format(child)\n + \"evaluates on nodes. Usually, a gradient should be taken before the \"\n \"divergence.\"\n )\n super().__init__(\"div\", child)\n\n def evaluates_on_edges(self, dimension):\n \"\"\" See :meth:`pybamm.Symbol.evaluates_on_edges()`. \"\"\"\n return False\n\n\nclass Laplacian(SpatialOperator):\n \"\"\"A node in the expression tree representing a laplacian operator. This is\n currently only implemeted in the weak form for finite element formulations.\n\n **Extends:** :class:`SpatialOperator`\n \"\"\"\n\n def __init__(self, child):\n super().__init__(\"laplacian\", child)\n\n def evaluates_on_edges(self, dimension):\n \"\"\" See :meth:`pybamm.Symbol.evaluates_on_edges()`. \"\"\"\n return False\n\n\nclass Gradient_Squared(SpatialOperator):\n \"\"\"A node in the expression tree representing a the inner product of the grad\n operator with itself. In particular, this is useful in the finite element\n formualtion where we only require the (sclar valued) square of the gradient,\n and not the gradient itself.\n **Extends:** :class:`SpatialOperator`\n \"\"\"\n\n def __init__(self, child):\n super().__init__(\"grad squared\", child)\n\n def evaluates_on_edges(self, dimension):\n \"\"\" See :meth:`pybamm.Symbol.evaluates_on_edges()`. \"\"\"\n return True\n\n\nclass Mass(SpatialOperator):\n \"\"\"Returns the mass matrix for a given symbol, accounting for Dirchlet boundary\n conditions where necessary (e.g. 
in the finite element formualtion)\n **Extends:** :class:`SpatialOperator`\n \"\"\"\n\n def __init__(self, child):\n super().__init__(\"mass\", child)\n\n def _evaluate_for_shape(self):\n return pybamm.evaluate_for_shape_using_domain(self.domain, typ=\"matrix\")\n\n\nclass BoundaryMass(SpatialOperator):\n \"\"\"Returns the mass matrix for a given symbol assembled over the boundary of\n the domain, accounting for Dirchlet boundary conditions where necessary\n (e.g. in the finite element formualtion)\n **Extends:** :class:`SpatialOperator`\n \"\"\"\n\n def __init__(self, child):\n super().__init__(\"boundary mass\", child)\n\n def _evaluate_for_shape(self):\n return pybamm.evaluate_for_shape_using_domain(self.domain, typ=\"matrix\")\n\n\nclass Integral(SpatialOperator):\n \"\"\"A node in the expression tree representing an integral operator\n\n .. math::\n I = \\\\int_{a}^{b}\\\\!f(u)\\\\,du,\n\n where :math:`a` and :math:`b` are the left-hand and right-hand boundaries of\n the domain respectively, and :math:`u\\\\in\\\\text{domain}`.\n\n Parameters\n ----------\n function : :class:`pybamm.Symbol`\n The function to be integrated (will become self.children[0])\n integration_variable : :class:`pybamm.IndependentVariable`\n The variable over which to integrate\n\n **Extends:** :class:`SpatialOperator`\n \"\"\"\n\n def __init__(self, child, integration_variable):\n if not isinstance(integration_variable, list):\n integration_variable = [integration_variable]\n\n name = \"integral\"\n for var in integration_variable:\n if isinstance(var, pybamm.SpatialVariable):\n # Check that child and integration_variable domains agree\n if var.domain == child.domain:\n self._integration_dimension = \"primary\"\n elif (\n \"secondary\" in child.auxiliary_domains\n and var.domain == child.auxiliary_domains[\"secondary\"]\n ):\n self._integration_dimension = \"secondary\"\n elif (\n \"tertiary\" in child.auxiliary_domains\n and var.domain == child.auxiliary_domains[\"tertiary\"]\n ):\n self._integration_dimension = \"tertiary\"\n else:\n raise pybamm.DomainError(\n \"integration_variable must be the same as child domain or \"\n \"an auxiliary domain\"\n )\n else:\n raise TypeError(\n \"integration_variable must be of type pybamm.SpatialVariable, \"\n \"not {}\".format(type(var))\n )\n name += \" d{}\".format(var.name)\n\n if self._integration_dimension == \"primary\":\n # integral of a child takes the domain from auxiliary domain of the child\n if child.auxiliary_domains != {}:\n domain = child.auxiliary_domains[\"secondary\"]\n if \"tertiary\" in child.auxiliary_domains:\n auxiliary_domains = {\n \"secondary\": child.auxiliary_domains[\"tertiary\"]\n }\n else:\n auxiliary_domains = {}\n # if child has no auxiliary domain, integral removes domain\n else:\n domain = []\n auxiliary_domains = {}\n elif self._integration_dimension == \"secondary\":\n # integral in the secondary dimension keeps the same domain, moves tertiary\n # domain to secondary domain\n domain = child.domain\n if \"tertiary\" in child.auxiliary_domains:\n auxiliary_domains = {\"secondary\": child.auxiliary_domains[\"tertiary\"]}\n else:\n auxiliary_domains = {}\n elif self._integration_dimension == \"tertiary\":\n # integral in the tertiary dimension keeps the domain and secondary domain\n domain = child.domain\n auxiliary_domains = {\"secondary\": child.auxiliary_domains[\"secondary\"]}\n\n if any(isinstance(var, pybamm.SpatialVariable) for var in integration_variable):\n name += \" {}\".format(child.domain)\n\n self._integration_variable = 
integration_variable\n super().__init__(\n name, child, domain=domain, auxiliary_domains=auxiliary_domains\n )\n\n @property\n def integration_variable(self):\n return self._integration_variable\n\n def set_id(self):\n \"\"\" See :meth:`pybamm.Symbol.set_id()` \"\"\"\n self._id = hash(\n (self.__class__, self.name)\n + tuple(\n [\n integration_variable.id\n for integration_variable in self.integration_variable\n ]\n )\n + (self.children[0].id,)\n + tuple(self.domain)\n )\n\n def _unary_simplify(self, simplified_child):\n \"\"\" See :meth:`UnaryOperator._unary_simplify()`. \"\"\"\n\n return self.__class__(simplified_child, self.integration_variable)\n\n def _unary_new_copy(self, child):\n \"\"\" See :meth:`UnaryOperator._unary_new_copy()`. \"\"\"\n\n return self.__class__(child, self.integration_variable)\n\n def _evaluate_for_shape(self):\n \"\"\" See :meth:`pybamm.Symbol.evaluate_for_shape_using_domain()` \"\"\"\n return pybamm.evaluate_for_shape_using_domain(self.domain)\n\n def evaluates_on_edges(self, dimension):\n \"\"\" See :meth:`pybamm.Symbol.evaluates_on_edges()`. \"\"\"\n return False\n\n\nclass BaseIndefiniteIntegral(Integral):\n \"\"\"Base class for indefinite integrals (forward or backward).\n\n Parameters\n ----------\n function : :class:`pybamm.Symbol`\n The function to be integrated (will become self.children[0])\n integration_variable : :class:`pybamm.IndependentVariable`\n The variable over which to integrate\n\n **Extends:** :class:`Integral`\n \"\"\"\n\n def __init__(self, child, integration_variable):\n if isinstance(integration_variable, list):\n if len(integration_variable) > 1:\n raise NotImplementedError(\n \"Indefinite integral only implemeted w.r.t. one variable\"\n )\n else:\n integration_variable = integration_variable[0]\n super().__init__(child, integration_variable)\n # overwrite domains with child domains\n self.copy_domains(child)\n\n def _evaluate_for_shape(self):\n return self.children[0].evaluate_for_shape()\n\n def evaluates_on_edges(self, dimension):\n # If child evaluates on edges, indefinite integral doesn't\n # If child doesn't evaluate on edges, indefinite integral does\n return not self.child.evaluates_on_edges(dimension)\n\n\nclass IndefiniteIntegral(BaseIndefiniteIntegral):\n \"\"\"A node in the expression tree representing an indefinite integral operator\n\n .. math::\n I = \\\\int_{x_\\text{min}}^{x}\\\\!f(u)\\\\,du\n\n where :math:`u\\\\in\\\\text{domain}` which can represent either a\n spatial or temporal variable.\n\n Parameters\n ----------\n function : :class:`pybamm.Symbol`\n The function to be integrated (will become self.children[0])\n integration_variable : :class:`pybamm.IndependentVariable`\n The variable over which to integrate\n\n **Extends:** :class:`BaseIndefiniteIntegral`\n \"\"\"\n\n def __init__(self, child, integration_variable):\n super().__init__(child, integration_variable)\n # Overwrite the name\n self.name = \"{} integrated w.r.t {}\".format(\n child.name, self.integration_variable[0].name\n )\n if isinstance(integration_variable, pybamm.SpatialVariable):\n self.name += \" on {}\".format(self.integration_variable[0].domain)\n\n\nclass BackwardIndefiniteIntegral(BaseIndefiniteIntegral):\n \"\"\"A node in the expression tree representing a backward indefinite integral\n operator\n\n .. 
math::\n I = \\\\int_{x}^{x_\\text{max}}\\\\!f(u)\\\\,du\n\n where :math:`u\\\\in\\\\text{domain}` which can represent either a\n spatial or temporal variable.\n\n Parameters\n ----------\n function : :class:`pybamm.Symbol`\n The function to be integrated (will become self.children[0])\n integration_variable : :class:`pybamm.IndependentVariable`\n The variable over which to integrate\n\n **Extends:** :class:`BaseIndefiniteIntegral`\n \"\"\"\n\n def __init__(self, child, integration_variable):\n super().__init__(child, integration_variable)\n # Overwrite the name\n self.name = \"{} integrated backward w.r.t {}\".format(\n child.name, self.integration_variable[0].name\n )\n if isinstance(integration_variable, pybamm.SpatialVariable):\n self.name += \" on {}\".format(self.integration_variable[0].domain)\n\n\nclass DefiniteIntegralVector(SpatialOperator):\n \"\"\"A node in the expression tree representing an integral of the basis used\n for discretisation\n\n .. math::\n I = \\\\int_{a}^{b}\\\\!\\\\psi(x)\\\\,dx,\n\n where :math:`a` and :math:`b` are the left-hand and right-hand boundaries of\n the domain respectively and :math:`\\\\psi` is the basis function.\n\n Parameters\n ----------\n variable : :class:`pybamm.Symbol`\n The variable whose basis will be integrated over the entire domain\n vector_type : str, optional\n Whether to return a row or column vector (default is row)\n\n **Extends:** :class:`SpatialOperator`\n \"\"\"\n\n def __init__(self, child, vector_type=\"row\"):\n name = \"basis integral\"\n self.vector_type = vector_type\n super().__init__(name, child)\n # integrating removes the domain\n self.clear_domains()\n\n def set_id(self):\n \"\"\" See :meth:`pybamm.Symbol.set_id()` \"\"\"\n self._id = hash(\n (self.__class__, self.name, self.vector_type)\n + (self.children[0].id,)\n + tuple(self.domain)\n )\n\n def _unary_simplify(self, simplified_child):\n \"\"\" See :meth:`UnaryOperator._unary_simplify()`. \"\"\"\n\n return self.__class__(simplified_child, vector_type=self.vector_type)\n\n def _unary_new_copy(self, child):\n \"\"\" See :meth:`UnaryOperator._unary_new_copy()`. \"\"\"\n\n return self.__class__(child, vector_type=self.vector_type)\n\n def _evaluate_for_shape(self):\n \"\"\" See :meth:`pybamm.Symbol.evaluate_for_shape_using_domain()` \"\"\"\n return pybamm.evaluate_for_shape_using_domain(self.domain)\n\n\nclass BoundaryIntegral(SpatialOperator):\n \"\"\"A node in the expression tree representing an integral operator over the\n boundary of a domain\n\n .. math::\n I = \\\\int_{\\\\partial a}\\\\!f(u)\\\\,du,\n\n where :math:`\\\\partial a` is the boundary of the domain, and\n :math:`u\\\\in\\\\text{domain boundary}`.\n\n Parameters\n ----------\n function : :class:`pybamm.Symbol`\n The function to be integrated (will become self.children[0])\n region : str, optional\n The region of the boundary over which to integrate. If region is `entire`\n (default) the integration is carried out over the entire boundary. 
If\n region is `negative tab` or `positive tab` then the integration is only\n carried out over the appropriate part of the boundary corresponding to\n the tab.\n **Extends:** :class:`SpatialOperator`\n \"\"\"\n\n def __init__(self, child, region=\"entire\"):\n # boundary integral removes domain\n domain = []\n auxiliary_domains = {}\n\n name = \"boundary integral over \"\n if region == \"entire\":\n name += \"entire boundary\"\n elif region == \"negative tab\":\n name += \"negative tab\"\n elif region == \"positive tab\":\n name += \"positive tab\"\n self.region = region\n super().__init__(\n name, child, domain=domain, auxiliary_domains=auxiliary_domains\n )\n\n def set_id(self):\n \"\"\" See :meth:`pybamm.Symbol.set_id()` \"\"\"\n self._id = hash(\n (self.__class__, self.name) + (self.children[0].id,) + tuple(self.domain)\n )\n\n def _unary_simplify(self, simplified_child):\n \"\"\" See :meth:`UnaryOperator._unary_simplify()`. \"\"\"\n\n return self.__class__(simplified_child, region=self.region)\n\n def _unary_new_copy(self, child):\n \"\"\" See :meth:`UnaryOperator._unary_new_copy()`. \"\"\"\n\n return self.__class__(child, region=self.region)\n\n def _evaluate_for_shape(self):\n \"\"\" See :meth:`pybamm.Symbol.evaluate_for_shape_using_domain()` \"\"\"\n return pybamm.evaluate_for_shape_using_domain(self.domain)\n\n def evaluates_on_edges(self, dimension):\n \"\"\" See :meth:`pybamm.Symbol.evaluates_on_edges()`. \"\"\"\n return False\n\n\nclass DeltaFunction(SpatialOperator):\n \"\"\"Delta function. Currently can only be implemented at the edge of a domain\n\n Parameters\n ----------\n child : :class:`pybamm.Symbol`\n The variable that sets the strength of the delta function\n side : str\n Which side of the domain to implement the delta function on\n\n **Extends:** :class:`SpatialOperator`\n \"\"\"\n\n def __init__(self, child, side, domain):\n self.side = side\n if domain is None:\n raise pybamm.DomainError(\"Delta function domain cannot be None\")\n if child.domain != []:\n auxiliary_domains = {\"secondary\": child.domain}\n else:\n auxiliary_domains = {}\n super().__init__(\"delta_function\", child, domain, auxiliary_domains)\n\n def set_id(self):\n \"\"\" See :meth:`pybamm.Symbol.set_id()` \"\"\"\n self._id = hash(\n (self.__class__, self.name, self.side, self.children[0].id)\n + tuple(self.domain)\n + tuple([(k, tuple(v)) for k, v in self.auxiliary_domains.items()])\n )\n\n def evaluates_on_edges(self, dimension):\n \"\"\" See :meth:`pybamm.Symbol.evaluates_on_edges()`. \"\"\"\n return False\n\n def _unary_simplify(self, simplified_child):\n \"\"\" See :meth:`UnaryOperator._unary_simplify()`. \"\"\"\n return self.__class__(simplified_child, self.side, self.domain)\n\n def _unary_new_copy(self, child):\n \"\"\" See :meth:`UnaryOperator._unary_new_copy()`. 
\"\"\"\n return self.__class__(child, self.side, self.domain)\n\n def evaluate_for_shape(self):\n \"\"\"\n See :meth:`pybamm.Symbol.evaluate_for_shape_using_domain()`\n \"\"\"\n child_eval = self.children[0].evaluate_for_shape()\n vec = pybamm.evaluate_for_shape_using_domain(self.domain)\n\n return np.outer(child_eval, vec).reshape(-1, 1)\n\n\nclass BoundaryOperator(SpatialOperator):\n \"\"\"A node in the expression tree which gets the boundary value of a variable.\n\n Parameters\n ----------\n name : str\n The name of the symbol\n child : :class:`pybamm.Symbol`\n The variable whose boundary value to take\n side : str\n Which side to take the boundary value on (\"left\" or \"right\")\n\n **Extends:** :class:`SpatialOperator`\n \"\"\"\n\n def __init__(self, name, child, side):\n # side can only be \"negative tab\" or \"positive tab\" if domain is\n # \"current collector\"\n if side in [\"negative tab\", \"positive tab\"]:\n if child.domain[0] != \"current collector\":\n raise pybamm.ModelError(\n \"\"\"Can only take boundary value on the tabs in the domain\n 'current collector', but {} has domain {}\"\"\".format(\n child, child.domain[0]\n )\n )\n self.side = side\n # boundary value of a child takes the domain from auxiliary domain of the child\n if child.auxiliary_domains != {}:\n domain = child.auxiliary_domains[\"secondary\"]\n # if child has no auxiliary domain, boundary operator removes domain\n else:\n domain = []\n # tertiary auxiliary domain shift down to secondary\n try:\n auxiliary_domains = {\"secondary\": child.auxiliary_domains[\"tertiary\"]}\n except KeyError:\n auxiliary_domains = {}\n super().__init__(\n name, child, domain=domain, auxiliary_domains=auxiliary_domains\n )\n\n def set_id(self):\n \"\"\" See :meth:`pybamm.Symbol.set_id()` \"\"\"\n self._id = hash(\n (self.__class__, self.name, self.side, self.children[0].id)\n + tuple(self.domain)\n + tuple([(k, tuple(v)) for k, v in self.auxiliary_domains.items()])\n )\n\n def _unary_simplify(self, simplified_child):\n \"\"\" See :meth:`UnaryOperator._unary_simplify()`. \"\"\"\n return self.__class__(simplified_child, self.side)\n\n def _unary_new_copy(self, child):\n \"\"\" See :meth:`UnaryOperator._unary_new_copy()`. 
\"\"\"\n return self.__class__(child, self.side)\n\n def _evaluate_for_shape(self):\n \"\"\" See :meth:`pybamm.Symbol.evaluate_for_shape_using_domain()` \"\"\"\n return pybamm.evaluate_for_shape_using_domain(\n self.domain, self.auxiliary_domains\n )\n\n\nclass BoundaryValue(BoundaryOperator):\n \"\"\"A node in the expression tree which gets the boundary value of a variable.\n\n Parameters\n ----------\n child : :class:`pybamm.Symbol`\n The variable whose boundary value to take\n side : str\n Which side to take the boundary value on (\"left\" or \"right\")\n\n **Extends:** :class:`BoundaryOperator`\n \"\"\"\n\n def __init__(self, child, side):\n super().__init__(\"boundary value\", child, side)\n\n\nclass BoundaryGradient(BoundaryOperator):\n \"\"\"A node in the expression tree which gets the boundary flux of a variable.\n\n Parameters\n ----------\n child : :class:`pybamm.Symbol`\n The variable whose boundary flux to take\n side : str\n Which side to take the boundary flux on (\"left\" or \"right\")\n\n **Extends:** :class:`BoundaryOperator`\n \"\"\"\n\n def __init__(self, child, side):\n super().__init__(\"boundary flux\", child, side)\n\n\n#\n# Methods to call Gradient, Divergence, Laplacian and Gradient_Squared\n#\n\n\ndef grad(expression):\n \"\"\"convenience function for creating a :class:`Gradient`\n\n Parameters\n ----------\n\n expression : :class:`Symbol`\n the gradient will be performed on this sub-expression\n\n Returns\n -------\n\n :class:`Gradient`\n the gradient of ``expression``\n \"\"\"\n # Gradient of a broadcast is zero\n if isinstance(expression, pybamm.PrimaryBroadcast):\n new_child = pybamm.PrimaryBroadcast(0, expression.child.domain)\n return pybamm.PrimaryBroadcastToEdges(new_child, expression.domain)\n else:\n return Gradient(expression)\n\n\ndef div(expression):\n \"\"\"convenience function for creating a :class:`Divergence`\n\n Parameters\n ----------\n\n expression : :class:`Symbol`\n the divergence will be performed on this sub-expression\n\n Returns\n -------\n\n :class:`Divergence`\n the divergence of ``expression``\n \"\"\"\n # Divergence of a broadcast is zero\n if isinstance(expression, pybamm.PrimaryBroadcastToEdges):\n new_child = pybamm.PrimaryBroadcast(0, expression.child.domain)\n return pybamm.PrimaryBroadcast(new_child, expression.domain)\n else:\n return Divergence(expression)\n\n\ndef laplacian(expression):\n \"\"\"convenience function for creating a :class:`Laplacian`\n\n Parameters\n ----------\n\n expression : :class:`Symbol`\n the laplacian will be performed on this sub-expression\n\n Returns\n -------\n\n :class:`Laplacian`\n the laplacian of ``expression``\n \"\"\"\n\n return Laplacian(expression)\n\n\ndef grad_squared(expression):\n \"\"\"convenience function for creating a :class:`Gradient_Squared`\n\n Parameters\n ----------\n\n expression : :class:`Symbol`\n the inner product of the gradient with itself will be performed on this\n sub-expression\n\n Returns\n -------\n\n :class:`Gradient_Squared`\n inner product of the gradient of ``expression`` with itself\n \"\"\"\n\n return Gradient_Squared(expression)\n\n\n#\n# Method to call SurfaceValue\n#\n\n\ndef surf(symbol):\n \"\"\"convenience function for creating a right :class:`BoundaryValue`, usually in the\n spherical geometry\n\n Parameters\n ----------\n\n symbol : :class:`pybamm.Symbol`\n the surface value of this symbol will be returned\n\n Returns\n -------\n :class:`pybamm.BoundaryValue`\n the surface value of ``symbol``\n \"\"\"\n return boundary_value(symbol, \"right\")\n\n\ndef 
x_average(symbol):\n \"\"\"convenience function for creating an average in the x-direction\n\n Parameters\n ----------\n symbol : :class:`pybamm.Symbol`\n The function to be averaged\n\n Returns\n -------\n :class:`Symbol`\n the new averaged symbol\n \"\"\"\n # Can't take average if the symbol evaluates on edges\n if symbol.evaluates_on_edges(\"primary\"):\n raise ValueError(\"Can't take the x-average of a symbol that evaluates on edges\")\n # If symbol doesn't have a domain, its average value is itself\n if symbol.domain in [[], [\"current collector\"]]:\n new_symbol = symbol.new_copy()\n new_symbol.parent = None\n return new_symbol\n # If symbol is a Broadcast, its average value is its child\n elif isinstance(symbol, pybamm.Broadcast):\n return symbol.orphans[0]\n # If symbol is a concatenation of Broadcasts, its average value is its child\n elif (\n isinstance(symbol, pybamm.Concatenation)\n and all(isinstance(child, pybamm.Broadcast) for child in symbol.children)\n and symbol.domain == [\"negative electrode\", \"separator\", \"positive electrode\"]\n ):\n a, b, c = [orp.orphans[0] for orp in symbol.orphans]\n if a.id == b.id == c.id:\n return a\n else:\n geo = pybamm.GeometricParameters()\n l_n = geo.l_n\n l_s = geo.l_s\n l_p = geo.l_p\n return (l_n * a + l_s * b + l_p * c) / (l_n + l_s + l_p)\n # Otherwise, use Integral to calculate average value\n else:\n geo = pybamm.GeometricParameters()\n if symbol.domain == [\"negative electrode\"]:\n x = pybamm.standard_spatial_vars.x_n\n l = geo.l_n\n elif symbol.domain == [\"separator\"]:\n x = pybamm.standard_spatial_vars.x_s\n l = geo.l_s\n elif symbol.domain == [\"positive electrode\"]:\n x = pybamm.standard_spatial_vars.x_p\n l = geo.l_p\n elif symbol.domain == [\"negative electrode\", \"separator\", \"positive electrode\"]:\n x = pybamm.standard_spatial_vars.x\n l = pybamm.Scalar(1)\n elif symbol.domain == [\"negative particle\"]:\n x = pybamm.standard_spatial_vars.x_n\n l = geo.l_n\n elif symbol.domain == [\"positive particle\"]:\n x = pybamm.standard_spatial_vars.x_p\n l = geo.l_p\n else:\n x = pybamm.SpatialVariable(\"x\", domain=symbol.domain)\n v = pybamm.ones_like(symbol)\n l = pybamm.Integral(v, x)\n return Integral(symbol, x) / l\n\n\ndef z_average(symbol):\n \"\"\"convenience function for creating an average in the z-direction\n\n Parameters\n ----------\n symbol : :class:`pybamm.Symbol`\n The function to be averaged\n\n Returns\n -------\n :class:`Symbol`\n the new averaged symbol\n \"\"\"\n # Can't take average if the symbol evaluates on edges\n if symbol.evaluates_on_edges(\"primary\"):\n raise ValueError(\"Can't take the z-average of a symbol that evaluates on edges\")\n # Symbol must have domain [] or [\"current collector\"]\n if symbol.domain not in [[], [\"current collector\"]]:\n raise pybamm.DomainError(\n \"\"\"z-average only implemented in the 'current collector' domain,\n but symbol has domains {}\"\"\".format(\n symbol.domain\n )\n )\n # If symbol doesn't have a domain, its average value is itself\n if symbol.domain == []:\n new_symbol = symbol.new_copy()\n new_symbol.parent = None\n return new_symbol\n # If symbol is a Broadcast, its average value is its child\n elif isinstance(symbol, pybamm.Broadcast):\n return symbol.orphans[0]\n # Otherwise, use Integral to calculate average value\n else:\n geo = pybamm.GeometricParameters()\n z = pybamm.standard_spatial_vars.z\n l_z = geo.l_z\n return Integral(symbol, z) / l_z\n\n\ndef yz_average(symbol):\n \"\"\"convenience function for creating an average in the 
y-z-direction\n\n Parameters\n ----------\n symbol : :class:`pybamm.Symbol`\n The function to be averaged\n\n Returns\n -------\n :class:`Symbol`\n the new averaged symbol\n \"\"\"\n # Symbol must have domain [] or [\"current collector\"]\n if symbol.domain not in [[], [\"current collector\"]]:\n raise pybamm.DomainError(\n \"\"\"y-z-average only implemented in the 'current collector' domain,\n but symbol has domains {}\"\"\".format(\n symbol.domain\n )\n )\n # If symbol doesn't have a domain, its average value is itself\n if symbol.domain == []:\n new_symbol = symbol.new_copy()\n new_symbol.parent = None\n return new_symbol\n # If symbol is a Broadcast, its average value is its child\n elif isinstance(symbol, pybamm.Broadcast):\n return symbol.orphans[0]\n # Otherwise, use Integral to calculate average value\n else:\n geo = pybamm.GeometricParameters()\n y = pybamm.standard_spatial_vars.y\n z = pybamm.standard_spatial_vars.z\n l_y = geo.l_y\n l_z = geo.l_z\n return Integral(symbol, [y, z]) / (l_y * l_z)\n\n\ndef r_average(symbol):\n \"\"\"convenience function for creating an average in the r-direction\n\n Parameters\n ----------\n symbol : :class:`pybamm.Symbol`\n The function to be averaged\n\n Returns\n -------\n :class:`Symbol`\n the new averaged symbol\n \"\"\"\n # Can't take average if the symbol evaluates on edges\n if symbol.evaluates_on_edges(\"primary\"):\n raise ValueError(\"Can't take the r-average of a symbol that evaluates on edges\")\n # Otherwise, if symbol doesn't have a particle domain,\n # its r-averaged value is itself\n elif symbol.domain not in [[\"positive particle\"], [\"negative particle\"]]:\n new_symbol = symbol.new_copy()\n new_symbol.parent = None\n return new_symbol\n # If symbol is a secondary broadcast onto \"negative electrode\" or\n # \"positive electrode\", take the r-average of the child then broadcast back\n elif isinstance(symbol, pybamm.SecondaryBroadcast) and symbol.domains[\n \"secondary\"\n ] in [[\"positive electrode\"], [\"negative electrode\"]]:\n child = symbol.orphans[0]\n child_av = pybamm.r_average(child)\n return pybamm.PrimaryBroadcast(child_av, symbol.domains[\"secondary\"])\n # If symbol is a Broadcast onto a particle domain, its average value is its child\n elif isinstance(symbol, pybamm.PrimaryBroadcast) and symbol.domain in [\n [\"positive particle\"],\n [\"negative particle\"],\n ]:\n return symbol.orphans[0]\n else:\n r = pybamm.SpatialVariable(\"r\", symbol.domain)\n v = pybamm.FullBroadcast(\n pybamm.Scalar(1), symbol.domain, symbol.auxiliary_domains\n )\n return Integral(symbol, r) / Integral(v, r)\n\n\ndef boundary_value(symbol, side):\n \"\"\"convenience function for creating a :class:`pybamm.BoundaryValue`\n\n Parameters\n ----------\n symbol : `pybamm.Symbol`\n The symbol whose boundary value to take\n side : str\n Which side to take the boundary value on (\"left\" or \"right\")\n\n Returns\n -------\n :class:`BoundaryValue`\n the new integrated expression tree\n \"\"\"\n # If symbol doesn't have a domain, its boundary value is itself\n if symbol.domain == []:\n new_symbol = symbol.new_copy()\n new_symbol.parent = None\n return new_symbol\n # If symbol is a primary or full broadcast, its boundary value is its child\n if isinstance(symbol, (pybamm.PrimaryBroadcast, pybamm.FullBroadcast)):\n return symbol.orphans[0]\n # If symbol is a secondary broadcast, its boundary value is a primary broadcast of\n # the boundary value of its child\n if isinstance(symbol, pybamm.SecondaryBroadcast):\n # Read child (making copy)\n child = 
symbol.orphans[0]\n # Take boundary value\n boundary_child = boundary_value(child, side)\n # Broadcast back to the original symbol's secondary domain\n return pybamm.PrimaryBroadcast(boundary_child, symbol.secondary_domain)\n # Otherwise, calculate boundary value\n else:\n return BoundaryValue(symbol, side)\n\n\ndef sign(symbol):\n \" Returns a :class:`Sign` object. \"\n return Sign(symbol)\n"
]
| [
[
"scipy.sparse.issparse",
"numpy.ceil",
"scipy.sparse.csr_matrix.sign",
"numpy.sign",
"numpy.abs",
"numpy.outer",
"scipy.sparse.csr_matrix",
"numpy.floor"
]
]
|
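A pattern worth isolating from the PyBaMM row above is the known_evals dict threaded through UnaryOperator.evaluate(): each result is cached under the node's id, so a subtree shared by several parents is computed once per call. Below is a minimal self-contained sketch of that memoisation outside pybamm; Node, Scalar, Negate and the integer ids are illustrative stand-ins for pybamm.Symbol and its set_id() hashes, not the library's API.

class Node:
    _next_id = 0

    def __init__(self, children=()):
        self.children = list(children)
        self.id = Node._next_id  # stand-in for pybamm's structural hash
        Node._next_id += 1

    def evaluate(self, known_evals=None):
        # With a cache, compute at most once per id and return (value, cache),
        # mirroring the tuple signature used by pybamm's evaluate().
        if known_evals is not None:
            if self.id not in known_evals:
                known_evals[self.id] = self._compute(known_evals)
            return known_evals[self.id], known_evals
        return self._compute(known_evals)

    def _compute(self, known_evals):
        raise NotImplementedError

class Scalar(Node):
    def __init__(self, value):
        super().__init__()
        self.value = value

    def _compute(self, known_evals):
        return self.value

class Negate(Node):
    def __init__(self, child):
        super().__init__([child])

    def _compute(self, known_evals):
        # Evaluate the child through the same cache when one is supplied.
        if known_evals is not None:
            child_value, _ = self.children[0].evaluate(known_evals)
        else:
            child_value = self.children[0].evaluate()
        return -child_value

x = Scalar(3.0)
expr = Negate(Negate(x))
value, cache = expr.evaluate(known_evals={})
print(value, sorted(cache))  # 3.0 and the three cached node ids

Returning the cache alongside the value lets a caller evaluate several expressions that share subtrees against one dict, which is how a solver avoids re-evaluating common terms at the same (t, y).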
synoptic/MetPy | [
"79abf129fe3c7c9df1d17684cbd36672b7628cf4"
]
| [
"tests/io/test_metar.py"
]
| [
"# Copyright (c) 2008,2015,2016,2017,2018,2019 MetPy Developers.\n# Distributed under the terms of the BSD 3-Clause License.\n# SPDX-License-Identifier: BSD-3-Clause\n\"\"\"Test various METARs.\"\"\"\nfrom datetime import datetime\n\nimport numpy as np\nfrom numpy.testing import assert_almost_equal\nimport pytest\n\nfrom metpy.cbook import get_test_data\nfrom metpy.io import parse_metar_file, parse_metar_to_dataframe\nfrom metpy.io.metar import Metar, parse_metar\nfrom metpy.units import units\n\n\[email protected](['metar', 'truth'], [\n # Missing station\n ('METAR KLBG 261155Z AUTO 00000KT 10SM CLR 05/00 A3001 RMK AO2=',\n Metar('KLBG', np.nan, np.nan, np.nan, datetime(2017, 5, 26, 11, 55), 0, 0, 16093.44,\n np.nan, np.nan, np.nan, 'CLR', np.nan, np.nan, np.nan, np.nan, np.nan, np.nan,\n np.nan, 0, 5, 0, 30.01, 0, 0, 0, 'AO2')),\n # Broken clouds\n ('METAR KLOT 261155Z AUTO 00000KT 10SM BKN100 05/00 A3001 RMK AO2=',\n Metar('KLOT', 41.6, -88.1, 205, datetime(2017, 5, 26, 11, 55), 0, 0, 16093.44, np.nan,\n np.nan, np.nan, 'BKN', 10000, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 6, 5,\n 0, 30.01, 0, 0, 0, 'AO2')),\n # Few clouds, bad time and winds\n ('METAR KMKE 266155Z AUTO /////KT 10SM FEW100 05/00 A3001 RMK AO2=',\n Metar('KMKE', 42.95, -87.9, 206, np.nan, np.nan, np.nan, 16093.44,\n np.nan, np.nan, np.nan, 'FEW', 10000, np.nan, np.nan, np.nan, np.nan, np.nan,\n np.nan, 2, 5, 0, 30.01, 0, 0, 0, 'AO2')),\n # Many weather and cloud slots taken\n ('METAR RJOI 261155Z 00000KT 4000 -SHRA BR VCSH BKN009 BKN015 OVC030 OVC040 22/21 A2987 '\n 'RMK SHRAB35E44 SLP114 VCSH S-NW P0000 60021 70021 T02220206 10256 20211 55000=',\n Metar('RJOI', 34.13, 132.22, 2, datetime(2017, 5, 26, 11, 55), 0, 0, 4000, '-SHRA', 'BR',\n 'VCSH', 'BKN', 900, 'BKN', 1500, 'OVC', 3000, 'OVC', 4000, 8, 22, 21, 29.87, 80, 10,\n 16, 'SHRAB35E44 SLP114 VCSH S-NW P0000 60021 70021 T02220206 10256 20211 55000')),\n # Smoke for current weather\n ('KFLG 252353Z AUTO 27005KT 10SM FU BKN036 BKN085 22/03 A3018 RMK AO2 SLP130 T02220033 '\n '10250 20217 55007=',\n Metar('KFLG', 35.13, -111.67, 2134, datetime(2017, 5, 25, 23, 53), 270, 5, 16093.44, 'FU',\n np.nan, np.nan, 'BKN', 3600, 'BKN', 8500, np.nan, np.nan, np.nan, np.nan, 6, 22, 3,\n 30.18, 4, 0, 0, 'AO2 SLP130 T02220033 10250 20217 55007')),\n # CAVOK for visibility group\n ('METAR OBBI 011200Z 33012KT CAVOK 40/18 Q0997 NOSIG=',\n Metar('OBBI', 26.27, 50.63, 2, datetime(2017, 5, 1, 12, 00), 330, 12, 10000, np.nan,\n np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 0,\n 40, 18, units.Quantity(997, 'hPa').m_as('inHg'), 0, 0, 0, 'NOSIG')),\n # Visibility using a mixed fraction\n ('K2I0 011155Z AUTO 05004KT 1 3/4SM BR SCT001 22/22 A3009 RMK AO2 70001 T02210221 10223 '\n '20208=',\n Metar('K2I0', 37.35, -87.4, 134, datetime(2017, 5, 1, 11, 55), 50, 4, 2816.352, 'BR',\n np.nan, np.nan, 'SCT', 100, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 4,\n 22, 22, 30.09, 10, 0, 0, 'AO2 70001 T02210221 10223 20208')),\n # Missing temperature\n ('KIOW 011152Z AUTO A3006 RMK AO2 SLPNO 70020 51013 PWINO=',\n Metar('KIOW', 41.63, -91.55, 198, datetime(2017, 5, 1, 11, 52), np.nan, np.nan, np.nan,\n np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan,\n np.nan, 10, np.nan, np.nan, 30.06, 0, 0, 0, 'AO2 SLPNO 70020 51013 PWINO')),\n # Missing data\n ('METAR KBOU 011152Z AUTO 02006KT //// // ////// 42/02 Q1004=',\n Metar('KBOU', 40., -105.33, 1625, datetime(2017, 5, 1, 11, 52), 20, 6, np.nan,\n np.nan, np.nan, np.nan, np.nan, np.nan, 
np.nan, np.nan, np.nan, np.nan, np.nan,\n np.nan, 10, 42, 2, units.Quantity(1004, 'hPa').m_as('inHg'), 0, 0, 0, '')),\n # Vertical visibility\n ('KSLK 011151Z AUTO 21005KT 1/4SM FG VV002 14/13 A1013 RMK AO2 SLP151 70043 T01390133 '\n '10139 20094 53002=',\n Metar('KSLK', 44.4, -74.2, 498, datetime(2017, 5, 1, 11, 51), 210, 5, 402.336, 'FG',\n np.nan, np.nan, 'VV', 200, np.nan, np.nan, np.nan, np.nan, np.nan,\n np.nan, 8, 14, 13, units.Quantity(1013, 'hPa').m_as('inHg'), 45, 0, 0,\n 'AO2 SLP151 70043 T01390133 10139 20094 53002')),\n # Missing vertical visibility height\n ('SLCP 011200Z 18008KT 0100 FG VV/// 19/19 Q1019=',\n Metar('SLCP', -16.14, -62.02, 497, datetime(2017, 5, 1, 12, 00), 180, 8, 100, 'FG',\n np.nan, np.nan, 'VV', np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 8, 19,\n 19, units.Quantity(1019, 'hPa').m_as('inHg'), 45, 0, 0, '')),\n # BCFG current weather; also visibility is encoding 80SM which we're not adjusting\n ('METAR KMWN 011249Z 36037G45KT 80SM BCFG BKN/// FEW000 07/05 RMK BCFG FEW000 TPS LWR '\n 'BKN037 BCFG INTMT=',\n Metar('KMWN', 44.27, -71.3, 1910, datetime(2017, 5, 1, 12, 49), 360, 37,\n units.Quantity(80, 'mi').m_as('m'), 'BCFG', np.nan, np.nan, 'BKN', np.nan,\n 'FEW', 0, np.nan, np.nan, np.nan, np.nan, 6, 7, 5, np.nan, 41, 0, 0,\n 'BCFG FEW000 TPS LWR BKN037 BCFG INTMT')),\n # -DZ current weather\n ('KULM 011215Z AUTO 22003KT 10SM -DZ CLR 19/19 A3000 RMK AO2=',\n Metar('KULM', 44.32, -94.5, 308, datetime(2017, 5, 1, 12, 15), 220, 3, 16093.44, '-DZ',\n np.nan, np.nan, 'CLR', np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 0,\n 19, 19, 30., 51, 0, 0, 'AO2')),\n # CB trailing on cloud group\n ('METAR AGGH 011200Z 25003KT 9999 FEW015 FEW017CB BKN030 25/24 Q1011=',\n Metar('AGGH', -9.42, 160.05, 9, datetime(2017, 5, 1, 12, 00), 250, 3., 9999, np.nan,\n np.nan, np.nan, 'FEW', 1500, 'FEW', 1700, 'BKN', 3000, np.nan, np.nan, 6, 25,\n 24, units.Quantity(1011, 'hPa').m_as('inHg'), 0, 0, 0, '')),\n # 5 levels of clouds\n ('METAR KSEQ 011158Z AUTO 08003KT 9SM FEW009 BKN020 BKN120 BKN150 OVC180 22/22 A3007 RMK '\n 'AO2 RAB12E46RAB56E57 CIG 020V150 BKN020 V FEW SLP179 P0000 60000 70001 52008=',\n Metar('KSEQ', 29.566666666666666, -97.91666666666667, 160, datetime(2017, 5, 1, 11, 58),\n 80, 3., units.Quantity(9, 'miles').m_as('m'), np.nan, np.nan, np.nan, 'FEW', 900.,\n 'BKN', 2000., 'BKN', 12000., 'BKN', 15000., 8, 22., 22., 30.07, 0, 0, 0,\n 'AO2 RAB12E46RAB56E57 CIG 020V150 BKN020 V FEW SLP179 P0000 60000 70001 52008')),\n # -FZUP\n ('SPECI CBBC 060030Z AUTO 17009G15KT 9SM -FZUP FEW011 SCT019 BKN026 OVC042 02/01 A3004 '\n 'RMK ICG INTMT SLP177=',\n Metar('CBBC', 52.18, -128.15, 43, datetime(2017, 5, 6, 0, 30), 170, 9.,\n units.Quantity(9, 'miles').m_as('m'), '-FZUP', np.nan, np.nan, 'FEW', 1100.,\n 'SCT', 1900., 'BKN', 2600., 'OVC', 4200., 8, 2, 1, 30.04, 147, 0, 0,\n 'ICG INTMT SLP177')),\n # Weird VV group and +SG\n ('BGGH 060750Z AUTO 36004KT 0100NDV +SG VV001/// 05/05 Q1000',\n Metar('BGGH', 64.2, -51.68, 70, datetime(2017, 5, 6, 7, 50), 360, 4, 100, '+SG', np.nan,\n np.nan, 'VV', 100, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 8, 5, 5,\n units.Quantity(1000, 'hPa').m_as('inHg'), 77, 0, 0, '')),\n # COR at beginning, also wind MPS (m/s)\n ('COR ZLLL 101100Z 13010MPS 5000 -SHRA BLDU FEW033CB BKN046 21/11 Q1014 BECMG TL1240 '\n '04004MPS NSW',\n Metar('ZLLL', 36.52, 103.62, 1947, datetime(2017, 5, 10, 11, 0), 130,\n units.Quantity(10, 'm/s').m_as('knots'), 5000, '-SHRA', 'BLDU', np.nan, 'FEW',\n 3300, 'BKN', 4600, np.nan, np.nan, np.nan, np.nan, 
6, 21, 11,\n units.Quantity(1014, 'hPa').m_as('inHg'), 80, 1007, 0,\n 'BECMG TL1240 04004MPS NSW')),\n # M1/4SM vis, -VCTSSN weather\n ('K4BM 020127Z AUTO 04013G24KT 010V080 M1/4SM -VCTSSN OVC002 07/06 A3060 '\n 'RMK AO2 LTG DSNT SE THRU SW',\n Metar('K4BM', 39.04, -105.52, 3438, datetime(2017, 5, 2, 1, 27), 40, 13,\n units.Quantity(0.25, 'mi').m_as('m'), '-VCTSSN', np.nan, np.nan, 'OVC', 200,\n np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 8, 7, 6, 30.60, 2095, 0, 0,\n 'AO2 LTG DSNT SE THRU SW')),\n # Variable visibility group\n ('ENBS 121620Z 36008KT 9999 3000N VCFG -DZ SCT006 BKN009 12/11 Q1014',\n Metar('ENBS', 70.62, 29.72, 10, datetime(2017, 5, 12, 16, 20), 360, 8, 9999, 'VCFG',\n '-DZ', np.nan, 'SCT', 600, 'BKN', 900, np.nan, np.nan, np.nan, np.nan, 6, 12, 11,\n units.Quantity(1014, 'hPa').m_as('inHg'), 40, 51, 0, '')),\n # More complicated runway visibility\n ('CYYC 030047Z 26008G19KT 170V320 1SM R35L/5500VP6000FT/D R29/P6000FT/D R35R/P6000FT/D '\n '+TSRAGS BR OVC009CB 18/16 A2993 RMK CB8 FRQ LTGIC OVRHD PRESRR SLP127 DENSITY ALT '\n '4800FT',\n Metar('CYYC', 51.12, -114.02, 1084, datetime(2017, 5, 3, 0, 47), 260, 8,\n units.Quantity(1, 'mi').m_as('m'), '+TSRAGS', 'BR', np.nan, 'OVC', 900, np.nan,\n np.nan, np.nan, np.nan, np.nan, np.nan, 8, 18, 16, 29.93, 99, 10, 0,\n 'CB8 FRQ LTGIC OVRHD PRESRR SLP127 DENSITY ALT 4800FT')),\n # Oddly-placed COR\n ('KDMA 110240Z COR AUTO 08039G47KT 1/4SM -TSRA DS FEW008 BKN095 27/19 A2998 RMK AO2 '\n 'RAB0159E20DZB20E27DZB27E27RAB35 TSB00E15TSB32 PRESFR SLP106 $ COR 0246',\n Metar('KDMA', 32.17, -110.87, 824, datetime(2017, 5, 11, 2, 40), 80, 39, 402.336,\n '-TSRA', 'DS', np.nan, 'FEW', 800, 'BKN', 9500, np.nan, np.nan, np.nan, np.nan,\n 6, 27, 19, 29.98, 1095, 31, 0,\n 'AO2 RAB0159E20DZB20E27DZB27E27RAB35 TSB00E15TSB32 PRESFR SLP106 $ COR 0246')),\n # Ice crystals (IC) and no dewpoint -- South Pole!\n ('NZSP 052350Z 03009KT 3200 IC BLSN SCT019 M58/ A2874 RMK SKWY WNDS ESTMD CLN AIR 03005KT '\n 'ALL WNDS GRID',\n Metar('NZSP', -89.98, 179.98, 2830, datetime(2017, 5, 5, 23, 50), 30, 9, 3200, 'IC',\n 'BLSN', np.nan, 'SCT', 1900, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 4,\n -58, np.nan, 28.74, 78, 38, 0, 'SKWY WNDS ESTMD CLN AIR 03005KT ALL WNDS GRID')),\n # NSW\n ('VEIM 121200Z 09008KT 8000 NSW FEW018 SCT100 28/23 Q1008 BECMG 7000 NSW',\n Metar('VEIM', 24.77, 93.9, 781, datetime(2017, 5, 12, 12, 0), 90, 8, 8000, np.nan,\n np.nan, np.nan, 'FEW', 1800, 'SCT', 10000, np.nan, np.nan, np.nan, np.nan, 4,\n 28, 23, units.Quantity(1008, 'hPa').m_as('inHg'), 0, 0, 0, 'BECMG 7000 NSW')),\n # Variable vis with no direction\n ('TFFF 111830Z AUTO 11019G30KT 1000 0600 R10/1100D RA BCFG FEW014/// BKN021/// BKN027/// '\n '///CB 27/24 Q1015',\n Metar('TFFF', 14.6, -61.0, 5, datetime(2017, 5, 11, 18, 30), 110, 19, 1000, 'RA', 'BCFG',\n np.nan, 'FEW', 1400, 'BKN', 2100, 'BKN', 2700, np.nan, np.nan, 6, 27, 24,\n units.Quantity(1015, 'hPa').m_as('inHg'), 63, 41, 0, '')),\n # Space between + and wx code\n ('SKCG 031730Z 13004KT 0500 + TSRA BKN010 25/25 Q1012 RMK A2990',\n Metar('SKCG', 10.43, -75.52, 1, datetime(2017, 5, 3, 17, 30), 130, 4, 500, '+TSRA',\n np.nan, np.nan, 'BKN', 1000, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 6,\n 25, 25, units.Quantity(1012, 'hPa').m_as('inHg'), 1097, 0, 0, 'A2990'))],\n ids=['missing station', 'BKN', 'FEW', 'current weather', 'smoke', 'CAVOK', 'vis fraction',\n 'missing temps', 'missing data', 'vertical vis', 'missing vertical vis', 'BCFG',\n '-DZ', 'sky cover CB', '5 sky levels', '-FZUP', 'VV group', 'COR placement',\n 
'M1/4SM vis', 'variable vis', 'runway vis', 'odd COR', 'IC', 'NSW',\n 'variable vis no dir', 'space in wx code'])\ndef test_metar_parser(metar, truth):\n \"\"\"Test parsing individual METARs.\"\"\"\n assert parse_metar(metar, 2017, 5) == truth\n\n\ndef test_date_time_given():\n \"\"\"Test for when date_time is given.\"\"\"\n df = parse_metar_to_dataframe('K6B0 261200Z AUTO 00000KT 10SM CLR 20/M17 A3002 RMK AO2 '\n 'T01990165=', year=2019, month=6)\n assert df.date_time[0] == datetime(2019, 6, 26, 12)\n assert df.eastward_wind[0] == 0\n assert df.northward_wind[0] == 0\n assert_almost_equal(df.air_pressure_at_sea_level[0], 1016.56)\n assert_almost_equal(df.visibility.values, 16093.44)\n\n\ndef test_parse_metar_df_positional_datetime_failure():\n \"\"\"Test that positional year, month arguments fail for parse_metar_to_dataframe.\"\"\"\n # pylint: disable=too-many-function-args\n with pytest.raises(TypeError, match='takes 1 positional argument but 3 were given'):\n parse_metar_to_dataframe('K6B0 261200Z AUTO 00000KT 10SM CLR 20/M17'\n 'A3002 RMK AO2 T01990165=', 2019, 6)\n\n\ndef test_parse_metar_to_dataframe():\n \"\"\"Test parsing a single METAR to a DataFrame.\"\"\"\n df = parse_metar_to_dataframe('KDEN 012153Z 09010KT 10SM FEW060 BKN110 BKN220 27/13 '\n 'A3010 RMK AO2 LTG DSNT SW AND W SLP114 OCNL LTGICCG '\n 'DSNT SW CB DSNT SW MOV E T02670128')\n assert df.wind_direction.values == 90\n assert df.wind_speed.values == 10\n assert_almost_equal(df.eastward_wind.values, -10)\n assert_almost_equal(df.northward_wind.values, 0)\n assert_almost_equal(df.visibility.values, 16093.44)\n assert df.air_temperature.values == 27\n assert df.dew_point_temperature.values == 13\n\n\ndef test_parse_file():\n \"\"\"Test the parser on an entire file.\"\"\"\n input_file = get_test_data('metar_20190701_1200.txt', as_file_obj=False)\n df = parse_metar_file(input_file)\n\n # Check counts (non-NaN) of various fields\n counts = df.count()\n assert counts.station_id == 8980\n assert counts.latitude == 8968\n assert counts.longitude == 8968\n assert counts.elevation == 8968\n assert counts.date_time == 8980\n assert counts.wind_direction == 8577\n assert counts.wind_speed == 8844\n assert counts.visibility == 8486\n assert counts.current_wx1 == 1090\n assert counts.current_wx2 == 82\n assert counts.current_wx3 == 1\n assert counts.low_cloud_type == 7361\n assert counts.low_cloud_level == 3867\n assert counts.medium_cloud_type == 1646\n assert counts.medium_cloud_level == 1641\n assert counts.high_cloud_type == 632\n assert counts.high_cloud_level == 626\n assert counts.highest_cloud_type == 37\n assert counts.highest_cloud_level == 37\n assert counts.cloud_coverage == 8980\n assert counts.air_temperature == 8779\n assert counts.dew_point_temperature == 8740\n assert counts.altimeter == 8450\n assert counts.remarks == 8980\n assert (df.current_wx1_symbol != 0).sum() == counts.current_wx1\n assert (df.current_wx2_symbol != 0).sum() == counts.current_wx2\n assert (df.current_wx3_symbol != 0).sum() == counts.current_wx3\n assert counts.air_pressure_at_sea_level == 8378\n assert counts.eastward_wind == 8577\n assert counts.northward_wind == 8577\n\n # KVPZ 011156Z AUTO 27005KT 10SM CLR 23/19 A3004 RMK AO2 SLP166\n test = df[df.station_id == 'KVPZ']\n assert test.air_temperature.values == 23\n assert test.dew_point_temperature.values == 19\n assert test.altimeter.values == 30.04\n assert_almost_equal(test.eastward_wind.values, 5)\n assert_almost_equal(test.northward_wind.values, 0)\n assert 
test.air_pressure_at_sea_level.values == 1016.76\n\n # Check that this ob properly gets all lines\n paku = df[df.station_id == 'PAKU']\n assert_almost_equal(paku.air_temperature.values, [9, 12])\n assert_almost_equal(paku.dew_point_temperature.values, [9, 10])\n assert_almost_equal(paku.altimeter.values, [30.02, 30.04])\n\n\ndef test_parse_file_positional_datetime_failure():\n \"\"\"Test that positional year, month arguments fail for parse_metar_file.\"\"\"\n # pylint: disable=too-many-function-args\n input_file = get_test_data('metar_20190701_1200.txt', as_file_obj=False)\n with pytest.raises(TypeError, match='takes 1 positional argument but 3 were given'):\n parse_metar_file(input_file, 2016, 12)\n\n\ndef test_parse_file_bad_encoding():\n \"\"\"Test the parser on an entire file that has at least one bad utf-8 encoding.\"\"\"\n input_file = get_test_data('2020010600_sao.wmo', as_file_obj=False)\n df = parse_metar_file(input_file)\n\n # Check counts (non-NaN) of various fields\n counts = df.count()\n assert counts.station_id == 8802\n assert counts.latitude == 8789\n assert counts.longitude == 8789\n assert counts.elevation == 8789\n assert counts.date_time == 8802\n assert counts.wind_direction == 8377\n assert counts.wind_speed == 8673\n assert counts.visibility == 8312\n assert counts.current_wx1 == 1412\n assert counts.current_wx2 == 213\n assert counts.current_wx3 == 3\n assert counts.low_cloud_type == 7672\n assert counts.low_cloud_level == 3816\n assert counts.medium_cloud_type == 1632\n assert counts.medium_cloud_level == 1623\n assert counts.high_cloud_type == 546\n assert counts.high_cloud_level == 545\n assert counts.highest_cloud_type == 40\n assert counts.highest_cloud_level == 40\n assert counts.cloud_coverage == 8802\n assert counts.air_temperature == 8597\n assert counts.dew_point_temperature == 8536\n assert counts.altimeter == 8246\n assert counts.remarks == 8802\n assert (df.current_wx1_symbol != 0).sum() == counts.current_wx1\n assert (df.current_wx2_symbol != 0).sum() == counts.current_wx2\n assert (df.current_wx3_symbol != 0).sum() == counts.current_wx3\n assert counts.air_pressure_at_sea_level == 8207\n assert counts.eastward_wind == 8377\n assert counts.northward_wind == 8377\n\n # KDEN 052353Z 16014KT 10SM FEW120 FEW220 02/M07 A3008 RMK AO2 SLP190 T00171072...\n test = df[df.station_id == 'KDEN']\n assert_almost_equal(test.visibility.values, 16093.44)\n assert test.air_temperature.values == 2\n assert test.air_pressure_at_sea_level.values == 1024.71\n\n\ndef test_parse_file_object():\n \"\"\"Test the parser reading from a file-like object.\"\"\"\n input_file = get_test_data('metar_20190701_1200.txt', mode='rt')\n # KOKC 011152Z 18006KT 7SM FEW080 FEW250 21/21 A3003 RMK AO2 SLP155 T02060206...\n df = parse_metar_file(input_file)\n test = df[df.station_id == 'KOKC']\n assert_almost_equal(test.visibility.values, 11265.408)\n assert test.air_temperature.values == 21\n assert test.dew_point_temperature.values == 21\n assert test.altimeter.values == 30.03\n assert_almost_equal(test.eastward_wind.values, 0)\n assert_almost_equal(test.northward_wind.values, 6)\n\n\ndef test_parse_no_pint_objects_in_df():\n \"\"\"Test that there are no Pint quantities in dataframes created by parser.\"\"\"\n input_file = get_test_data('metar_20190701_1200.txt', mode='rt')\n metar_str = ('KSLK 011151Z AUTO 21005KT 1/4SM FG VV002 14/13 A1013 RMK AO2 SLP151 70043 '\n 'T01390133 10139 20094 53002=')\n\n for df in (parse_metar_file(input_file), parse_metar_to_dataframe(metar_str)):\n for column 
in df:\n assert not isinstance(df[column][0], units.Quantity)\n"
]
| [
[
"numpy.testing.assert_almost_equal"
]
]
|
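The METAR test row above leans on `numpy.testing.assert_almost_equal` for every floating-point field (unit-converted pressures, visibilities, wind components). A minimal standalone sketch of that pattern; the hPa-to-inHg factor below is the standard 33.8639 hPa per inHg, not taken from MetPy:

```python
import numpy as np
from numpy.testing import assert_almost_equal

# Altimeter setting converted from hPa to inHg, mirroring
# units.Quantity(1004, 'hPa').m_as('inHg') in the truth tuples.
pressure_inhg = 1004.0 / 33.8639

# assert_almost_equal raises AssertionError unless the values agree
# to the requested number of decimal places (default 7).
assert_almost_equal(pressure_inhg, 29.648, decimal=3)

# It also compares arrays elementwise, as the tests do for fields
# such as df.visibility.values.
assert_almost_equal(np.array([16093.44, 16093.44]), 16093.44)
print("all comparisons passed")
```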
lirnish/US-Salary-Prediction | [
"a3a90016a0f0dd15578a70546ef42144229936e6"
]
| [
"src/compare_models.py"
]
| [
"# author: Andy Yang\n# date: 2021-11-27\n\n\"\"\"This script imports preprocessed test data and fitted Ridge and RandomForestRegressor models. \nIt then evaluates them on the test set and outputs evaluation metrics to the output directory.\n\nUsage: fit_model.py --source_data=<filepath> --output_dir=<filepath>\n\nOptions:\n--source_data=<filepath> directory containing transformed data (this is a required option)\n--output_dir=<filepath> directory to output figures and tables (this is a required option)\n\"\"\" \n\nfrom docopt import docopt\nimport random\nimport numpy as np\nimport pandas as pd\nimport altair as alt\nimport sklearn.metrics as sk\nimport math\nimport pickle\nimport scipy.sparse\n\nfrom sklearn.model_selection import train_test_split\nfrom altair_saver import save\nfrom sklearn.compose import ColumnTransformer, make_column_transformer\nfrom sklearn.dummy import DummyClassifier, DummyRegressor\nfrom sklearn.ensemble import RandomForestClassifier, RandomForestRegressor\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_selection import RFE, RFECV\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.linear_model import LogisticRegression, Ridge, RidgeCV\nfrom sklearn.metrics import make_scorer\nfrom sklearn.model_selection import (\n GridSearchCV,\n RandomizedSearchCV,\n ShuffleSplit,\n cross_val_score,\n cross_validate,\n train_test_split,\n)\n\nfrom sklearn.pipeline import Pipeline, make_pipeline\nfrom sklearn.preprocessing import (\n OneHotEncoder,\n OrdinalEncoder,\n PolynomialFeatures,\n StandardScaler,\n)\n\nfrom sklearn.pipeline import Pipeline, make_pipeline\n\nopt = docopt(__doc__)\n\ndef main(opt):\n input_dir = opt['--source_data']\n output_dir = opt['--output_dir']\n\n # load test data from input directory\n print(\"loading test data..\")\n X_transformed_test_sparse = scipy.sparse.load_npz(input_dir + '/x_test_sparse.npz')\n X_transformed_test = pd.DataFrame.sparse.from_spmatrix(X_transformed_test_sparse)\n y_test = pd.read_csv(input_dir + '/y_test.csv')\n feats = pd.read_csv(input_dir + '/feature_names.csv').iloc[:,0]\n\n # load models from pickle files\n print(\"loading fitted models..\")\n ridge_model = pickle.load(open(\"results/models/ridge_model.pickle\", 'rb'))\n rf_model = pickle.load(open(\"results/models/rf_model.pickle\", 'rb'))\n\n # generate predictions on test set\n y_pred_ridge = ridge_model.predict(X_transformed_test)\n y_pred_rf = rf_model.predict(X_transformed_test)\n\n print(\"creating tables and figures..\")\n # create scores dataframe and save it to output directory\n r2_ridge = round(sk.r2_score(y_test, y_pred_ridge), 2)\n r2_rf = round(sk.r2_score(y_test, y_pred_rf), 2)\n rmse = round(math.sqrt(sk.mean_squared_error(y_test, y_pred_ridge)), 2)\n rmse_rf = round(math.sqrt(sk.mean_squared_error(y_test, y_pred_rf)), 2)\n\n scores = {\n \"Metric\": [\"R2\", \"RMSE\"],\n \"Ridge Scores\": [r2_ridge, rmse],\n }\n \n test_scores = pd.DataFrame(scores)\n test_scores.to_csv(output_dir + '/tables/test_scores.csv', index = False)\n print(\"saved model test results to: \" + output_dir)\n\n # Plot the predicted values against true values, then save the graph in the output directory\n y_data = {\n \"Ridge precitions\": y_pred_ridge,\n \"y_actual\": y_test.iloc[:, 0]\n }\n salary_data = pd.DataFrame(y_data)\n\n point = alt.Chart(salary_data, title='Ridge regression effectiveness in predicting salary values').mark_circle(opacity = 0.5).encode(\n alt.X(\"y_actual\", title=\"Actual Salary\"),\n alt.Y(\"Ridge precitions\", 
title=\"Predicted Salary\")\n )\n\n line = pd.DataFrame({\n 'x': [0, 500000],\n 'y': [0, 500000],\n })\n\n line_plot = alt.Chart(line).mark_line(color= 'red').encode(\n x= 'x',\n y= 'y',\n )\n\n chart = point + line_plot\n chart.save(output_dir + \"/figures/predicted_vs_actual_chart.png\")\n print(\"saved model evaluation chart to: \" + output_dir)\n\n # create model coefficient dataframes and save them to the output directory\n neg_coefficients_df = pd.DataFrame(data=ridge_model.coef_, index=feats, columns=[\"coefficient\"]).sort_values(\"coefficient\")[:10].reset_index()\n neg_coefficients_df.columns = [\"Feature\", \"Coefficient\"]\n pos_coefficients_df =pd.DataFrame(data=ridge_model.coef_, index=feats, columns=[\"coefficient\"]).sort_values(\"coefficient\", ascending = False)[:10].reset_index()\n pos_coefficients_df.columns = [\"Feature\", \"Coefficient\"]\n \n ridge_feats = pd.DataFrame(data=ridge_model.coef_, index=feats, columns=[\"coefficient\"]).sort_values(by = \"coefficient\", ascending = False).reset_index()\n rf_feats = pd.DataFrame(data=rf_model.feature_importances_, index=feats, columns=[\"coefficient\"]).sort_values(by = \"coefficient\", ascending = False).reset_index()\n\n rf_coef_df = pd.DataFrame(rf_feats)\n ridge_coef_df = pd.DataFrame(ridge_feats)\n combined_df = pd.merge(ridge_coef_df[:10], rf_coef_df[:10], left_index=True, right_index=True).reset_index().round(4)\n combined_df.columns = [\"Significance Rank\", \"Ridge Feature\", \"Ridge Coefficient\", \"Random Forest Feature\", \"RandomForest Coefficient\"]\n combined_df[\"Significance Rank\"] = combined_df[\"Significance Rank\"] + 1\n\n neg_coefficients_df.to_csv(output_dir + '/tables/negative_coefficients_ridge.csv', index = False)\n pos_coefficients_df.to_csv(output_dir + '/tables/positive_coefficients_ridge.csv', index = False)\n combined_df.to_csv(output_dir + '/tables/coefficient_comparison.csv', index = False)\n print(\"saved coefficient tables to: \" + output_dir)\n\n\nif __name__ == \"__main__\":\n main(opt)\n"
]
| [
[
"sklearn.metrics.mean_squared_error",
"pandas.merge",
"pandas.DataFrame.sparse.from_spmatrix",
"pandas.DataFrame",
"sklearn.metrics.r2_score",
"pandas.read_csv"
]
]
|
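The evaluation block in `compare_models.py` derives RMSE by square-rooting `sklearn.metrics.mean_squared_error` and pairs it with `r2_score` in a small scores DataFrame. The same pattern on made-up salary numbers (a sketch, not the project's data):

```python
import math
import pandas as pd
import sklearn.metrics as sk

# Toy stand-ins for y_test and y_pred_ridge.
y_true = [52000, 88000, 61000, 120000]
y_pred = [55000, 90000, 58000, 110000]

# r2_score measures explained variance; RMSE is the square root of
# mean_squared_error, exactly as computed in the script.
r2 = round(sk.r2_score(y_true, y_pred), 2)
rmse = round(math.sqrt(sk.mean_squared_error(y_true, y_pred)), 2)

scores = pd.DataFrame({"Metric": ["R2", "RMSE"], "Ridge Scores": [r2, rmse]})
print(scores)
```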
CREVIOS/YHSA-Comp-18 | [
"e0ba41d596c820730eead396364df5854c4774c8",
"e0ba41d596c820730eead396364df5854c4774c8"
]
| [
"sentence_similarity_graph/graph_1.0_adv_sen_sim.py",
"sentence_similarity_graph/graph_1.0_norm_sen_sim.py"
]
| [
"#from nltk.tokenize import sent_tokenize,word_tokenize\r\nfrom __future__ import division\r\nimport nltk\r\nfrom nltk.tokenize import sent_tokenize,word_tokenize\r\nfrom heapq import nlargest\r\n\r\nfrom nltk.corpus import wordnet as wn\r\nfrom nltk.corpus import brown\r\nimport math\r\nimport numpy as np\r\nimport sys\r\nimport time\r\n\r\n# Parameters to the algorithm. Currently set to values that was reported\r\n# in the paper to produce \"best\" results.\r\ntime1=time.time()\r\nALPHA = 0.2\r\nBETA = 0.45\r\nETA = 0.4\r\nPHI = 0.2\r\nDELTA = 0.85\r\n\r\nbrown_freqs = dict()\r\nN = 0\r\n\r\n\r\n######################### word similarity ##########################\r\n\r\ndef get_best_synset_pair(word_1, word_2):\r\n \"\"\"\r\n Choose the pair with highest path similarity among all pairs.\r\n Mimics pattern-seeking behavior of humans.\r\n \"\"\"\r\n max_sim = -1.0\r\n synsets_1 = wn.synsets(word_1)\r\n synsets_2 = wn.synsets(word_2)\r\n if len(synsets_1) == 0 or len(synsets_2) == 0:\r\n return None, None\r\n else:\r\n max_sim = -1.0\r\n best_pair = None, None\r\n for synset_1 in synsets_1:\r\n for synset_2 in synsets_2:\r\n sim = wn.path_similarity(synset_1, synset_2)\r\n if sim == None:\r\n sim = 0\r\n if sim > max_sim:\r\n max_sim = sim\r\n best_pair = synset_1, synset_2\r\n return best_pair\r\n\r\n\r\ndef length_dist(synset_1, synset_2):\r\n \"\"\"\r\n Return a measure of the length of the shortest path in the semantic\r\n ontology (Wordnet in our case as well as the paper's) between two\r\n synsets.\r\n \"\"\"\r\n l_dist = sys.maxsize\r\n if synset_1 is None or synset_2 is None:\r\n return 0.0\r\n if synset_1 == synset_2:\r\n # if synset_1 and synset_2 are the same synset return 0\r\n l_dist = 0.0\r\n else:\r\n wset_1 = set([str(x.name()) for x in synset_1.lemmas()])\r\n wset_2 = set([str(x.name()) for x in synset_2.lemmas()])\r\n if len(wset_1.intersection(wset_2)) > 0:\r\n # if synset_1 != synset_2 but there is word overlap, return 1.0\r\n l_dist = 1.0\r\n else:\r\n # just compute the shortest path between the two\r\n l_dist = synset_1.shortest_path_distance(synset_2)\r\n if l_dist is None:\r\n l_dist = 0.0\r\n # normalize path length to the range [0,1]\r\n return math.exp(-ALPHA * l_dist)\r\n\r\n\r\ndef hierarchy_dist(synset_1, synset_2):\r\n \"\"\"\r\n Return a measure of depth in the ontology to model the fact that\r\n nodes closer to the root are broader and have less semantic similarity\r\n than nodes further away from the root.\r\n \"\"\"\r\n h_dist = sys.maxsize\r\n if synset_1 is None or synset_2 is None:\r\n return h_dist\r\n if synset_1 == synset_2:\r\n # return the depth of one of synset_1 or synset_2\r\n h_dist = max([x[1] for x in synset_1.hypernym_distances()])\r\n else:\r\n # find the max depth of least common subsumer\r\n hypernyms_1 = {x[0]: x[1] for x in synset_1.hypernym_distances()}\r\n hypernyms_2 = {x[0]: x[1] for x in synset_2.hypernym_distances()}\r\n lcs_candidates = set(hypernyms_1.keys()).intersection(\r\n set(hypernyms_2.keys()))\r\n if len(lcs_candidates) > 0:\r\n lcs_dists = []\r\n for lcs_candidate in lcs_candidates:\r\n lcs_d1 = 0\r\n if lcs_candidate in hypernyms_1:\r\n lcs_d1 = hypernyms_1[lcs_candidate]\r\n lcs_d2 = 0\r\n if lcs_candidate in hypernyms_2:\r\n lcs_d2 = hypernyms_2[lcs_candidate]\r\n lcs_dists.append(max([lcs_d1, lcs_d2]))\r\n h_dist = max(lcs_dists)\r\n else:\r\n h_dist = 0\r\n return ((math.exp(BETA * h_dist) - math.exp(-BETA * h_dist)) /\r\n (math.exp(BETA * h_dist) + math.exp(-BETA * h_dist)))\r\n\r\n\r\ndef word_similarity(word_1, 
word_2):\r\n synset_pair = get_best_synset_pair(word_1, word_2)\r\n return (length_dist(synset_pair[0], synset_pair[1]) *\r\n hierarchy_dist(synset_pair[0], synset_pair[1]))\r\n\r\n\r\n######################### sentence similarity ##########################\r\n\r\ndef most_similar_word(word, word_set):\r\n \"\"\"\r\n Find the word in the joint word set that is most similar to the word\r\n passed in. We use the algorithm above to compute word similarity between\r\n the word and each word in the joint word set, and return the most similar\r\n word and the actual similarity value.\r\n \"\"\"\r\n max_sim = -1.0\r\n sim_word = \"\"\r\n for ref_word in word_set:\r\n sim = word_similarity(word, ref_word)\r\n if sim > max_sim:\r\n max_sim = sim\r\n sim_word = ref_word\r\n return sim_word, max_sim\r\n\r\n\r\ndef info_content(lookup_word):\r\n \"\"\"\r\n Uses the Brown corpus available in NLTK to calculate a Laplace\r\n smoothed frequency distribution of words, then uses this information\r\n to compute the information content of the lookup_word.\r\n \"\"\"\r\n global N\r\n if N == 0:\r\n # poor man's lazy evaluation\r\n for sent in brown.sents():\r\n for word in sent:\r\n word = word.lower()\r\n if word not in brown_freqs:\r\n brown_freqs[word] = 0\r\n brown_freqs[word] = brown_freqs[word] + 1\r\n N = N + 1\r\n lookup_word = lookup_word.lower()\r\n n = 0 if lookup_word not in brown_freqs else brown_freqs[lookup_word]\r\n return 1.0 - (math.log(n + 1) / math.log(N + 1))\r\n\r\n\r\ndef semantic_vector(words, joint_words, info_content_norm):\r\n \"\"\"\r\n Computes the semantic vector of a sentence. The sentence is passed in as\r\n a collection of words. The size of the semantic vector is the same as the\r\n size of the joint word set. The elements are 1 if a word in the sentence\r\n already exists in the joint word set, or the similarity of the word to the\r\n most similar word in the joint word set if it doesn't. Both values are\r\n further normalized by the word's (and similar word's) information content\r\n if info_content_norm is True.\r\n \"\"\"\r\n sent_set = set(words)\r\n semvec = np.zeros(len(joint_words))\r\n i = 0\r\n for joint_word in joint_words:\r\n if joint_word in sent_set:\r\n # if word in union exists in the sentence, s(i) = 1 (unnormalized)\r\n semvec[i] = 1.0\r\n if info_content_norm:\r\n semvec[i] = semvec[i] * math.pow(info_content(joint_word), 2)\r\n else:\r\n # find the most similar word in the joint set and set the sim value\r\n sim_word, max_sim = most_similar_word(joint_word, sent_set)\r\n semvec[i] = max_sim if max_sim > PHI else 0.0\r\n if info_content_norm:\r\n semvec[i] = semvec[i] * info_content(joint_word) * info_content(sim_word)\r\n i = i + 1\r\n return semvec\r\n\r\n\r\ndef semantic_similarity(sentence_1, sentence_2, info_content_norm):\r\n \"\"\"\r\n Computes the semantic similarity between two sentences as the cosine\r\n similarity between the semantic vectors computed for each sentence.\r\n \"\"\"\r\n words_1 = nltk.word_tokenize(sentence_1)\r\n words_2 = nltk.word_tokenize(sentence_2)\r\n joint_words = set(words_1).union(set(words_2))\r\n vec_1 = semantic_vector(words_1, joint_words, info_content_norm)\r\n vec_2 = semantic_vector(words_2, joint_words, info_content_norm)\r\n return np.dot(vec_1, vec_2.T) / (np.linalg.norm(vec_1) * np.linalg.norm(vec_2))\r\n\r\n\r\n######################### word order similarity ##########################\r\n\r\ndef word_order_vector(words, joint_words, windex):\r\n \"\"\"\r\n Computes the word order vector for a sentence. 
The sentence is passed\r\n in as a collection of words. The size of the word order vector is the\r\n same as the size of the joint word set. The elements of the word order\r\n vector are the position mapping (from the windex dictionary) of the\r\n word in the joint set if the word exists in the sentence. If the word\r\n does not exist in the sentence, then the value of the element is the\r\n position of the most similar word in the sentence as long as the similarity\r\n is above the threshold ETA.\r\n \"\"\"\r\n wovec = np.zeros(len(joint_words))\r\n i = 0\r\n wordset = set(words)\r\n for joint_word in joint_words:\r\n if joint_word in wordset:\r\n # word in joint_words found in sentence, just populate the index\r\n wovec[i] = windex[joint_word]\r\n else:\r\n # word not in joint_words, find most similar word and populate\r\n # word_vector with the thresholded similarity\r\n sim_word, max_sim = most_similar_word(joint_word, wordset)\r\n if max_sim > ETA:\r\n wovec[i] = windex[sim_word]\r\n else:\r\n wovec[i] = 0\r\n i = i + 1\r\n return wovec\r\n\r\n\r\ndef word_order_similarity(sentence_1, sentence_2):\r\n \"\"\"\r\n Computes the word-order similarity between two sentences as the normalized\r\n difference of word order between the two sentences.\r\n \"\"\"\r\n words_1 = nltk.word_tokenize(sentence_1)\r\n words_2 = nltk.word_tokenize(sentence_2)\r\n joint_words = list(set(words_1).union(set(words_2)))\r\n windex = {x[1]: x[0] for x in enumerate(joint_words)}\r\n r1 = word_order_vector(words_1, joint_words, windex)\r\n r2 = word_order_vector(words_2, joint_words, windex)\r\n return 1.0 - (np.linalg.norm(r1 - r2) / np.linalg.norm(r1 + r2))\r\n\r\n\r\n######################### overall similarity ##########################\r\n\r\ndef similarity(sentence_1, sentence_2, info_content_norm):\r\n \"\"\"\r\n Calculate the semantic similarity between two sentences. The last\r\n parameter is True or False depending on whether information content\r\n normalization is desired or not.\r\n \"\"\"\r\n return DELTA * semantic_similarity(sentence_1, sentence_2, info_content_norm) + \\\r\n (1.0 - DELTA) * word_order_similarity(sentence_1, sentence_2)\r\n#input1='Bangladesh is a developing country. Agriculture is main culture of it. 
It earns more money by exporting jute and prawn.',time=10sec\r\n\"\"\"#output1=Automatic text summarization is a text- mining task that extracts essential sentences to cover almost all the concepts of a document.\r\nIt is to reduce users’ consuming time in document reading without losing the general issues for users’ comprehension.\r\nWith document summary available, users can easily decide its relevancy to their interests and acquire desired documents with much less mental loads involved.\r\nouput2=The region witnessed the Bengali Language Movement in 1952 and the Bangladesh Liberation War in 1971.\r\nAfter independence was achieved, a parliamentary republic was established.\r\nA presidential government was in place between 1975 and 1990, followed by a return to parliamentary democracy.\r\nThe country continues to face challenges in the areas of poverty, education, healthcare and corruption.Bangladesh is a middle power and a developing nation.\r\nListed as one of the Next Eleven, its economy ranks 46th in terms of nominal gross domestic product (GDP) and 29th in terms of purchasing power parity (PPP).\r\nIt is one of the largest textile exporters in the world.\r\nIts major trading partners are the European Union, the United States, China, India, Japan, Malaysia and Singapore.\r\nWith its strategically vital location between Southern, Eastern and Southeast Asia, Bangladesh is an important promoter of regional connectivity and cooperation.\r\nIt is a founding member of SAARC, BIMSTEC, the Bangladesh-China-India-Myanmar Forum for Regional Cooperation and the Bangladesh Bhutan India Nepal Initiative.\r\nIt is also a member of the Commonwealth of Nations, the Developing 8 Countries, the OIC, the Non Aligned Movement, the Group of 77 and the World Trade Organization.\r\nBangladesh is one of the largest contributors to United Nations peacekeeping forces.\r\nTime : 1090.401 sec\r\n\"\"\"\r\n\r\n#stress=' The country of Bengal officially the People Republic of Bangladesh is a country in South Asia. It shares land borders with India and Myanmar (Burma). Nepal, Bhutan and China are located near Bangladesh but do not share a border with it. The country maritime territory in the Bay of Bengal is roughly equal to the size of its land area.[11] Bangladesh is the worlds eighth most populous country. Dhaka is its capital and largest city, followed by Chittagong, which has the country largest port. Bangladesh forms the largest and easternmost part of the Bengal region.[12] Bangladeshis include people from a range of ethnic groups and religions. Bengalis, who speak the official Bengali language, make up 98% of the population.[2][3] The politically dominant Bengali Muslims make the nation the world third largest Muslim-majority country. Most of Bangladesh is covered by the Bengal delta, the largest delta on Earth. The country has 700 rivers and 8,046 km (5,000 miles) of inland waterways. Highlands with evergreen forests are found in the northeastern and southeastern regions of the country. Bangladesh has many islands and a coral reef. The longest unbroken sea beach, Cox Bazar Beach is located here. It is home to the Sundarbans, the largest mangrove forest in the world. The country biodiversity includes a vast array of plant and wildlife, including endangered Bengal tigers, the national animal. The Greeks and Romans identified the region as Gangaridai, a powerful kingdom ofthe historical subcontinent, in the 3rd century BCE. 
Archaeological research has unearthed several ancient cities in Bangladesh, which enjoyed international trade links for millennia.[13] The Bengal Sultanate and Mughal Bengal transformed the region into a cosmopolitan Islamic imperial power between the 14th and 18th centuries. The region was home to many principalities that made use of their inland naval prowess.[14][15] It was also a notable center of the global muslin and silk trade. As part of British India, the region was influenced by the Bengali renaissance and played an important role in anti-colonial movements. The Partition of British India made East Bengal a part of the Dominion of Pakistan; and renamed it as East Pakistan. The region witnessed the Bengali Language Movement in 1952 and the Bangladesh Liberation War in 1971. After independence was achieved, a parliamentary republic was established. A presidential government was in place between 1975 and 1990, followed by a return to parliamentary democracy. The country continues to face challenges in the areas of poverty, education, healthcare and corruption.Bangladesh is a middle power and a developing nation. Listed as one of the Next Eleven, its economy ranks 46th in terms of nominal gross domestic product (GDP) and 29th in terms of purchasing power parity (PPP). It is one of the largest textile exporters in the world. Its major trading partners are the European Union, the United States, China, India, Japan, Malaysia and Singapore. With its strategically vital location between Southern, Eastern and Southeast Asia, Bangladesh is an important promoter of regional connectivity and cooperation. It is a founding member of SAARC, BIMSTEC, the Bangladesh-China-India-Myanmar Forum for Regional Cooperation and the Bangladesh Bhutan India Nepal Initiative. It is also a member of the Commonwealth of Nations, the Developing 8 Countries, the OIC, the Non Aligned Movement, the Group of 77 and the World Trade Organization. Bangladesh is one of the largest contributors to United Nations peacekeeping forces.'\r\nstress = \"I Love Bangladesh. Cz it's my country. And peoples are awesome. and the animals are also. Scenic beauty is too good.\"\r\nsents=sent_tokenize(stress)\r\nl=len(sents)\r\nprint(l)\r\ntot={}\r\nok={}\r\nfor i in range(l):\r\n tot[i]=0\r\n ok[i]=0\r\n\r\n\r\nfor i in range(l):\r\n for j in range(l):\r\n if j>i:#here condition is for reduce over calculation\r\n a=similarity(sents[i],sents[j],True)\r\n # b=similarity(sents[i],sents[j],False)\r\n b=0\r\n d=(a+b)\r\n #print(d);\r\n #total similarity of first sentence with second sentence\r\n tot[i]=tot[i]+d\r\n\r\n #to store same result i.e. total similarity of second sentence with first sentence\r\n tot[j]=tot[j]+d\r\n\r\nlst=nlargest((int)(l/3),tot)\r\n\r\nfor i in lst:\r\n ok[i]=1\r\nfor i in range(l):\r\n if ok[i]==1:\r\n print(sents[i])\r\n\r\ntime2=time.time()\r\nprint(round(time2-time1,3)) ",
"# -*- coding: utf-8 -*-\r\n\r\n# author : Sujit Pal\r\n\r\n# Note: this is a python3 updated version of http://sujitpal.blogspot.fr/2014/12/semantic-similarity-for-short-sentences.html\r\n# by mathieu Chrétien ([email protected])\r\n\r\n# contributor : Mathieu Chrétien\r\n#from nltk.tokenize import sent_tokenize,word_tokenize\r\n\r\n\r\nfrom __future__ import division\r\nimport nltk\r\nfrom nltk.tokenize import sent_tokenize,word_tokenize\r\nfrom heapq import nlargest\r\n\r\nfrom nltk.corpus import wordnet as wn\r\nfrom nltk.corpus import brown\r\nimport math\r\nimport numpy as np\r\nimport sys\r\nimport time\r\nfrom datetime import datetime\r\n\r\ntime1 = time.time()\r\n\r\n# Parameters to the algorithm. Currently set to values that was reported\r\n# in the paper to produce \"best\" results.\r\nALPHA = 0.2\r\nBETA = 0.45\r\nETA = 0.4\r\nPHI = 0.2\r\nDELTA = 0.85\r\n\r\nbrown_freqs = dict()\r\nN = 0\r\n\r\n\r\n######################### word similarity ##########################\r\n\r\ndef get_best_synset_pair(word_1, word_2):\r\n \"\"\"\r\n Choose the pair with highest path similarity among all pairs.\r\n Mimics pattern-seeking behavior of humans.\r\n \"\"\"\r\n max_sim = -1.0\r\n synsets_1 = wn.synsets(word_1)\r\n synsets_2 = wn.synsets(word_2)\r\n if len(synsets_1) == 0 or len(synsets_2) == 0:\r\n return None, None\r\n else:\r\n max_sim = -1.0\r\n best_pair = None, None\r\n for synset_1 in synsets_1:\r\n for synset_2 in synsets_2:\r\n sim = wn.path_similarity(synset_1, synset_2)\r\n if sim == None:\r\n sim = 0\r\n if sim > max_sim:\r\n max_sim = sim\r\n best_pair = synset_1, synset_2\r\n return best_pair\r\n\r\n\r\ndef length_dist(synset_1, synset_2):\r\n \"\"\"\r\n Return a measure of the length of the shortest path in the semantic\r\n ontology (Wordnet in our case as well as the paper's) between two\r\n synsets.\r\n \"\"\"\r\n l_dist = sys.maxsize\r\n if synset_1 is None or synset_2 is None:\r\n return 0.0\r\n if synset_1 == synset_2:\r\n # if synset_1 and synset_2 are the same synset return 0\r\n l_dist = 0.0\r\n else:\r\n wset_1 = set([str(x.name()) for x in synset_1.lemmas()])\r\n wset_2 = set([str(x.name()) for x in synset_2.lemmas()])\r\n if len(wset_1.intersection(wset_2)) > 0:\r\n # if synset_1 != synset_2 but there is word overlap, return 1.0\r\n l_dist = 1.0\r\n else:\r\n # just compute the shortest path between the two\r\n l_dist = synset_1.shortest_path_distance(synset_2)\r\n if l_dist is None:\r\n l_dist = 0.0\r\n # normalize path length to the range [0,1]\r\n return math.exp(-ALPHA * l_dist)\r\n\r\n\r\ndef hierarchy_dist(synset_1, synset_2):\r\n \"\"\"\r\n Return a measure of depth in the ontology to model the fact that\r\n nodes closer to the root are broader and have less semantic similarity\r\n than nodes further away from the root.\r\n \"\"\"\r\n h_dist = sys.maxsize\r\n if synset_1 is None or synset_2 is None:\r\n return h_dist\r\n if synset_1 == synset_2:\r\n # return the depth of one of synset_1 or synset_2\r\n h_dist = max([x[1] for x in synset_1.hypernym_distances()])\r\n else:\r\n # find the max depth of least common subsumer\r\n hypernyms_1 = {x[0]: x[1] for x in synset_1.hypernym_distances()}\r\n hypernyms_2 = {x[0]: x[1] for x in synset_2.hypernym_distances()}\r\n lcs_candidates = set(hypernyms_1.keys()).intersection(\r\n set(hypernyms_2.keys()))\r\n if len(lcs_candidates) > 0:\r\n lcs_dists = []\r\n for lcs_candidate in lcs_candidates:\r\n lcs_d1 = 0\r\n if lcs_candidate in hypernyms_1:\r\n lcs_d1 = hypernyms_1[lcs_candidate]\r\n lcs_d2 = 0\r\n if 
lcs_candidate in hypernyms_2:\r\n lcs_d2 = hypernyms_2[lcs_candidate]\r\n lcs_dists.append(max([lcs_d1, lcs_d2]))\r\n h_dist = max(lcs_dists)\r\n else:\r\n h_dist = 0\r\n return ((math.exp(BETA * h_dist) - math.exp(-BETA * h_dist)) /\r\n (math.exp(BETA * h_dist) + math.exp(-BETA * h_dist)))\r\n\r\n\r\ndef word_similarity(word_1, word_2):\r\n synset_pair = get_best_synset_pair(word_1, word_2)\r\n return (length_dist(synset_pair[0], synset_pair[1]) *\r\n hierarchy_dist(synset_pair[0], synset_pair[1]))\r\n\r\n\r\n######################### sentence similarity ##########################\r\n\r\ndef most_similar_word(word, word_set):\r\n \"\"\"\r\n Find the word in the joint word set that is most similar to the word\r\n passed in. We use the algorithm above to compute word similarity between\r\n the word and each word in the joint word set, and return the most similar\r\n word and the actual similarity value.\r\n \"\"\"\r\n max_sim = -1.0\r\n sim_word = \"\"\r\n for ref_word in word_set:\r\n sim = word_similarity(word, ref_word)\r\n if sim > max_sim:\r\n max_sim = sim\r\n sim_word = ref_word\r\n return sim_word, max_sim\r\n\r\n\r\ndef info_content(lookup_word):\r\n \"\"\"\r\n Uses the Brown corpus available in NLTK to calculate a Laplace\r\n smoothed frequency distribution of words, then uses this information\r\n to compute the information content of the lookup_word.\r\n \"\"\"\r\n global N\r\n if N == 0:\r\n # poor man's lazy evaluation\r\n for sent in brown.sents():\r\n for word in sent:\r\n word = word.lower()\r\n if word not in brown_freqs:\r\n brown_freqs[word] = 0\r\n brown_freqs[word] = brown_freqs[word] + 1\r\n N = N + 1\r\n lookup_word = lookup_word.lower()\r\n n = 0 if lookup_word not in brown_freqs else brown_freqs[lookup_word]\r\n return 1.0 - (math.log(n + 1) / math.log(N + 1))\r\n\r\n\r\ndef semantic_vector(words, joint_words, info_content_norm):\r\n \"\"\"\r\n Computes the semantic vector of a sentence. The sentence is passed in as\r\n a collection of words. The size of the semantic vector is the same as the\r\n size of the joint word set. The elements are 1 if a word in the sentence\r\n already exists in the joint word set, or the similarity of the word to the\r\n most similar word in the joint word set if it doesn't. 
Both values are\r\n further normalized by the word's (and similar word's) information content\r\n if info_content_norm is True.\r\n \"\"\"\r\n sent_set = set(words)\r\n semvec = np.zeros(len(joint_words))\r\n i = 0\r\n for joint_word in joint_words:\r\n if joint_word in sent_set:\r\n # if word in union exists in the sentence, s(i) = 1 (unnormalized)\r\n semvec[i] = 1.0\r\n if info_content_norm:\r\n semvec[i] = semvec[i] * math.pow(info_content(joint_word), 2)\r\n else:\r\n # find the most similar word in the joint set and set the sim value\r\n sim_word, max_sim = most_similar_word(joint_word, sent_set)\r\n semvec[i] = PHI if max_sim > PHI else 0.0\r\n if info_content_norm:\r\n semvec[i] = semvec[i] * info_content(joint_word) * info_content(sim_word)\r\n i = i + 1\r\n return semvec\r\n\r\n\r\ndef semantic_similarity(sentence_1, sentence_2, info_content_norm):\r\n \"\"\"\r\n Computes the semantic similarity between two sentences as the cosine\r\n similarity between the semantic vectors computed for each sentence.\r\n \"\"\"\r\n words_1 = nltk.word_tokenize(sentence_1)\r\n words_2 = nltk.word_tokenize(sentence_2)\r\n joint_words = set(words_1).union(set(words_2))\r\n vec_1 = semantic_vector(words_1, joint_words, info_content_norm)\r\n vec_2 = semantic_vector(words_2, joint_words, info_content_norm)\r\n return np.dot(vec_1, vec_2.T) / (np.linalg.norm(vec_1) * np.linalg.norm(vec_2))\r\n\r\n\r\n######################### word order similarity ##########################\r\n\r\ndef word_order_vector(words, joint_words, windex):\r\n \"\"\"\r\n Computes the word order vector for a sentence. The sentence is passed\r\n in as a collection of words. The size of the word order vector is the\r\n same as the size of the joint word set. The elements of the word order\r\n vector are the position mapping (from the windex dictionary) of the\r\n word in the joint set if the word exists in the sentence. If the word\r\n does not exist in the sentence, then the value of the element is the\r\n position of the most similar word in the sentence as long as the similarity\r\n is above the threshold ETA.\r\n \"\"\"\r\n wovec = np.zeros(len(joint_words))\r\n i = 0\r\n wordset = set(words)\r\n for joint_word in joint_words:\r\n if joint_word in wordset:\r\n # word in joint_words found in sentence, just populate the index\r\n wovec[i] = windex[joint_word]\r\n else:\r\n # word not in joint_words, find most similar word and populate\r\n # word_vector with the thresholded similarity\r\n sim_word, max_sim = most_similar_word(joint_word, wordset)\r\n if max_sim > ETA:\r\n wovec[i] = windex[sim_word]\r\n else:\r\n wovec[i] = 0\r\n i = i + 1\r\n return wovec\r\n\r\n\r\ndef word_order_similarity(sentence_1, sentence_2):\r\n \"\"\"\r\n Computes the word-order similarity between two sentences as the normalized\r\n difference of word order between the two sentences.\r\n \"\"\"\r\n words_1 = nltk.word_tokenize(sentence_1)\r\n words_2 = nltk.word_tokenize(sentence_2)\r\n joint_words = list(set(words_1).union(set(words_2)))\r\n windex = {x[1]: x[0] for x in enumerate(joint_words)}\r\n r1 = word_order_vector(words_1, joint_words, windex)\r\n r2 = word_order_vector(words_2, joint_words, windex)\r\n return 1.0 - (np.linalg.norm(r1 - r2) / np.linalg.norm(r1 + r2))\r\n\r\n\r\n######################### overall similarity ##########################\r\n\r\ndef similarity(sentence_1, sentence_2, info_content_norm):\r\n \"\"\"\r\n Calculate the semantic similarity between two sentences. 
The last\r\n parameter is True or False depending on whether information content\r\n normalization is desired or not.\r\n \"\"\"\r\n return DELTA * semantic_similarity(sentence_1, sentence_2, info_content_norm) + \\\r\n (1.0 - DELTA) * word_order_similarity(sentence_1, sentence_2)\r\n\"\"\"\r\nsentence_pairs = [\r\n [\"I like that bachelor.\", \"I like that bachelor.\", 1.0],\r\n [\"John is very nice.\", \"Is John very nice?\", 0.977],\r\n [\"Red alcoholic drink.\", \"A bottle of wine.\", 0.585],\r\n [\"Red alcoholic drink.\", \"Fresh orange juice.\", 0.611],\r\n [\"Red alcoholic drink.\", \"An English dictionary.\", 0.0],\r\n [\"Red alcoholic drink.\", \"Fresh apple juice.\", 0.420],\r\n [\"A glass of cider.\", \"A full cup of apple juice.\", 0.678],\r\n [\"It is a dog.\", \"That must be your dog.\", 0.739],\r\n [\"It is a dog.\", \"It is a log.\", 0.623],\r\n [\"It is a dog.\", \"It is a pig.\", 0.790],\r\n [\"Dogs are animals.\", \"They are common pets.\", 0.738],\r\n [\"Canis familiaris are animals.\", \"Dogs are common pets.\", 0.362],\r\n [\"I have a pen.\", \"Where do you live?\", 0.0],\r\n [\"I have a pen.\", \"Where is ink?\", 0.129],\r\n [\"I have a hammer.\", \"Take some nails.\", 0.508],\r\n [\"I have a hammer.\", \"Take some apples.\", 0.121]\r\n]\r\nfor sent_pair in sentence_pairs:\r\n print (\"%s\\t%s\\t%.3f\\t%.3f\\t%.3f\" % (sent_pair[0], sent_pair[1], sent_pair[2],\r\n similarity(sent_pair[0], sent_pair[1], False),\r\n similarity(sent_pair[0], sent_pair[1], True)))\r\n\"\"\"\r\n\r\nfi = open('Input5.txt', encoding=\"utf8\")\r\nstrss = \"\"\r\nfor line in fi:\r\n strss = strss + line\r\n#print(strss)\r\n\r\nclass my_struct():\r\n def __init__(self, i, j, sim):\r\n self.i = i\r\n self.j = j\r\n self.sim = sim\r\n\r\nsents=sent_tokenize(strss)\r\n#print(sents)\r\nl=len(sents)\r\n#print(l)\r\n\r\nsim = []\r\nfor i in range(l):\r\n for j in range(l):\r\n if (j>i):\r\n cal = similarity(sents[i], sents[j], False)\r\n sim.append(my_struct(i, j, cal))\r\n #print(\"%d %d %.3f\" %(i, j, cal))\r\n #print(\"%s\\t%s\\t%.3f\\t%.3f\" % (sents[i], sents[j], similarity(sents[i], sents[j], False), similarity(sents[i], sents[j], True)))\r\n\r\nlsm = len(sim)\r\nsim.sort(key=lambda x: x.sim, reverse=True)\r\n\r\n#for i in range(lsm):\r\n #print(\"%d\\t%d\\t%.3f\" % (sim[i].i, sim[i].j, sim[i].sim))\r\n\r\nhmlines = int(input(print(\"How many lines you want as summary from \" + str(l) + \" lines \")))\r\n\r\nmy_nodup_list = []\r\n\r\ntot_line_printed = 0;\r\nif(hmlines>=len(set(sents))):\r\n print(strss)\r\nelse:\r\n for i in range(lsm):\r\n if sents[sim[i].i] not in my_nodup_list:\r\n print(sents[sim[i].i], end = \" \")\r\n my_nodup_list.append(sents[sim[i].i])\r\n tot_line_printed = tot_line_printed + 1\r\n\r\n if(tot_line_printed==hmlines):\r\n break\r\n\r\n if sents[sim[i].j] not in my_nodup_list:\r\n print(sents[sim[i].j], end = \" \")\r\n my_nodup_list.append(sents[sim[i].j])\r\n tot_line_printed = tot_line_printed + 1\r\n\r\n if (tot_line_printed == hmlines):\r\n break\r\n\r\ntime2 = time.time()\r\ntotal_time=(time2 - time1)\r\nno_of_docs = 1\r\nind_time=(total_time / no_of_docs)\r\nprint('')\r\nprint (\"Process ended: \", datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\r\nprint (\"Total time required for \", no_of_docs, \" articles to be summarized: \", round(total_time,3) , \"seconds\")\r\nprint (\"Average time for each article \",round(ind_time,3),\" seconds\")"
]
| [
[
"numpy.dot",
"numpy.linalg.norm"
],
[
"numpy.dot",
"numpy.linalg.norm"
]
]
|
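Both summarizer variants ultimately score sentence pairs with a cosine similarity between semantic vectors, which is where this row's `numpy.dot` and `numpy.linalg.norm` calls come from. The measure in isolation, on toy vectors:

```python
import numpy as np

def cosine_similarity(vec_1, vec_2):
    """Cosine of the angle between two vectors: the dot product divided
    by the product of the norms, as in semantic_similarity()."""
    return np.dot(vec_1, vec_2.T) / (np.linalg.norm(vec_1) * np.linalg.norm(vec_2))

# Two toy "semantic vectors" over a joint word set of size four.
v1 = np.array([1.0, 0.8, 0.0, 0.3])
v2 = np.array([1.0, 0.0, 0.9, 0.3])
print(cosine_similarity(v1, v2))  # ~0.60 for partially overlapping sentences
```

The overall score then blends this with word-order similarity as `DELTA * semantic + (1 - DELTA) * word_order`, with `DELTA = 0.85`.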
sean-bailey/Real-Time-Voice-Cloning | [
"c8f22ee1251d27d51f7d668100b6822e36a7d519"
]
| [
"rtvc/synthesizer/inference.py"
]
| [
"import torch\nfrom rtvc.synthesizer import audio\nfrom rtvc.synthesizer.hparams import hparams\nfrom rtvc.synthesizer.models.tacotron import Tacotron\nfrom rtvc.synthesizer.utils.symbols import symbols\nfrom rtvc.synthesizer.utils.text import text_to_sequence\nfrom rtvc.vocoder.display import simple_table\nfrom pathlib import Path\nfrom typing import Union, List\nimport numpy as np\nimport librosa\n\n\nclass Synthesizer:\n sample_rate = hparams.sample_rate\n hparams = hparams\n\n def __init__(self, model_fpath: Path, verbose=True):\n \"\"\"\n The model isn't instantiated and loaded in memory until needed or until load() is called.\n \n :param model_fpath: path to the trained model file\n :param verbose: if False, prints less information when using the model\n \"\"\"\n self.model_fpath = model_fpath\n self.verbose = verbose\n\n # Check for GPU\n if torch.cuda.is_available():\n self.device = torch.device(\"cuda\")\n else:\n self.device = torch.device(\"cpu\")\n if self.verbose:\n print(\"Synthesizer using device:\", self.device)\n\n # Tacotron model will be instantiated later on first use.\n self._model = None\n\n def is_loaded(self):\n \"\"\"\n Whether the model is loaded in memory.\n \"\"\"\n return self._model is not None\n\n def load(self):\n \"\"\"\n Instantiates and loads the model given the weights file that was passed in the constructor.\n \"\"\"\n self._model = Tacotron(embed_dims=hparams.tts_embed_dims,\n num_chars=len(symbols),\n encoder_dims=hparams.tts_encoder_dims,\n decoder_dims=hparams.tts_decoder_dims,\n n_mels=hparams.num_mels,\n fft_bins=hparams.num_mels,\n postnet_dims=hparams.tts_postnet_dims,\n encoder_K=hparams.tts_encoder_K,\n lstm_dims=hparams.tts_lstm_dims,\n postnet_K=hparams.tts_postnet_K,\n num_highways=hparams.tts_num_highways,\n dropout=hparams.tts_dropout,\n stop_threshold=hparams.tts_stop_threshold,\n speaker_embedding_size=hparams.speaker_embedding_size).to(self.device)\n\n self._model.load(self.model_fpath)\n self._model.eval()\n\n if self.verbose:\n print(\"Loaded synthesizer \\\"%s\\\" trained to step %d\" % (\n self.model_fpath.name, self._model.state_dict()[\"step\"]))\n\n def synthesize_spectrograms(self, texts: List[str],\n embeddings: Union[np.ndarray, List[np.ndarray]],\n return_alignments=False):\n \"\"\"\n Synthesizes mel spectrograms from texts and speaker embeddings.\n\n :param texts: a list of N text prompts to be synthesized\n :param embeddings: a numpy array or list of speaker embeddings of shape (N, 256) \n :param return_alignments: if True, a matrix representing the alignments between the \n characters\n and each decoder output step will be returned for each spectrogram\n :return: a list of N melspectrograms as numpy arrays of shape (80, Mi), where Mi is the \n sequence length of spectrogram i, and possibly the alignments.\n \"\"\"\n # Load the model on the first request.\n if not self.is_loaded():\n self.load()\n\n # Print some info about the model when it is loaded \n tts_k = self._model.get_step() // 1000\n\n simple_table([(\"Tacotron\", str(tts_k) + \"k\"),\n (\"r\", self._model.r)])\n\n # Preprocess text inputs\n inputs = [text_to_sequence(text.strip(), hparams.tts_cleaner_names) for text in texts]\n if not isinstance(embeddings, list):\n embeddings = [embeddings]\n\n # Batch inputs\n batched_inputs = [inputs[i:i + hparams.synthesis_batch_size]\n for i in range(0, len(inputs), hparams.synthesis_batch_size)]\n batched_embeds = [embeddings[i:i + hparams.synthesis_batch_size]\n for i in range(0, len(embeddings), 
hparams.synthesis_batch_size)]\n\n specs = []\n for i, batch in enumerate(batched_inputs, 1):\n if self.verbose:\n print(f\"\\n| Generating {i}/{len(batched_inputs)}\")\n\n # Pad texts so they are all the same length\n text_lens = [len(text) for text in batch]\n max_text_len = max(text_lens)\n chars = [pad1d(text, max_text_len) for text in batch]\n chars = np.stack(chars)\n\n # Stack speaker embeddings into 2D array for batch processing\n speaker_embeds = np.stack(batched_embeds[i - 1])\n\n # Convert to tensor\n chars = torch.tensor(chars).long().to(self.device)\n speaker_embeddings = torch.tensor(speaker_embeds).float().to(self.device)\n\n # Inference\n _, mels, alignments = self._model.generate(chars, speaker_embeddings)\n mels = mels.detach().cpu().numpy()\n for m in mels:\n # Trim silence from end of each spectrogram\n while np.max(m[:, -1]) < hparams.tts_stop_threshold:\n m = m[:, :-1]\n specs.append(m)\n\n if self.verbose:\n print(\"\\n\\nDone.\\n\")\n return (specs, alignments) if return_alignments else specs\n\n @staticmethod\n def load_preprocess_wav(fpath):\n \"\"\"\n Loads and preprocesses an audio file under the same conditions the audio files were used to\n train the synthesizer. \n \"\"\"\n wav = librosa.load(str(fpath), hparams.sample_rate)[0]\n if hparams.rescale:\n wav = wav / np.abs(wav).max() * hparams.rescaling_max\n return wav\n\n @staticmethod\n def make_spectrogram(fpath_or_wav: Union[str, Path, np.ndarray]):\n \"\"\"\n Creates a mel spectrogram from an audio file in the same manner as the mel spectrograms that \n were fed to the synthesizer when training.\n \"\"\"\n if isinstance(fpath_or_wav, str) or isinstance(fpath_or_wav, Path):\n wav = Synthesizer.load_preprocess_wav(fpath_or_wav)\n else:\n wav = fpath_or_wav\n\n mel_spectrogram = audio.melspectrogram(wav, hparams).astype(np.float32)\n return mel_spectrogram\n\n @staticmethod\n def griffin_lim(mel):\n \"\"\"\n Inverts a mel spectrogram using Griffin-Lim. The mel spectrogram is expected to have been built\n with the same parameters present in hparams.py.\n \"\"\"\n return audio.inv_mel_spectrogram(mel, hparams)\n\n\ndef pad1d(x, max_len, pad_value=0):\n return np.pad(x, (0, max_len - len(x)), mode=\"constant\", constant_values=pad_value)\n"
]
| [
[
"numpy.max",
"torch.device",
"torch.cuda.is_available",
"torch.tensor",
"numpy.stack",
"numpy.abs"
]
]
|
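Two reusable patterns from `inference.py`: picking a device with `torch.cuda.is_available`, and stacking per-item NumPy arrays into one batch tensor before moving it to that device. A minimal sketch with random stand-ins for the speaker embeddings; the 0.9 peak target is an assumed value standing in for `hparams.rescaling_max`:

```python
import numpy as np
import torch

# Device selection, as in Synthesizer.__init__.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Batching, as in synthesize_spectrograms: np.stack turns a list of
# (256,) arrays into one (N, 256) array, which torch.tensor converts
# and .to(device) places on the GPU or CPU.
speaker_embeds = [np.random.rand(256).astype(np.float32) for _ in range(4)]
speaker_embeddings = torch.tensor(np.stack(speaker_embeds)).float().to(device)
print(speaker_embeddings.shape, speaker_embeddings.device)

# Peak rescaling, as in load_preprocess_wav: divide by the absolute
# maximum, then scale to an assumed target peak.
wav = np.random.randn(16000)
wav = wav / np.abs(wav).max() * 0.9
```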
crazyavi/Hive | [
"d24e2eec21b2c4bc96c0e008510155827398ee32"
]
| [
"Example3_EvolveAPainting.py"
]
| [
"#!/usr/bin/env python\n\n# ---- MODULE DOCSTRING\n\n__doc__ = \"\"\"\n\n(C) Hive, Romain Wuilbercq, 2017\n _\n /_/_ .'''.\n =O(_)))) ...' `.\n \\_\\ `. .'''X\n `..'\n.---. .---..-./`) ,---. ,---. .-''-.\n| | |_ _|\\ .-.')| / | | .'_ _ \\\n| | ( ' )/ `-' \\| | | .'/ ( ` ) '\n| '-(_{;}_)`-'`\"`| | _ | |. (_ o _) |\n| (_,_) .---. | _( )_ || (_,_)___|\n| _ _--. | | | \\ (_ o._) /' \\ .---.\n|( ' ) | | | | \\ (_,_) / \\ `-' /\n(_{;}_)| | | | \\ / \\ /\n'(_,_) '---' '---' `---` `'-..-'\n\nThe Artificial Bee Colony (ABC) algorithm is based on the\nintelligent foraging behaviour of honey bee swarm, and was first proposed\nby Karaboga in 2005.\n\nDescription:\n-----------\n\nThis example shows how to evolve a famous painting using polygons.\n\nThe location of a number of polygons and RGB colors are evolved by an Artificial\nBee Colony algorithm to replicate a famous painting from Henri Matisse.\n\nThis example is inspired by a blog post written by Roger Alsing.\n\nReference:\n---------\n\nhttp://rogeralsing.com/2008/12/07/genetic-programming-evolution-of-mona-lisa/\n\nDependencies:\n------------\n\n- PIL\n- sklearn-image\n- numpy\n- matplotlib\n\n\"\"\"\n\n# ---- IMPORT MODULES\n\n# import internal modules\n\nfrom Hive import Hive\n\n# import external modules\n\nimport numpy as np\n\nfrom sklearn.metrics import mean_squared_error as mse\n\ntry:\n from PIL import ImageChops, Image\nexcept:\n raise ImportError(\"PIL module not found.\")\n\ntry:\n import matplotlib.path as mpath\n import matplotlib.pyplot as plt\n import matplotlib as mpl\nexcept:\n raise ImportError(\"matplotlib module not found.\")\n\ntry:\n from skimage import color\nexcept:\n raise ImportError(\"sklearn-image module not found.\")\n\n# ---- PROCESS IMAGE\n\n# loads original image\nsource_image = Image.open(\"assets/matisse.jpg\")\nxsize, ysize = source_image.size\n\n# post-processes source image as a np.ndarray\nSOURCE_IMAGE = np.array(source_image)\n\n# defines size of image [pixels/inch]\ndpi = 80\n\n# converts image to gray scale\nsource_image_gray = color.rgb2gray(SOURCE_IMAGE)\n\n# ---- DEFINE BLANK CANVAS\n\n# define image polygons parameters\nnb_polygons, nb_pts_per_polygon, nb_rgb = 8, 4, 3\n\ndef polygon(x, y, up=1):\n \"\"\" Creates a polygon. \"\"\"\n\n # defines vertex coordinates of a dummy polygon \"path\"\n vertices = [(x[0], y[0]), (x[1], y[1]),\n (x[2], y[2]), (x[3], y[3]),\n (x[0], y[0]) ]\n\n # creates a polygon\n poly = mpath.Path(vertices, [mpath.Path.MOVETO] + \\\n (len(vertices)-1) * [mpath.Path.LINETO])\n\n # returns polygon\n return poly\n\ndef create_image(vector):\n \"\"\" Creates an image from a set of polygons. 
\"\"\"\n\n # converts vector input to numpy.ndarray\n vector = np.array(vector)\n\n # creates a list of shapes and colors\n shapes = []; colors = [];\n for ix in range(nb_polygons):\n\n # processes input vector - \"unrolling\" vector\n ind_start_x = ix * (nb_pts_per_polygon * 2 + 3)\n ind_start_y = ind_start_x + 4\n ind_start_c = ind_start_y + 4\n x = vector[ind_start_x : ind_start_y]\n y = vector[ind_start_y : ind_start_c]\n color = vector[ind_start_c : ind_start_c + 3]\n\n # creates list of polygons and colors\n shapes.append(polygon(x*xsize, y*ysize))\n colors.append([color[i] for i in range(3)])\n\n # creates a figure of the same dimension as source image\n fig, ax = plt.subplots(figsize=(xsize/dpi, ysize/dpi))\n ax.set_rasterization_zorder(1)\n\n # creates a collection of polygonal shapes\n set_of_shapes = mpl.collections.PathCollection(shapes,\n facecolor=colors,\n linewidth=0)\n\n # creates an image\n ax.add_collection(set_of_shapes)\n ax.set_frame_on(False)\n ax.axis('off')\n ax.autoscale_view()\n\n # draws image\n fig.tight_layout(pad=0)\n fig.canvas.draw()\n\n # converts image to np.ndarray\n data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')\n image = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n\n # returns image array\n return image\n\n\n# ---- CREATE EVALUATOR\n\ndef compare_images_mse(source_image, new_image):\n \"\"\" Computes the root mean-square. \"\"\"\n\n err = np.sum((source_image.astype(\"float\") - new_image.astype(\"float\")) ** 2)\n err /= float(source_image.shape[0] * source_image.shape[1])\n\n return err\n\ndef evaluator(vector):\n \"\"\" Computes similarity between newly created and source image. \"\"\"\n\n # creates an image\n image = create_image(vector)\n\n # closes current figure\n plt.close()\n\n # compare new image with source image\n return compare_images_mse(SOURCE_IMAGE, image)\n\n\n# ---- SOLVE TEST CASE\n\ndef run():\n\n # creates model\n ndim = int(nb_polygons * (2 * nb_pts_per_polygon + nb_rgb))\n model = Hive.BeeHive(lower = [0]*ndim ,\n upper = [1]*ndim ,\n fun = evaluator ,\n numb_bees = 20 ,\n max_itrs = 1000 ,\n verbose = True ,)\n\n # runs model\n model.run()\n\n # saves an image of the end result\n solution = create_image(model.solution)\n plt.savefig('solutionMatisse.png', bbox_inches='tight')\n\n\nif __name__ == \"__main__\":\n run()\n\n\n# ---- END\n"
]
| [
[
"numpy.array",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplots",
"matplotlib.collections.PathCollection"
]
]
|
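The evaluator above rasterizes each candidate figure into a NumPy array via `fig.canvas.tostring_rgb()` plus `np.fromstring`, a call that NumPy has long deprecated. A sketch of the same figure-to-array step using the Agg canvas's `buffer_rgba()` instead (available in matplotlib 3.1+; treat the exact canvas API as version-dependent):

```python
import numpy as np
import matplotlib
matplotlib.use("Agg")  # headless backend so buffer_rgba() is available
import matplotlib.pyplot as plt
import matplotlib.path as mpath
import matplotlib.collections as mcollections

# One triangle as a Path, standing in for the evolved polygons.
verts = [(0.1, 0.1), (0.9, 0.2), (0.5, 0.8), (0.1, 0.1)]
codes = [mpath.Path.MOVETO] + [mpath.Path.LINETO] * 3
shapes = [mpath.Path(verts, codes)]

fig, ax = plt.subplots(figsize=(2, 2), dpi=80)
ax.add_collection(mcollections.PathCollection(shapes,
                                              facecolor=[(0.8, 0.2, 0.2)],
                                              linewidth=0))
ax.axis("off")
fig.canvas.draw()

# buffer_rgba() exposes the rendered RGBA pixels; np.asarray gives an
# (H, W, 4) uint8 image without any deprecated string round-trip.
image = np.asarray(fig.canvas.buffer_rgba())
plt.close(fig)
print(image.shape, image.dtype)  # e.g. (160, 160, 4) uint8
```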
kajal5888/syncopy | [
"f7d49808a09ff65eec64cda1cfb4c87a012e0c2b"
]
| [
"syncopy/specest/_norm_spec.py"
]
| [
"# -*- coding: utf-8 -*-\n#\n# Helper routines to normalize Fourier spectra\n#\n\nimport numpy as np\n\n\ndef _norm_spec(ftr, nSamples, fs):\n\n \"\"\"\n Normalizes the complex Fourier transform to\n power spectral density units.\n \"\"\"\n\n # frequency bins\n delta_f = fs / nSamples\n ftr *= np.sqrt(2) / (nSamples * np.sqrt(delta_f))\n\n return ftr\n\n\ndef _norm_taper(taper, windows, nSamples):\n\n \"\"\"\n Helper function to normalize tapers such\n that the resulting spectra are normalized\n to power density units.\n \"\"\"\n\n if taper == 'dpss':\n windows *= np.sqrt(nSamples)\n # weird 3 point normalization,\n # checks out exactly for 'hann' though\n elif taper != 'boxcar':\n windows *= np.sqrt(4 / 3) * np.sqrt(nSamples / windows.sum())\n\n return windows\n"
]
| [
[
"numpy.sqrt"
]
]
|
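The scaling in _norm_spec can be sanity-checked against Parseval's theorem: once a one-sided spectrum is normalized to power spectral density units, integrating its squared magnitude over frequency should approximately recover the signal's mean squared amplitude. A minimal sketch (the sqrt(2) one-sided factor is applied to the DC and Nyquist bins as well here, so the agreement is approximate, not exact):

import numpy as np

fs, nSamples = 1000, 4096
rng = np.random.default_rng(42)
signal = rng.standard_normal(nSamples)  # white noise, variance ~ 1

spec = np.fft.rfft(signal)
delta_f = fs / nSamples
spec = spec * np.sqrt(2) / (nSamples * np.sqrt(delta_f))  # same scaling as _norm_spec

psd = np.abs(spec) ** 2            # one-sided power spectral density
total_power = psd.sum() * delta_f  # integrate over frequency
print(total_power, signal.var())   # should agree to within a few percent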
SimonCSelg/yt-sselg | [
"3d5ab92540ae33d8ed0b9694f0ecf82166ba0aea"
]
| [
"yt/data_objects/data_containers.py"
]
| [
"\"\"\"\nVarious non-grid data containers.\n\n\n\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (c) 2013, yt Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\nimport itertools\nimport uuid\n\nimport numpy as np\nimport weakref\nimport shelve\n\nfrom collections import defaultdict\nfrom contextlib import contextmanager\n\nfrom yt.fields.derived_field import \\\n DerivedField\nfrom yt.frontends.ytdata.utilities import \\\n save_as_dataset\nfrom yt.funcs import \\\n get_output_filename, \\\n mylog, \\\n ensure_list, \\\n fix_axis, \\\n iterable, validate_width_tuple\nfrom yt.units.unit_object import UnitParseError\nfrom yt.units.yt_array import \\\n YTArray, \\\n YTQuantity\nimport yt.units.dimensions as ytdims\nfrom yt.utilities.exceptions import \\\n YTUnitConversionError, \\\n YTFieldUnitError, \\\n YTFieldUnitParseError, \\\n YTSpatialFieldUnitError, \\\n YTCouldNotGenerateField, \\\n YTFieldNotParseable, \\\n YTFieldNotFound, \\\n YTFieldTypeNotFound, \\\n YTDataSelectorNotImplemented, \\\n YTDimensionalityError, \\\n YTBooleanObjectError, \\\n YTBooleanObjectsWrongDataset, YTException\nfrom yt.utilities.lib.marching_cubes import \\\n march_cubes_grid, march_cubes_grid_flux\nfrom yt.utilities.parallel_tools.parallel_analysis_interface import \\\n ParallelAnalysisInterface\nfrom yt.utilities.parameter_file_storage import \\\n ParameterFileStore\nfrom yt.utilities.amr_kdtree.api import \\\n AMRKDTree\nfrom .derived_quantities import DerivedQuantityCollection\nfrom yt.fields.field_exceptions import \\\n NeedsGridType\nimport yt.geometry.selection_routines\nfrom yt.geometry.selection_routines import \\\n compose_selector\nfrom yt.extern.six import add_metaclass, string_types\nfrom yt.data_objects.field_data import YTFieldData\nfrom yt.data_objects.profiles import create_profile\n\ndata_object_registry = {}\n\ndef sanitize_weight_field(ds, field, weight):\n field_object = ds._get_field_info(field)\n if weight is None:\n if field_object.particle_type is True:\n weight_field = (field_object.name[0], 'particle_ones')\n else:\n weight_field = ('index', 'ones')\n else:\n weight_field = weight\n return weight_field\n\nclass RegisteredDataContainer(type):\n def __init__(cls, name, b, d):\n type.__init__(cls, name, b, d)\n if hasattr(cls, \"_type_name\") and not cls._skip_add:\n data_object_registry[cls._type_name] = cls\n\n@add_metaclass(RegisteredDataContainer)\nclass YTDataContainer(object):\n \"\"\"\n Generic YTDataContainer container. 
By itself, will attempt to\n generate field, read fields (method defined by derived classes)\n and deal with passing back and forth field parameters.\n \"\"\"\n _chunk_info = None\n _num_ghost_zones = 0\n _con_args = ()\n _skip_add = False\n _container_fields = ()\n _tds_attrs = ()\n _tds_fields = ()\n _field_cache = None\n _index = None\n\n def __init__(self, ds, field_parameters):\n \"\"\"\n Typically this is never called directly, but only due to inheritance.\n It associates a :class:`~yt.data_objects.static_output.Dataset` with the class,\n sets its initial set of fields, and the remainder of the arguments\n are passed as field_parameters.\n \"\"\"\n # ds is typically set in the new object type created in Dataset._add_object_class\n # but it can also be passed as a parameter to the constructor, in which case it will\n # override the default. This code ensures it is never not set.\n if ds is not None:\n self.ds = ds\n else:\n if not hasattr(self, \"ds\"):\n raise RuntimeError(\"Error: ds must be set either through class type or parameter to the constructor\")\n\n self._current_particle_type = \"all\"\n self._current_fluid_type = self.ds.default_fluid_type\n self.ds.objects.append(weakref.proxy(self))\n mylog.debug(\"Appending object to %s (type: %s)\", self.ds, type(self))\n self.field_data = YTFieldData()\n self._default_field_parameters = {\n 'center': self.ds.arr(np.zeros(3, dtype='float64'), 'cm'),\n 'bulk_velocity': self.ds.arr(np.zeros(3, dtype='float64'), 'cm/s'),\n 'bulk_magnetic_field': self.ds.arr(np.zeros(3, dtype='float64'), 'G'),\n 'normal': self.ds.arr([0.0, 0.0, 1.0], ''),\n }\n if field_parameters is None: field_parameters = {}\n self._set_default_field_parameters()\n for key, val in field_parameters.items():\n self.set_field_parameter(key, val)\n\n @property\n def pf(self):\n return getattr(self, 'ds', None)\n\n @property\n def index(self):\n if self._index is not None:\n return self._index\n self._index = self.ds.index\n return self._index\n\n def _debug(self):\n \"\"\"\n When called from within a derived field, this will run pdb. However,\n during field detection, it will not. 
This allows you to more easily\n debug fields that are being called on actual objects.\n \"\"\"\n import pdb\n pdb.set_trace()\n\n def _set_default_field_parameters(self):\n self.field_parameters = {}\n for k,v in self._default_field_parameters.items():\n self.set_field_parameter(k,v)\n\n def _is_default_field_parameter(self, parameter):\n if parameter not in self._default_field_parameters:\n return False\n return self._default_field_parameters[parameter] is \\\n self.field_parameters[parameter]\n\n def apply_units(self, arr, units):\n return self.ds.arr(arr, input_units = units)\n\n def _set_center(self, center):\n if center is None:\n self.center = None\n return\n elif isinstance(center, YTArray):\n self.center = self.ds.arr(center.copy())\n self.center.convert_to_units('code_length')\n elif isinstance(center, (list, tuple, np.ndarray)):\n if isinstance(center[0], YTQuantity):\n self.center = self.ds.arr([c.copy() for c in center])\n self.center.convert_to_units('code_length')\n else:\n self.center = self.ds.arr(center, 'code_length')\n elif isinstance(center, string_types):\n if center.lower() in (\"c\", \"center\"):\n self.center = self.ds.domain_center\n # is this dangerous for race conditions?\n elif center.lower() in (\"max\", \"m\"):\n self.center = self.ds.find_max((\"gas\", \"density\"))[1]\n elif center.startswith(\"max_\"):\n self.center = self.ds.find_max(center[4:])[1]\n elif center.lower() == \"min\":\n self.center = self.ds.find_min((\"gas\", \"density\"))[1]\n elif center.startswith(\"min_\"):\n self.center = self.ds.find_min(center[4:])[1]\n else:\n self.center = self.ds.arr(center, 'code_length', dtype='float64')\n self.set_field_parameter('center', self.center)\n\n def get_field_parameter(self, name, default=None):\n \"\"\"\n This is typically only used by derived field functions, but\n it returns parameters used to generate fields.\n \"\"\"\n if name in self.field_parameters:\n return self.field_parameters[name]\n else:\n return default\n\n def set_field_parameter(self, name, val):\n \"\"\"\n Here we set up dictionaries that get passed up and down and ultimately\n to derived fields.\n \"\"\"\n self.field_parameters[name] = val\n\n def has_field_parameter(self, name):\n \"\"\"\n Checks if a field parameter is set.\n \"\"\"\n return name in self.field_parameters\n\n def clear_data(self):\n \"\"\"\n Clears out all data from the YTDataContainer instance, freeing memory.\n \"\"\"\n self.field_data.clear()\n\n def has_key(self, key):\n \"\"\"\n Checks if a data field already exists.\n \"\"\"\n return key in self.field_data\n\n def keys(self):\n return self.field_data.keys()\n\n def _reshape_vals(self, arr):\n return arr\n\n def __getitem__(self, key):\n \"\"\"\n Returns a single field. Will add if necessary.\n \"\"\"\n f = self._determine_fields([key])[0]\n if f not in self.field_data and key not in self.field_data:\n if f in self._container_fields:\n self.field_data[f] = \\\n self.ds.arr(self._generate_container_field(f))\n return self.field_data[f]\n else:\n self.get_data(f)\n # fi.units is the unit expression string. 
We depend on the registry\n # hanging off the dataset to define this unit object.\n # Note that this is less succinct so that we can account for the case\n # when there are, for example, no elements in the object.\n rv = self.field_data.get(f, None)\n if rv is None:\n if isinstance(f, tuple):\n fi = self.ds._get_field_info(*f)\n elif isinstance(f, bytes):\n fi = self.ds._get_field_info(\"unknown\", f)\n rv = self.ds.arr(self.field_data[key], fi.units)\n return rv\n\n def __setitem__(self, key, val):\n \"\"\"\n Sets a field to be some other value.\n \"\"\"\n self.field_data[key] = val\n\n def __delitem__(self, key):\n \"\"\"\n Deletes a field\n \"\"\"\n if key not in self.field_data:\n key = self._determine_fields(key)[0]\n del self.field_data[key]\n\n def _generate_field(self, field):\n ftype, fname = field\n finfo = self.ds._get_field_info(*field)\n with self._field_type_state(ftype, finfo):\n if fname in self._container_fields:\n tr = self._generate_container_field(field)\n if finfo.particle_type: # This is a property now\n tr = self._generate_particle_field(field)\n else:\n tr = self._generate_fluid_field(field)\n if tr is None:\n raise YTCouldNotGenerateField(field, self.ds)\n return tr\n\n def _generate_fluid_field(self, field):\n # First we check the validator\n ftype, fname = field\n finfo = self.ds._get_field_info(ftype, fname)\n if self._current_chunk is None or \\\n self._current_chunk.chunk_type != \"spatial\":\n gen_obj = self\n else:\n gen_obj = self._current_chunk.objs[0]\n gen_obj.field_parameters = self.field_parameters\n try:\n finfo.check_available(gen_obj)\n except NeedsGridType as ngt_exception:\n rv = self._generate_spatial_fluid(field, ngt_exception.ghost_zones)\n else:\n rv = finfo(gen_obj)\n return rv\n\n def _generate_spatial_fluid(self, field, ngz):\n finfo = self.ds._get_field_info(*field)\n if finfo.units is None:\n raise YTSpatialFieldUnitError(field)\n units = finfo.units\n rv = self.ds.arr(np.empty(self.ires.size, dtype=\"float64\"), units)\n ind = 0\n if ngz == 0:\n deps = self._identify_dependencies([field], spatial = True)\n deps = self._determine_fields(deps)\n for io_chunk in self.chunks([], \"io\", cache = False):\n for i,chunk in enumerate(self.chunks([], \"spatial\", ngz = 0,\n preload_fields = deps)):\n o = self._current_chunk.objs[0]\n with o._activate_cache():\n ind += o.select(self.selector, self[field], rv, ind)\n else:\n chunks = self.index._chunk(self, \"spatial\", ngz = ngz)\n for i, chunk in enumerate(chunks):\n with self._chunked_read(chunk):\n gz = self._current_chunk.objs[0]\n gz.field_parameters = self.field_parameters\n wogz = gz._base_grid\n ind += wogz.select(\n self.selector,\n gz[field][ngz:-ngz, ngz:-ngz, ngz:-ngz],\n rv, ind)\n return rv\n\n def _generate_particle_field(self, field):\n # First we check the validator\n ftype, fname = field\n if self._current_chunk is None or \\\n self._current_chunk.chunk_type != \"spatial\":\n gen_obj = self\n else:\n gen_obj = self._current_chunk.objs[0]\n try:\n finfo = self.ds._get_field_info(*field)\n finfo.check_available(gen_obj)\n except NeedsGridType as ngt_exception:\n if ngt_exception.ghost_zones != 0:\n raise NotImplementedError\n size = self._count_particles(ftype)\n rv = self.ds.arr(np.empty(size, dtype=\"float64\"), finfo.units)\n ind = 0\n for io_chunk in self.chunks([], \"io\", cache = False):\n for i, chunk in enumerate(self.chunks(field, \"spatial\")):\n x, y, z = (self[ftype, 'particle_position_%s' % ax]\n for ax in 'xyz')\n if x.size == 0: continue\n mask = 
self._current_chunk.objs[0].select_particles(\n self.selector, x, y, z)\n if mask is None: continue\n # This requests it from the grid and does NOT mask it\n data = self[field][mask]\n rv[ind:ind+data.size] = data\n ind += data.size\n else:\n with self._field_type_state(ftype, finfo, gen_obj):\n rv = self.ds._get_field_info(*field)(gen_obj)\n return rv\n\n def _count_particles(self, ftype):\n for (f1, f2), val in self.field_data.items():\n if f1 == ftype:\n return val.size\n size = 0\n for io_chunk in self.chunks([], \"io\", cache = False):\n for i,chunk in enumerate(self.chunks([], \"spatial\")):\n x, y, z = (self[ftype, 'particle_position_%s' % ax]\n for ax in 'xyz')\n if x.size == 0: continue\n size += self._current_chunk.objs[0].count_particles(\n self.selector, x, y, z)\n return size\n\n def _generate_container_field(self, field):\n raise NotImplementedError\n\n def _parameter_iterate(self, seq):\n for obj in seq:\n old_fp = obj.field_parameters\n obj.field_parameters = self.field_parameters\n yield obj\n obj.field_parameters = old_fp\n\n _key_fields = None\n def write_out(self, filename, fields=None, format=\"%0.16e\"):\n \"\"\"Write out the YTDataContainer object in a text file.\n\n This function will take a data object and produce a tab delimited text\n file containing the fields presently existing and the fields given in\n the ``fields`` list.\n\n Parameters\n ----------\n filename : String\n The name of the file to write to.\n\n fields : List of string, Default = None\n If this is supplied, these fields will be added to the list of\n fields to be saved to disk. If not supplied, whatever fields\n presently exist will be used.\n\n format : String, Default = \"%0.16e\"\n Format of numbers to be written in the file.\n\n Raises\n ------\n ValueError\n Raised when there is no existing field.\n\n YTException\n Raised when field_type of supplied fields is inconsistent with the\n field_type of existing fields.\n\n Examples\n --------\n >>> ds = fake_particle_ds()\n >>> sp = ds.sphere(ds.domain_center, 0.25)\n >>> sp.write_out(\"sphere_1.txt\")\n >>> sp.write_out(\"sphere_2.txt\", fields=[\"cell_volume\"])\n \"\"\"\n if fields is None:\n fields = sorted(self.field_data.keys())\n\n if self._key_fields is None:\n raise ValueError\n\n field_order = self._key_fields\n diff_fields = [field for field in fields if field not in field_order]\n field_order += diff_fields\n field_order = sorted(self._determine_fields(field_order))\n field_types = {u for u, v in field_order}\n\n if len(field_types) != 1:\n diff_fields = self._determine_fields(diff_fields)\n req_ftype = self._determine_fields(self._key_fields[0])[0][0]\n f_type = {f for f in diff_fields if f[0] != req_ftype }\n msg = (\"Field type %s of the supplied field %s is inconsistent\"\n \" with field type '%s'.\" %\n ([f[0] for f in f_type], [f[1] for f in f_type], req_ftype))\n raise YTException(msg)\n\n for field in field_order: self[field]\n with open(filename, \"w\") as fid:\n field_header = [str(f) for f in field_order]\n fid.write(\"\\t\".join([\"#\"] + field_header + [\"\\n\"]))\n field_data = np.array([self.field_data[field] for field in field_order])\n for line in range(field_data.shape[1]):\n field_data[:, line].tofile(fid, sep=\"\\t\", format=format)\n fid.write(\"\\n\")\n\n def save_object(self, name, filename=None):\n \"\"\"\n Save an object. If *filename* is supplied, it will be stored in\n a :mod:`shelve` file of that name. 
Otherwise, it will be stored via\n :meth:`yt.data_objects.api.GridIndex.save_object`.\n \"\"\"\n if filename is not None:\n ds = shelve.open(filename, protocol=-1)\n if name in ds:\n mylog.info(\"Overwriting %s in %s\", name, filename)\n ds[name] = self\n ds.close()\n else:\n self.index.save_object(self, name)\n\n def to_dataframe(self, fields=None):\n r\"\"\"Export a data object to a pandas DataFrame.\n\n This function will take a data object and construct from it and\n optionally a list of fields a pandas DataFrame object. If pandas is\n not importable, this will raise ImportError.\n\n Parameters\n ----------\n fields : list of strings or tuple field names, default None\n If this is supplied, it is the list of fields to be exported into\n the data frame. If not supplied, whatever fields presently exist\n will be used.\n\n Returns\n -------\n df : DataFrame\n The data contained in the object.\n\n Examples\n --------\n\n >>> dd = ds.all_data()\n >>> df1 = dd.to_dataframe([\"density\", \"temperature\"])\n >>> dd[\"velocity_magnitude\"]\n >>> df2 = dd.to_dataframe()\n \"\"\"\n import pandas as pd\n data = {}\n if fields is not None:\n for f in fields:\n data[f] = self[f]\n else:\n data.update(self.field_data)\n df = pd.DataFrame(data)\n return df\n\n def save_as_dataset(self, filename=None, fields=None):\n r\"\"\"Export a data object to a reloadable yt dataset.\n\n This function will take a data object and output a dataset\n containing either the fields presently existing or fields\n given in the ``fields`` list. The resulting dataset can be\n reloaded as a yt dataset.\n\n Parameters\n ----------\n filename : str, optional\n The name of the file to be written. If None, the name\n will be a combination of the original dataset and the type\n of data container.\n fields : list of string or tuple field names, optional\n If this is supplied, it is the list of fields to be saved to\n disk. 
If not supplied, all the fields that have been queried\n will be saved.\n\n Returns\n -------\n filename : str\n The name of the file that has been created.\n\n Examples\n --------\n\n >>> import yt\n >>> ds = yt.load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n >>> sp = ds.sphere(ds.domain_center, (10, \"Mpc\"))\n >>> fn = sp.save_as_dataset(fields=[\"density\", \"temperature\"])\n >>> sphere_ds = yt.load(fn)\n >>> # the original data container is available as the data attribute\n >>> print (sds.data[\"density\"])\n [ 4.46237613e-32 4.86830178e-32 4.46335118e-32 ..., 6.43956165e-30\n 3.57339907e-30 2.83150720e-30] g/cm**3\n >>> ad = sphere_ds.all_data()\n >>> print (ad[\"temperature\"])\n [ 1.00000000e+00 1.00000000e+00 1.00000000e+00 ..., 4.40108359e+04\n 4.54380547e+04 4.72560117e+04] K\n\n \"\"\"\n\n keyword = \"%s_%s\" % (str(self.ds), self._type_name)\n filename = get_output_filename(filename, keyword, \".h5\")\n\n data = {}\n if fields is not None:\n for f in self._determine_fields(fields):\n data[f] = self[f]\n else:\n data.update(self.field_data)\n # get the extra fields needed to reconstruct the container\n tds_fields = tuple([('index', t) for t in self._tds_fields])\n for f in [f for f in self._container_fields + tds_fields \\\n if f not in data]:\n data[f] = self[f]\n data_fields = list(data.keys())\n\n need_grid_positions = False\n need_particle_positions = False\n ptypes = []\n ftypes = {}\n for field in data_fields:\n if field in self._container_fields:\n ftypes[field] = \"grid\"\n need_grid_positions = True\n elif self.ds.field_info[field].particle_type:\n if field[0] not in ptypes:\n ptypes.append(field[0])\n ftypes[field] = field[0]\n need_particle_positions = True\n else:\n ftypes[field] = \"grid\"\n need_grid_positions = True\n # projections and slices use px and py, so don't need positions\n if self._type_name in [\"cutting\", \"proj\", \"slice\"]:\n need_grid_positions = False\n\n if need_particle_positions:\n for ax in \"xyz\":\n for ptype in ptypes:\n p_field = (ptype, \"particle_position_%s\" % ax)\n if p_field in self.ds.field_info and p_field not in data:\n data_fields.append(field)\n ftypes[p_field] = p_field[0]\n data[p_field] = self[p_field]\n if need_grid_positions:\n for ax in \"xyz\":\n g_field = (\"index\", ax)\n if g_field in self.ds.field_info and g_field not in data:\n data_fields.append(g_field)\n ftypes[g_field] = \"grid\"\n data[g_field] = self[g_field]\n g_field = (\"index\", \"d\" + ax)\n if g_field in self.ds.field_info and g_field not in data:\n data_fields.append(g_field)\n ftypes[g_field] = \"grid\"\n data[g_field] = self[g_field]\n\n extra_attrs = dict([(arg, getattr(self, arg, None))\n for arg in self._con_args + self._tds_attrs])\n extra_attrs[\"con_args\"] = self._con_args\n extra_attrs[\"data_type\"] = \"yt_data_container\"\n extra_attrs[\"container_type\"] = self._type_name\n extra_attrs[\"dimensionality\"] = self._dimensionality\n save_as_dataset(self.ds, filename, data, field_types=ftypes,\n extra_attrs=extra_attrs)\n\n return filename\n\n def to_glue(self, fields, label=\"yt\", data_collection=None):\n \"\"\"\n Takes specific *fields* in the container and exports them to\n Glue (http://www.glueviz.org) for interactive\n analysis. Optionally add a *label*. 
If you are already within\n the Glue environment, you can pass a *data_collection* object,\n otherwise Glue will be started.\n \"\"\"\n from yt.config import ytcfg\n from glue.core import DataCollection, Data\n if ytcfg.getboolean(\"yt\", \"__withintesting\"):\n from glue.core.application_base import \\\n Application as GlueApplication\n else:\n try:\n from glue.app.qt.application import GlueApplication\n except ImportError:\n from glue.qt.glue_application import GlueApplication\n gdata = Data(label=label)\n for component_name in fields:\n gdata.add_component(self[component_name], component_name)\n\n if data_collection is None:\n dc = DataCollection([gdata])\n app = GlueApplication(dc)\n try:\n app.start()\n except AttributeError:\n # In testing we're using a dummy glue application object\n # that doesn't have a start method\n pass\n else:\n data_collection.append(gdata)\n\n # Numpy-like Operations\n def argmax(self, field, axis=None):\n r\"\"\"Return the values at which the field is maximized.\n\n This will, in a parallel-aware fashion, find the maximum value and then\n return to you the values at that maximum location that are requested\n for \"axis\". By default it will return the spatial positions (in the\n natural coordinate system), but it can be any field\n\n Parameters\n ----------\n field : string or tuple field name\n The field to maximize.\n axis : string or list of strings, optional\n If supplied, the fields to sample along; if not supplied, defaults\n to the coordinate fields. This can be the name of the coordinate\n fields (i.e., 'x', 'y', 'z') or a list of fields, but cannot be 0,\n 1, 2.\n\n Returns\n -------\n A list of YTQuantities as specified by the axis argument.\n\n Examples\n --------\n\n >>> temp_at_max_rho = reg.argmax(\"density\", axis=\"temperature\")\n >>> max_rho_xyz = reg.argmax(\"density\")\n >>> t_mrho, v_mrho = reg.argmax(\"density\", axis=[\"temperature\",\n ... \"velocity_magnitude\"])\n >>> x, y, z = reg.argmax(\"density\")\n\n \"\"\"\n if axis is None:\n mv, pos0, pos1, pos2 = self.quantities.max_location(field)\n return pos0, pos1, pos2\n if isinstance(axis, string_types):\n axis = [axis]\n rv = self.quantities.sample_at_max_field_values(field, axis)\n if len(rv) == 2:\n return rv[1]\n return rv[1:]\n\n def argmin(self, field, axis=None):\n r\"\"\"Return the values at which the field is minimized.\n\n This will, in a parallel-aware fashion, find the minimum value and then\n return to you the values at that minimum location that are requested\n for \"axis\". By default it will return the spatial positions (in the\n natural coordinate system), but it can be any field\n\n Parameters\n ----------\n field : string or tuple field name\n The field to minimize.\n axis : string or list of strings, optional\n If supplied, the fields to sample along; if not supplied, defaults\n to the coordinate fields. This can be the name of the coordinate\n fields (i.e., 'x', 'y', 'z') or a list of fields, but cannot be 0,\n 1, 2.\n\n Returns\n -------\n A list of YTQuantities as specified by the axis argument.\n\n Examples\n --------\n\n >>> temp_at_min_rho = reg.argmin(\"density\", axis=\"temperature\")\n >>> min_rho_xyz = reg.argmin(\"density\")\n >>> t_mrho, v_mrho = reg.argmin(\"density\", axis=[\"temperature\",\n ... 
\"velocity_magnitude\"])\n >>> x, y, z = reg.argmin(\"density\")\n\n \"\"\"\n if axis is None:\n mv, pos0, pos1, pos2 = self.quantities.min_location(field)\n return pos0, pos1, pos2\n rv = self.quantities.sample_at_min_field_values(field, axis)\n if len(rv) == 2:\n return rv[1]\n return rv[1:]\n\n def _compute_extrema(self, field):\n if self._extrema_cache is None:\n self._extrema_cache = {}\n if field not in self._extrema_cache:\n # Note we still need to call extrema for each field, as of right\n # now\n mi, ma = self.quantities.extrema(field)\n self._extrema_cache[field] = (mi, ma)\n return self._extrema_cache[field]\n\n _extrema_cache = None\n def max(self, field, axis=None):\n r\"\"\"Compute the maximum of a field, optionally along an axis.\n\n This will, in a parallel-aware fashion, compute the maximum of the\n given field. Supplying an axis will result in a return value of a\n YTProjection, with method 'mip' for maximum intensity. If the max has\n already been requested, it will use the cached extrema value.\n\n Parameters\n ----------\n field : string or tuple field name\n The field to maximize.\n axis : string, optional\n If supplied, the axis to project the maximum along.\n\n Returns\n -------\n Either a scalar or a YTProjection.\n\n Examples\n --------\n\n >>> max_temp = reg.max(\"temperature\")\n >>> max_temp_proj = reg.max(\"temperature\", axis=\"x\")\n \"\"\"\n if axis is None:\n rv = ()\n fields = ensure_list(field)\n for f in fields:\n rv += (self._compute_extrema(f)[1],)\n if len(fields) == 1:\n return rv[0]\n else:\n return rv\n elif axis in self.ds.coordinates.axis_name:\n r = self.ds.proj(field, axis, data_source=self, method=\"mip\")\n return r\n else:\n raise NotImplementedError(\"Unknown axis %s\" % axis)\n\n def min(self, field, axis=None):\n r\"\"\"Compute the minimum of a field.\n\n This will, in a parallel-aware fashion, compute the minimum of the\n given field. Supplying an axis is not currently supported. If the max\n has already been requested, it will use the cached extrema value.\n\n Parameters\n ----------\n field : string or tuple field name\n The field to minimize.\n axis : string, optional\n If supplied, the axis to compute the minimum along.\n\n Returns\n -------\n Scalar.\n\n Examples\n --------\n\n >>> min_temp = reg.min(\"temperature\")\n \"\"\"\n if axis is None:\n rv = ()\n fields = ensure_list(field)\n for f in ensure_list(fields):\n rv += (self._compute_extrema(f)[0],)\n if len(fields) == 1:\n return rv[0]\n else:\n return rv\n return rv\n elif axis in self.ds.coordinates.axis_name:\n raise NotImplementedError(\"Minimum intensity projection not\"\n \" implemented.\")\n else:\n raise NotImplementedError(\"Unknown axis %s\" % axis)\n\n def std(self, field, weight=None):\n \"\"\"Compute the variance of a field.\n\n This will, in a parallel-ware fashion, compute the variance of\n the given field.\n\n Parameters\n ----------\n field : string or tuple field name\n The field to calculate the variance of\n weight : string or tuple field name\n The field to weight the variance calculation by. 
Defaults to\n unweighted if unset.\n\n Returns\n -------\n Scalar\n \"\"\"\n weight_field = sanitize_weight_field(self.ds, field, weight)\n return self.quantities.weighted_variance(field, weight_field)[0]\n\n def ptp(self, field):\n r\"\"\"Compute the range of values (maximum - minimum) of a field.\n\n This will, in a parallel-aware fashion, compute the \"peak-to-peak\" of\n the given field.\n\n Parameters\n ----------\n field : string or tuple field name\n The field to average.\n\n Returns\n -------\n Scalar\n\n Examples\n --------\n\n >>> rho_range = reg.ptp(\"density\")\n \"\"\"\n ex = self._compute_extrema(field)\n return ex[1] - ex[0]\n\n def profile(self, bin_fields, fields, n_bins=64,\n extrema=None, logs=None, units=None,\n weight_field=\"cell_mass\",\n accumulation=False, fractional=False,\n deposition='ngp'):\n r\"\"\"\n Create a 1, 2, or 3D profile object from this data_source.\n\n The dimensionality of the profile object is chosen by the number of\n fields given in the bin_fields argument. This simply calls\n :func:`yt.data_objects.profiles.create_profile`.\n\n Parameters\n ----------\n bin_fields : list of strings\n List of the binning fields for profiling.\n fields : list of strings\n The fields to be profiled.\n n_bins : int or list of ints\n The number of bins in each dimension. If None, 64 bins for\n each bin are used for each bin field.\n Default: 64.\n extrema : dict of min, max tuples\n Minimum and maximum values of the bin_fields for the profiles.\n The keys correspond to the field names. Defaults to the extrema\n of the bin_fields of the dataset. If a units dict is provided, extrema\n are understood to be in the units specified in the dictionary.\n logs : dict of boolean values\n Whether or not to log the bin_fields for the profiles.\n The keys correspond to the field names. Defaults to the take_log\n attribute of the field.\n units : dict of strings\n The units of the fields in the profiles, including the bin_fields.\n weight_field : str or tuple field identifier\n The weight field for computing weighted average for the profile\n values. If None, the profile values are sums of the data in\n each bin.\n accumulation : bool or list of bools\n If True, the profile values for a bin n are the cumulative sum of\n all the values from bin 0 to n. If -True, the sum is reversed so\n that the value for bin n is the cumulative sum from bin N (total bins)\n to n. If the profile is 2D or 3D, a list of values can be given to\n control the summation in each dimension independently.\n Default: False.\n fractional : If True the profile values are divided by the sum of all\n the profile data such that the profile represents a probability\n distribution function.\n deposition : Controls the type of deposition used for ParticlePhasePlots.\n Valid choices are 'ngp' and 'cic'. Default is 'ngp'. This parameter is\n ignored the if the input fields are not of particle type.\n\n\n Examples\n --------\n\n Create a 1d profile. Access bin field from profile.x and field\n data from profile[<field_name>].\n\n >>> ds = load(\"DD0046/DD0046\")\n >>> ad = ds.all_data()\n >>> profile = ad.profile(ad, [(\"gas\", \"density\")],\n ... [(\"gas\", \"temperature\"),\n ... 
(\"gas\", \"velocity_x\")])\n >>> print (profile.x)\n >>> print (profile[\"gas\", \"temperature\"])\n >>> plot = profile.plot()\n \"\"\"\n p = create_profile(self, bin_fields, fields, n_bins,\n extrema, logs, units, weight_field, accumulation,\n fractional, deposition)\n return p\n\n def mean(self, field, axis=None, weight=None):\n r\"\"\"Compute the mean of a field, optionally along an axis, with a\n weight.\n\n This will, in a parallel-aware fashion, compute the mean of the\n given field. If an axis is supplied, it will return a projection,\n where the weight is also supplied. By default the weight field will be\n \"ones\" or \"particle_ones\", depending on the field being averaged,\n resulting in an unweighted average.\n\n Parameters\n ----------\n field : string or tuple field name\n The field to average.\n axis : string, optional\n If supplied, the axis to compute the mean along (i.e., to project\n along)\n weight : string, optional\n The field to use as a weight.\n\n Returns\n -------\n Scalar or YTProjection.\n\n Examples\n --------\n\n >>> avg_rho = reg.mean(\"density\", weight=\"cell_volume\")\n >>> rho_weighted_T = reg.mean(\"temperature\", axis=\"y\", weight=\"density\")\n \"\"\"\n weight_field = sanitize_weight_field(self.ds, field, weight)\n if axis in self.ds.coordinates.axis_name:\n r = self.ds.proj(field, axis, data_source=self,\n weight_field=weight_field)\n elif axis is None:\n r = self.quantities.weighted_average_quantity(field, weight_field)\n else:\n raise NotImplementedError(\"Unknown axis %s\" % axis)\n return r\n\n def sum(self, field, axis=None):\n r\"\"\"Compute the sum of a field, optionally along an axis.\n\n This will, in a parallel-aware fashion, compute the sum of the given\n field. If an axis is specified, it will return a projection (using\n method type \"sum\", which does not take into account path length) along\n that axis.\n\n Parameters\n ----------\n field : string or tuple field name\n The field to sum.\n axis : string, optional\n If supplied, the axis to sum along.\n\n Returns\n -------\n Either a scalar or a YTProjection.\n\n Examples\n --------\n\n >>> total_vol = reg.sum(\"cell_volume\")\n >>> cell_count = reg.sum(\"ones\", axis=\"x\")\n \"\"\"\n # Because we're using ``sum`` to specifically mean a sum or a\n # projection with the method=\"sum\", we do not utilize the ``mean``\n # function.\n if axis in self.ds.coordinates.axis_name:\n with self._field_parameter_state({'axis':axis}):\n r = self.ds.proj(field, axis, data_source=self, method=\"sum\")\n elif axis is None:\n r = self.quantities.total_quantity(field)\n else:\n raise NotImplementedError(\"Unknown axis %s\" % axis)\n return r\n\n def integrate(self, field, weight=None, axis=None):\n r\"\"\"Compute the integral (projection) of a field along an axis.\n\n This projects a field along an axis.\n\n Parameters\n ----------\n field : string or tuple field name\n The field to project.\n weight: string or tuple field name\n The field to weight the projection by\n axis : string\n The axis to project along.\n\n Returns\n -------\n YTProjection\n\n Examples\n --------\n\n >>> column_density = reg.integrate(\"density\", axis=\"z\")\n \"\"\"\n if weight is not None:\n weight_field = sanitize_weight_field(self.ds, field, weight)\n else:\n weight_field = None\n if axis in self.ds.coordinates.axis_name:\n r = self.ds.proj(field, axis, data_source=self,\n weight_field=weight_field)\n else:\n raise NotImplementedError(\"Unknown axis %s\" % axis)\n return r\n\n @property\n def _hash(self):\n s = \"%s\" % 
self\n try:\n import hashlib\n return hashlib.md5(s.encode('utf-8')).hexdigest()\n except ImportError:\n return s\n\n def __reduce__(self):\n args = tuple([self.ds._hash(), self._type_name] +\n [getattr(self, n) for n in self._con_args] +\n [self.field_parameters])\n return (_reconstruct_object, args)\n\n def clone(self):\n r\"\"\"Clone a data object.\n\n This will make a duplicate of a data object; note that the\n `field_parameters` may not necessarily be deeply-copied. If you modify\n the field parameters in-place, it may or may not be shared between the\n objects, depending on the type of object that that particular field\n parameter is.\n\n Notes\n -----\n One use case for this is to have multiple identical data objects that\n are being chunked over in different orders.\n\n Examples\n --------\n\n >>> ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n >>> sp = ds.sphere(\"c\", 0.1)\n >>> sp_clone = sp.clone()\n >>> sp[\"density\"]\n >>> print sp.field_data.keys()\n [(\"gas\", \"density\")]\n >>> print sp_clone.field_data.keys()\n []\n \"\"\"\n args = self.__reduce__()\n return args[0](self.ds, *args[1][1:])[1]\n\n def __repr__(self):\n # We'll do this the slow way to be clear what's going on\n s = \"%s (%s): \" % (self.__class__.__name__, self.ds)\n for i in self._con_args:\n try:\n s += \", %s=%s\" % (i, getattr(self, i).in_base(unit_system=self.ds.unit_system))\n except AttributeError:\n s += \", %s=%s\" % (i, getattr(self, i))\n return s\n\n @contextmanager\n def _field_parameter_state(self, field_parameters):\n # What we're doing here is making a copy of the incoming field\n # parameters, and then updating it with our own. This means that we'll\n # be using our own center, if set, rather than the supplied one. But\n # it also means that any additionally set values can override it.\n old_field_parameters = self.field_parameters\n new_field_parameters = field_parameters.copy()\n new_field_parameters.update(old_field_parameters)\n self.field_parameters = new_field_parameters\n yield\n self.field_parameters = old_field_parameters\n\n @contextmanager\n def _field_type_state(self, ftype, finfo, obj = None):\n if obj is None: obj = self\n old_particle_type = obj._current_particle_type\n old_fluid_type = obj._current_fluid_type\n if finfo.particle_type:\n obj._current_particle_type = ftype\n else:\n obj._current_fluid_type = ftype\n yield\n obj._current_particle_type = old_particle_type\n obj._current_fluid_type = old_fluid_type\n\n def _determine_fields(self, fields):\n fields = ensure_list(fields)\n explicit_fields = []\n for field in fields:\n if field in self._container_fields:\n explicit_fields.append(field)\n continue\n if isinstance(field, tuple):\n if len(field) != 2 or \\\n not isinstance(field[0], string_types) or \\\n not isinstance(field[1], string_types):\n raise YTFieldNotParseable(field)\n ftype, fname = field\n finfo = self.ds._get_field_info(ftype, fname)\n elif isinstance(field, DerivedField):\n ftype, fname = field.name\n finfo = field\n else:\n fname = field\n finfo = self.ds._get_field_info(\"unknown\", fname)\n if finfo.particle_type:\n ftype = self._current_particle_type\n else:\n ftype = self._current_fluid_type\n if (ftype, fname) not in self.ds.field_info:\n ftype = self.ds._last_freq[0]\n\n # really ugly check to ensure that this field really does exist somewhere,\n # in some naming convention, before returning it as a possible field type\n if (ftype,fname) not in self.ds.field_info and \\\n (ftype,fname) not in self.ds.field_list and \\\n fname not in 
self.ds.field_list and \\\n (ftype,fname) not in self.ds.derived_field_list and \\\n fname not in self.ds.derived_field_list and \\\n (ftype,fname) not in self._container_fields:\n raise YTFieldNotFound((ftype,fname),self.ds)\n\n # these tests are really insufficient as a field type may be valid, and the\n # field name may be valid, but not the combination (field type, field name)\n if finfo.particle_type and ftype not in self.ds.particle_types:\n raise YTFieldTypeNotFound(ftype, ds=self.ds)\n elif not finfo.particle_type and ftype not in self.ds.fluid_types:\n raise YTFieldTypeNotFound(ftype, ds=self.ds)\n explicit_fields.append((ftype, fname))\n return explicit_fields\n\n _tree = None\n\n @property\n def tiles(self):\n if self._tree is not None: return self._tree\n self._tree = AMRKDTree(self.ds, data_source=self)\n return self._tree\n\n @property\n def blocks(self):\n for io_chunk in self.chunks([], \"io\"):\n for i,chunk in enumerate(self.chunks([], \"spatial\", ngz = 0)):\n # For grids this will be a grid object, and for octrees it will\n # be an OctreeSubset. Note that we delegate to the sub-object.\n o = self._current_chunk.objs[0]\n cache_fp = o.field_parameters.copy()\n o.field_parameters.update(self.field_parameters)\n for b, m in o.select_blocks(self.selector):\n if m is None: continue\n yield b, m\n o.field_parameters = cache_fp\n\nclass GenerationInProgress(Exception):\n def __init__(self, fields):\n self.fields = fields\n super(GenerationInProgress, self).__init__()\n\nclass YTSelectionContainer(YTDataContainer, ParallelAnalysisInterface):\n _locked = False\n _sort_by = None\n _selector = None\n _current_chunk = None\n _data_source = None\n _dimensionality = None\n _max_level = None\n _min_level = None\n\n def __init__(self, ds, field_parameters, data_source=None):\n ParallelAnalysisInterface.__init__(self)\n super(YTSelectionContainer, self).__init__(ds, field_parameters)\n self._data_source = data_source\n if data_source is not None:\n if data_source.ds is not self.ds:\n raise RuntimeError(\"Attempted to construct a DataContainer with a data_source \"\n \"from a different DataSet\", ds, data_source.ds)\n if data_source._dimensionality < self._dimensionality:\n raise RuntimeError(\"Attempted to construct a DataContainer with a data_source \"\n \"of lower dimensionality (%u vs %u)\" %\n (data_source._dimensionality, self._dimensionality))\n self.field_parameters.update(data_source.field_parameters)\n self.quantities = DerivedQuantityCollection(self)\n\n @property\n def selector(self):\n if self._selector is not None:\n return self._selector\n s_module = getattr(self, '_selector_module',\n yt.geometry.selection_routines)\n sclass = getattr(s_module,\n \"%s_selector\" % self._type_name, None)\n if sclass is None:\n raise YTDataSelectorNotImplemented(self._type_name)\n\n if self._data_source is not None:\n self._selector = compose_selector(self, self._data_source.selector, sclass(self))\n else:\n self._selector = sclass(self)\n return self._selector\n\n def chunks(self, fields, chunking_style, **kwargs):\n # This is an iterator that will yield the necessary chunks.\n self.get_data() # Ensure we have built ourselves\n if fields is None: fields = []\n # chunk_ind can be supplied in the keyword arguments. 
If it's a\n # scalar, that'll be the only chunk that gets returned; if it's a list,\n # those are the ones that will be.\n chunk_ind = kwargs.pop(\"chunk_ind\", None)\n if chunk_ind is not None:\n chunk_ind = ensure_list(chunk_ind)\n for ci, chunk in enumerate(self.index._chunk(self, chunking_style,\n **kwargs)):\n if chunk_ind is not None and ci not in chunk_ind:\n continue\n with self._chunked_read(chunk):\n self.get_data(fields)\n # NOTE: we yield before releasing the context\n yield self\n\n def _identify_dependencies(self, fields_to_get, spatial = False):\n inspected = 0\n fields_to_get = fields_to_get[:]\n for field in itertools.cycle(fields_to_get):\n if inspected >= len(fields_to_get): break\n inspected += 1\n fi = self.ds._get_field_info(*field)\n fd = self.ds.field_dependencies.get(field, None) or \\\n self.ds.field_dependencies.get(field[1], None)\n # This is long overdue. Any time we *can't* find a field\n # dependency -- for instance, if the derived field has been added\n # after dataset instantiation -- let's just try to\n # recalculate it.\n if fd is None:\n try:\n fd = fi.get_dependencies(ds = self.ds)\n self.ds.field_dependencies[field] = fd\n except:\n continue\n requested = self._determine_fields(list(set(fd.requested)))\n deps = [d for d in requested if d not in fields_to_get]\n fields_to_get += deps\n return sorted(fields_to_get)\n\n def get_data(self, fields=None):\n if self._current_chunk is None:\n self.index._identify_base_chunk(self)\n if fields is None: return\n nfields = []\n apply_fields = defaultdict(list)\n for field in self._determine_fields(fields):\n # We need to create the field on the raw particle types\n # for particles types (when the field is not directly\n # defined for the derived particle type only)\n finfo = self.ds.field_info[field]\n\n if field[0] in self.ds.filtered_particle_types and finfo._inherited_particle_filter:\n f = self.ds.known_filters[field[0]]\n apply_fields[field[0]].append(\n (f.filtered_type, field[1]))\n else:\n nfields.append(field)\n for filter_type in apply_fields:\n f = self.ds.known_filters[filter_type]\n with f.apply(self):\n self.get_data(apply_fields[filter_type])\n fields = nfields\n if len(fields) == 0: return\n # Now we collect all our fields\n # Here is where we need to perform a validation step, so that if we\n # have a field requested that we actually *can't* yet get, we put it\n # off until the end. 
This prevents double-reading fields that will\n # need to be used in spatial fields later on.\n fields_to_get = []\n # This will be pre-populated with spatial fields\n fields_to_generate = []\n for field in self._determine_fields(fields):\n if field in self.field_data: continue\n finfo = self.ds._get_field_info(*field)\n try:\n finfo.check_available(self)\n except NeedsGridType:\n fields_to_generate.append(field)\n continue\n fields_to_get.append(field)\n if len(fields_to_get) == 0 and len(fields_to_generate) == 0:\n return\n elif self._locked is True:\n raise GenerationInProgress(fields)\n # Track which ones we want in the end\n ofields = set(list(self.field_data.keys())\n + fields_to_get\n + fields_to_generate)\n # At this point, we want to figure out *all* our dependencies.\n fields_to_get = self._identify_dependencies(fields_to_get,\n self._spatial)\n # We now split up into readers for the types of fields\n fluids, particles = [], []\n finfos = {}\n for ftype, fname in fields_to_get:\n finfo = self.ds._get_field_info(ftype, fname)\n finfos[ftype, fname] = finfo\n if finfo.particle_type:\n particles.append((ftype, fname))\n elif (ftype, fname) not in fluids:\n fluids.append((ftype, fname))\n # The _read method will figure out which fields it needs to get from\n # disk, and return a dict of those fields along with the fields that\n # need to be generated.\n read_fluids, gen_fluids = self.index._read_fluid_fields(\n fluids, self, self._current_chunk)\n for f, v in read_fluids.items():\n self.field_data[f] = self.ds.arr(v, input_units = finfos[f].units)\n self.field_data[f].convert_to_units(finfos[f].output_units)\n\n read_particles, gen_particles = self.index._read_particle_fields(\n particles, self, self._current_chunk)\n for f, v in read_particles.items():\n self.field_data[f] = self.ds.arr(v, input_units = finfos[f].units)\n self.field_data[f].convert_to_units(finfos[f].output_units)\n\n fields_to_generate += gen_fluids + gen_particles\n self._generate_fields(fields_to_generate)\n for field in list(self.field_data.keys()):\n if field not in ofields:\n self.field_data.pop(field)\n\n def _generate_fields(self, fields_to_generate):\n index = 0\n with self._field_lock():\n # At this point, we assume that any fields that are necessary to\n # *generate* a field are in fact already available to us. Note\n # that we do not make any assumption about whether or not the\n # fields have a spatial requirement. 
This will be checked inside\n # _generate_field, at which point additional dependencies may\n # actually be noted.\n while any(f not in self.field_data for f in fields_to_generate):\n field = fields_to_generate[index % len(fields_to_generate)]\n index += 1\n if field in self.field_data: continue\n fi = self.ds._get_field_info(*field)\n try:\n fd = self._generate_field(field)\n if fd is None:\n raise RuntimeError\n if fi.units is None:\n # first time calling a field with units='auto', so we\n # infer the units from the units of the data we get back\n # from the field function and use these units for future\n # field accesses\n units = getattr(fd, 'units', '')\n if units == '':\n dimensions = ytdims.dimensionless\n else:\n dimensions = units.dimensions\n units = str(units.get_base_equivalent(self.ds.unit_system.name))\n if fi.dimensions != dimensions:\n raise YTDimensionalityError(fi.dimensions, dimensions)\n fi.units = units\n self.field_data[field] = self.ds.arr(fd, units)\n msg = (\"Field %s was added without specifying units, \"\n \"assuming units are %s\")\n mylog.warn(msg % (fi.name, units))\n try:\n fd.convert_to_units(fi.units)\n except AttributeError:\n # If the field returns an ndarray, coerce to a\n # dimensionless YTArray and verify that field is\n # supposed to be unitless\n fd = self.ds.arr(fd, '')\n if fi.units != '':\n raise YTFieldUnitError(fi, fd.units)\n except YTUnitConversionError:\n raise YTFieldUnitError(fi, fd.units)\n except UnitParseError:\n raise YTFieldUnitParseError(fi)\n self.field_data[field] = fd\n except GenerationInProgress as gip:\n for f in gip.fields:\n if f not in fields_to_generate:\n fields_to_generate.append(f)\n\n def __or__(self, other):\n if not isinstance(other, YTSelectionContainer):\n raise YTBooleanObjectError(other)\n if self.ds is not other.ds:\n raise YTBooleanObjectsWrongDataset()\n # Should maybe do something with field parameters here\n return YTBooleanContainer(\"OR\", self, other, ds = self.ds)\n\n def __invert__(self):\n # ~obj\n asel = yt.geometry.selection_routines.AlwaysSelector(self.ds)\n return YTBooleanContainer(\"NOT\", self, asel, ds = self.ds)\n\n def __xor__(self, other):\n if not isinstance(other, YTSelectionContainer):\n raise YTBooleanObjectError(other)\n if self.ds is not other.ds:\n raise YTBooleanObjectsWrongDataset()\n return YTBooleanContainer(\"XOR\", self, other, ds = self.ds)\n\n def __and__(self, other):\n if not isinstance(other, YTSelectionContainer):\n raise YTBooleanObjectError(other)\n if self.ds is not other.ds:\n raise YTBooleanObjectsWrongDataset()\n return YTBooleanContainer(\"AND\", self, other, ds = self.ds)\n\n def __add__(self, other):\n return self.__or__(other)\n\n def __sub__(self, other):\n if not isinstance(other, YTSelectionContainer):\n raise YTBooleanObjectError(other)\n if self.ds is not other.ds:\n raise YTBooleanObjectsWrongDataset()\n return YTBooleanContainer(\"NEG\", self, other, ds = self.ds)\n\n @contextmanager\n def _field_lock(self):\n self._locked = True\n yield\n self._locked = False\n\n @contextmanager\n def _ds_hold(self, new_ds):\n \"\"\"\n This contextmanager is used to take a data object and preserve its\n attributes but allow the dataset that underlies it to be swapped out.\n This is typically only used internally, and differences in unit systems\n may present interesting possibilities.\n \"\"\"\n old_ds = self.ds\n old_index = self._index\n self.ds = new_ds\n self._index = new_ds.index\n old_chunk_info = self._chunk_info\n old_chunk = self._current_chunk\n old_size = 
self.size\n self._chunk_info = None\n self._current_chunk = None\n self.size = None\n self._index._identify_base_chunk(self)\n with self._chunked_read(None):\n yield\n self._index = old_index\n self.ds = old_ds\n self._chunk_info = old_chunk_info\n self._current_chunk = old_chunk\n self.size = old_size\n\n @contextmanager\n def _chunked_read(self, chunk):\n # There are several items that need to be swapped out\n # field_data, size, shape\n obj_field_data = []\n if hasattr(chunk, 'objs'):\n for obj in chunk.objs:\n obj_field_data.append(obj.field_data)\n obj.field_data = YTFieldData()\n old_field_data, self.field_data = self.field_data, YTFieldData()\n old_chunk, self._current_chunk = self._current_chunk, chunk\n old_locked, self._locked = self._locked, False\n yield\n self.field_data = old_field_data\n self._current_chunk = old_chunk\n self._locked = old_locked\n if hasattr(chunk, 'objs'):\n for obj in chunk.objs:\n obj.field_data = obj_field_data.pop(0)\n\n @contextmanager\n def _activate_cache(self):\n cache = self._field_cache or {}\n old_fields = {}\n for field in (f for f in cache if f in self.field_data):\n old_fields[field] = self.field_data[field]\n self.field_data.update(cache)\n yield\n for field in cache:\n self.field_data.pop(field)\n if field in old_fields:\n self.field_data[field] = old_fields.pop(field)\n self._field_cache = None\n\n def _initialize_cache(self, cache):\n # Wipe out what came before\n self._field_cache = {}\n self._field_cache.update(cache)\n\n @property\n def icoords(self):\n if self._current_chunk is None:\n self.index._identify_base_chunk(self)\n return self._current_chunk.icoords\n\n @property\n def fcoords(self):\n if self._current_chunk is None:\n self.index._identify_base_chunk(self)\n return self._current_chunk.fcoords\n\n @property\n def ires(self):\n if self._current_chunk is None:\n self.index._identify_base_chunk(self)\n return self._current_chunk.ires\n\n @property\n def fwidth(self):\n if self._current_chunk is None:\n self.index._identify_base_chunk(self)\n return self._current_chunk.fwidth\n\n @property\n def fcoords_vertex(self):\n if self._current_chunk is None:\n self.index._identify_base_chunk(self)\n return self._current_chunk.fcoords_vertex\n\n @property\n def max_level(self):\n if self._max_level is None:\n try:\n return self.ds.max_level\n except AttributeError:\n return None\n return self._max_level\n\n @max_level.setter\n def max_level(self, value):\n if self._selector is not None:\n del self._selector\n self._selector = None\n self._current_chunk = None\n self.size = None\n self.shape = None\n self.field_data.clear()\n self._max_level = value\n\n @property\n def min_level(self):\n if self._min_level is None:\n try:\n return 0\n except AttributeError:\n return None\n return self._min_level\n\n @min_level.setter\n def min_level(self, value):\n if self._selector is not None:\n del self._selector\n self._selector = None\n self.field_data.clear()\n self.size = None\n self.shape = None\n self._current_chunk = None\n self._min_level = value\n\nclass YTSelectionContainer0D(YTSelectionContainer):\n _spatial = False\n _dimensionality = 0\n def __init__(self, ds, field_parameters = None, data_source = None):\n super(YTSelectionContainer0D, self).__init__(\n ds, field_parameters, data_source)\n\nclass YTSelectionContainer1D(YTSelectionContainer):\n _spatial = False\n _dimensionality = 1\n def __init__(self, ds, field_parameters = None, data_source = None):\n super(YTSelectionContainer1D, self).__init__(\n ds, field_parameters, data_source)\n 
self._grids = None\n self._sortkey = None\n self._sorted = {}\n\nclass YTSelectionContainer2D(YTSelectionContainer):\n _key_fields = ['px','py','pdx','pdy']\n _dimensionality = 2\n \"\"\"\n Prepares the YTSelectionContainer2D, normal to *axis*. If *axis* is 4, we are not\n aligned with any axis.\n \"\"\"\n _spatial = False\n def __init__(self, axis, ds, field_parameters = None, data_source = None):\n super(YTSelectionContainer2D, self).__init__(\n ds, field_parameters, data_source)\n # We need the ds, which will exist by now, for fix_axis.\n self.axis = fix_axis(axis, self.ds)\n self.set_field_parameter(\"axis\", axis)\n\n def _convert_field_name(self, field):\n return field\n\n def _get_pw(self, fields, center, width, origin, plot_type):\n from yt.visualization.plot_window import \\\n get_window_parameters, PWViewerMPL\n from yt.visualization.fixed_resolution import \\\n FixedResolutionBuffer as frb\n axis = self.axis\n skip = self._key_fields\n skip += list(set(frb._exclude_fields).difference(set(self._key_fields)))\n self.fields = [k for k in self.field_data if k not in skip]\n if fields is not None:\n self.fields = ensure_list(fields) + self.fields\n if len(self.fields) == 0:\n raise ValueError(\"No fields found to plot in get_pw\")\n (bounds, center, display_center) = \\\n get_window_parameters(axis, center, width, self.ds)\n pw = PWViewerMPL(self, bounds, fields=self.fields, origin=origin,\n frb_generator=frb, plot_type=plot_type)\n pw._setup_plots()\n return pw\n\n\n def to_frb(self, width, resolution, center=None, height=None,\n periodic = False):\n r\"\"\"This function returns a FixedResolutionBuffer generated from this\n object.\n\n A FixedResolutionBuffer is an object that accepts a variable-resolution\n 2D object and transforms it into an NxM bitmap that can be plotted,\n examined or processed. This is a convenience function to return an FRB\n directly from an existing 2D data object.\n\n Parameters\n ----------\n width : width specifier\n This can either be a floating point value, in the native domain\n units of the simulation, or a tuple of the (value, unit) style.\n This will be the width of the FRB.\n height : height specifier\n This will be the physical height of the FRB, by default it is equal\n to width. Note that this will not make any corrections to\n resolution for the aspect ratio.\n resolution : int or tuple of ints\n The number of pixels on a side of the final FRB. If iterable, this\n will be the width then the height.\n center : array-like of floats, optional\n The center of the FRB. If not specified, defaults to the center of\n the current object.\n periodic : bool\n Should the returned Fixed Resolution Buffer be periodic? (default:\n False).\n\n Returns\n -------\n frb : :class:`~yt.visualization.fixed_resolution.FixedResolutionBuffer`\n A fixed resolution buffer, which can be queried for fields.\n\n Examples\n --------\n\n >>> proj = ds.proj(\"Density\", 0)\n >>> frb = proj.to_frb( (100.0, 'kpc'), 1024)\n >>> write_image(np.log10(frb[\"Density\"]), 'density_100kpc.png')\n \"\"\"\n\n if (self.ds.geometry == \"cylindrical\" and self.axis == 1) or \\\n (self.ds.geometry == \"polar\" and self.axis == 2):\n if center is not None and center != (0.0, 0.0):\n raise NotImplementedError(\n \"Currently we only support images centered at R=0. 
\" +\n \"We plan to generalize this in the near future\")\n from yt.visualization.fixed_resolution import CylindricalFixedResolutionBuffer\n validate_width_tuple(width)\n if iterable(resolution): resolution = max(resolution)\n frb = CylindricalFixedResolutionBuffer(self, width, resolution)\n return frb\n\n if center is None:\n center = self.center\n if center is None:\n center = (self.ds.domain_right_edge\n + self.ds.domain_left_edge)/2.0\n elif iterable(center) and not isinstance(center, YTArray):\n center = self.ds.arr(center, 'code_length')\n if iterable(width):\n w, u = width\n if isinstance(w, tuple) and isinstance(u, tuple):\n height = u\n w, u = w\n width = self.ds.quan(w, input_units = u)\n elif not isinstance(width, YTArray):\n width = self.ds.quan(width, 'code_length')\n if height is None:\n height = width\n elif iterable(height):\n h, u = height\n height = self.ds.quan(h, input_units = u)\n elif not isinstance(height, YTArray):\n height = self.ds.quan(height, 'code_length')\n if not iterable(resolution):\n resolution = (resolution, resolution)\n from yt.visualization.fixed_resolution import FixedResolutionBuffer\n xax = self.ds.coordinates.x_axis[self.axis]\n yax = self.ds.coordinates.y_axis[self.axis]\n bounds = (center[xax] - width*0.5, center[xax] + width*0.5,\n center[yax] - height*0.5, center[yax] + height*0.5)\n frb = FixedResolutionBuffer(self, bounds, resolution,\n periodic = periodic)\n return frb\n\nclass YTSelectionContainer3D(YTSelectionContainer):\n \"\"\"\n Returns an instance of YTSelectionContainer3D, or prepares one. Usually only\n used as a base class. Note that *center* is supplied, but only used\n for fields and quantities that require it.\n \"\"\"\n _key_fields = ['x','y','z','dx','dy','dz']\n _spatial = False\n _num_ghost_zones = 0\n _dimensionality = 3\n def __init__(self, center, ds, field_parameters = None, data_source = None):\n super(YTSelectionContainer3D, self).__init__(ds, field_parameters, data_source)\n self._set_center(center)\n self.coords = None\n self._grids = None\n\n def cut_region(self, field_cuts, field_parameters=None):\n \"\"\"\n Return a YTCutRegion, where the a cell is identified as being inside\n the cut region based on the value of one or more fields. Note that in\n previous versions of yt the name 'grid' was used to represent the data\n object used to construct the field cut, as of yt 3.0, this has been\n changed to 'obj'.\n\n Parameters\n ----------\n field_cuts : list of strings\n A list of conditionals that will be evaluated. 
In the namespace\n available, these conditionals will have access to 'obj' which is a\n data object of unknown shape, and they must generate a boolean array.\n For instance, conditionals = [\"obj['temperature'] < 1e3\"]\n field_parameters : dictionary\n A dictionary of field parameters to be used when applying the field\n cuts.\n\n Examples\n --------\n To find the total mass of hot gas with temperature greater than 10^6 K\n in your volume:\n\n >>> ds = yt.load(\"RedshiftOutput0005\")\n >>> ad = ds.all_data()\n >>> cr = ad.cut_region([\"obj['temperature'] > 1e6\"])\n >>> print cr.quantities.total_quantity(\"cell_mass\").in_units('Msun')\n \"\"\"\n cr = self.ds.cut_region(self, field_cuts,\n field_parameters=field_parameters)\n return cr\n\n def extract_isocontours(self, field, value, filename = None,\n rescale = False, sample_values = None):\n r\"\"\"This identifies isocontours on a cell-by-cell basis, with no\n consideration of global connectedness, and returns the vertices of the\n Triangles in that isocontour.\n\n This function simply returns the vertices of all the triangles\n calculated by the `marching cubes\n <http://en.wikipedia.org/wiki/Marching_cubes>`_ algorithm; for more\n complex operations, such as identifying connected sets of cells above a\n given threshold, see the extract_connected_sets function. This is more\n useful for calculating, for instance, total isocontour area, or\n visualizing in an external program (such as `MeshLab\n <http://meshlab.sf.net>`_.)\n\n Parameters\n ----------\n field : string\n Any field that can be obtained in a data object. This is the field\n which will be isocontoured.\n value : float\n The value at which the isocontour should be calculated.\n filename : string, optional\n If supplied, this file will be filled with the vertices in .obj\n format. Suitable for loading into meshlab.\n rescale : bool, optional\n If true, the vertices will be rescaled within their min/max.\n sample_values : string, optional\n Any field whose value should be extracted at the center of each\n triangle.\n\n Returns\n -------\n verts : array of floats\n The array of vertices, x,y,z. Taken in threes, these are the\n triangle vertices.\n samples : array of floats\n If `sample_values` is specified, this will be returned and will\n contain the values of the field specified at the center of each\n triangle.\n\n Examples\n --------\n This will create a data object, find a nice value in the center, and\n output the vertices to \"triangles.obj\" after rescaling them.\n\n >>> dd = ds.all_data()\n >>> rho = dd.quantities[\"WeightedAverageQuantity\"](\n ... \"Density\", weight=\"CellMassMsun\")\n >>> verts = dd.extract_isocontours(\"Density\", rho,\n ... 
\"triangles.obj\", True)\n \"\"\"\n verts = []\n samples = []\n for block, mask in self.blocks:\n my_verts = self._extract_isocontours_from_grid(\n block, mask, field, value, sample_values)\n if sample_values is not None:\n my_verts, svals = my_verts\n samples.append(svals)\n verts.append(my_verts)\n verts = np.concatenate(verts).transpose()\n verts = self.comm.par_combine_object(verts, op='cat', datatype='array')\n verts = verts.transpose()\n if sample_values is not None:\n samples = np.concatenate(samples)\n samples = self.comm.par_combine_object(samples, op='cat',\n datatype='array')\n if rescale:\n mi = np.min(verts, axis=0)\n ma = np.max(verts, axis=0)\n verts = (verts - mi) / (ma - mi).max()\n if filename is not None and self.comm.rank == 0:\n if hasattr(filename, \"write\"): f = filename\n else: f = open(filename, \"w\")\n for v1 in verts:\n f.write(\"v %0.16e %0.16e %0.16e\\n\" % (v1[0], v1[1], v1[2]))\n for i in range(len(verts)//3):\n f.write(\"f %s %s %s\\n\" % (i*3+1, i*3+2, i*3+3))\n if not hasattr(filename, \"write\"): f.close()\n if sample_values is not None:\n return verts, samples\n return verts\n\n def _extract_isocontours_from_grid(self, grid, mask, field, value,\n sample_values=None):\n vc_fields = [field]\n if sample_values is not None:\n vc_fields.append(sample_values)\n\n vc_data = grid.get_vertex_centered_data(vc_fields, no_ghost=False)\n try:\n svals = vc_data[sample_values]\n except KeyError:\n svals = None\n\n my_verts = march_cubes_grid(value, vc_data[field], mask,\n grid.LeftEdge, grid.dds, svals)\n return my_verts\n\n def calculate_isocontour_flux(self, field, value,\n field_x, field_y, field_z, fluxing_field = None):\n r\"\"\"This identifies isocontours on a cell-by-cell basis, with no\n consideration of global connectedness, and calculates the flux over\n those contours.\n\n This function will conduct `marching cubes\n <http://en.wikipedia.org/wiki/Marching_cubes>`_ on all the cells in a\n given data container (grid-by-grid), and then for each identified\n triangular segment of an isocontour in a given cell, calculate the\n gradient (i.e., normal) in the isocontoured field, interpolate the local\n value of the \"fluxing\" field, the area of the triangle, and then return:\n\n area * local_flux_value * (n dot v)\n\n Where area, local_value, and the vector v are interpolated at the barycenter\n (weighted by the vertex values) of the triangle. Note that this\n specifically allows for the field fluxing across the surface to be\n *different* from the field being contoured. If the fluxing_field is\n not specified, it is assumed to be 1.0 everywhere, and the raw flux\n with no local-weighting is returned.\n\n Additionally, the returned flux is defined as flux *into* the surface,\n not flux *out of* the surface.\n\n Parameters\n ----------\n field : string\n Any field that can be obtained in a data object. This is the field\n which will be isocontoured and used as the \"local_value\" in the\n flux equation.\n value : float\n The value at which the isocontour should be calculated.\n field_x : string\n The x-component field\n field_y : string\n The y-component field\n field_z : string\n The z-component field\n fluxing_field : string, optional\n The field whose passage over the surface is of interest. If not\n specified, assumed to be 1.0 everywhere.\n\n Returns\n -------\n flux : float\n The summed flux. 
Note that it is not currently scaled; this is\n simply the code-unit area times the fields.\n\n Examples\n --------\n This will create a data object, find a nice value in the center, and\n calculate the metal flux over it.\n\n >>> dd = ds.all_data()\n >>> rho = dd.quantities[\"WeightedAverageQuantity\"](\n ... \"Density\", weight=\"CellMassMsun\")\n >>> flux = dd.calculate_isocontour_flux(\"Density\", rho,\n ... \"velocity_x\", \"velocity_y\", \"velocity_z\", \"Metal_Density\")\n \"\"\"\n flux = 0.0\n for block, mask in self.blocks:\n flux += self._calculate_flux_in_grid(block, mask, field, value, field_x,\n field_y, field_z, fluxing_field)\n flux = self.comm.mpi_allreduce(flux, op=\"sum\")\n return flux\n\n def _calculate_flux_in_grid(self, grid, mask, field, value,\n field_x, field_y, field_z, fluxing_field = None):\n\n vc_fields = [field, field_x, field_y, field_z]\n if fluxing_field is not None:\n vc_fields.append(fluxing_field)\n\n vc_data = grid.get_vertex_centered_data(vc_fields)\n\n if fluxing_field is None:\n ff = np.ones_like(vc_data[field], dtype=\"float64\")\n else:\n ff = vc_data[fluxing_field]\n\n return march_cubes_grid_flux(value, vc_data[field], vc_data[field_x],\n vc_data[field_y], vc_data[field_z], ff, mask, grid.LeftEdge,\n grid.dds)\n\n def extract_connected_sets(self, field, num_levels, min_val, max_val,\n log_space=True, cumulative=True):\n \"\"\"\n This function will create a set of contour objects, defined\n by having connected cell structures, which can then be\n studied and used to 'paint' their source grids, thus enabling\n them to be plotted.\n\n Note that this function *can* return a connected set object that has no\n member values.\n \"\"\"\n if log_space:\n cons = np.logspace(np.log10(min_val),np.log10(max_val),\n num_levels+1)\n else:\n cons = np.linspace(min_val, max_val, num_levels+1)\n contours = {}\n for level in range(num_levels):\n contours[level] = {}\n if cumulative:\n mv = max_val\n else:\n mv = cons[level+1]\n from yt.data_objects.level_sets.api import identify_contours\n from yt.data_objects.level_sets.clump_handling import \\\n add_contour_field\n nj, cids = identify_contours(self, field, cons[level], mv)\n unique_contours = set([])\n for sl_list in cids.values():\n for sl, ff in sl_list:\n unique_contours.update(np.unique(ff))\n contour_key = uuid.uuid4().hex\n # In case we're a cut region already...\n base_object = getattr(self, 'base_object', self)\n add_contour_field(base_object.ds, contour_key)\n for cid in sorted(unique_contours):\n if cid == -1: continue\n contours[level][cid] = base_object.cut_region(\n [\"obj['contours_%s'] == %s\" % (contour_key, cid)],\n {'contour_slices_%s' % contour_key: cids})\n return cons, contours\n\n\n\n def volume(self):\n \"\"\"\n Return the volume of the data container.\n This is found by adding up the volume of the cells with centers\n in the container, rather than using the geometric shape of\n the container, so this may vary very slightly\n from what might be expected from the geometric volume.\n \"\"\"\n return self.quantities.total_quantity((\"index\", \"cell_volume\"))\n\n\nclass YTBooleanContainer(YTSelectionContainer3D):\n \"\"\"\n This is a boolean operation, accepting AND, OR, XOR, and NOT for combining\n multiple data objects.\n\n This object is not designed to be created directly; it is designed to be\n created implicitly by using one of the bitwise operations (&, \\|, ^, \\~) on\n one or two other data objects. 
These correspond to the appropriate boolean\n operations, and the resultant object can be nested.\n\n Parameters\n ----------\n op : string\n Can be AND, OR, XOR, NOT or NEG.\n dobj1 : YTSelectionContainer\n The first selection object\n dobj2 : YTSelectionContainer\n The second object\n\n Examples\n --------\n\n >>> import yt\n >>> ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n >>> sp = ds.sphere(\"c\", 0.1)\n >>> dd = ds.r[:,:,:]\n >>> new_obj = sp ^ dd\n >>> print(new_obj.sum(\"cell_volume\"), dd.sum(\"cell_volume\") -\n ... sp.sum(\"cell_volume\"))\n \"\"\"\n _type_name = \"bool\"\n _con_args = (\"op\", \"dobj1\", \"dobj2\")\n def __init__(self, op, dobj1, dobj2, ds = None, field_parameters = None,\n data_source = None):\n YTSelectionContainer3D.__init__(self, None, ds, field_parameters,\n data_source)\n self.op = op.upper()\n self.dobj1 = dobj1\n self.dobj2 = dobj2\n name = \"Boolean%sSelector\" % (self.op,)\n sel_cls = getattr(yt.geometry.selection_routines, name)\n self._selector = sel_cls(self)\n\n# Many of these items are set up specifically to ensure that\n# we are not breaking old pickle files. This means we must only call the\n# _reconstruct_object and that we cannot mandate any additional arguments to\n# the reconstruction function.\n#\n# In the future, this would be better off being set up to more directly\n# reference objects or retain state, perhaps with a context manager.\n#\n# One final detail: time series or multiple datasets in a single pickle\n# seems problematic.\n\nclass ReconstructedObject(tuple):\n pass\n\ndef _check_nested_args(arg, ref_ds):\n if not isinstance(arg, (tuple, list, ReconstructedObject)):\n return arg\n elif isinstance(arg, ReconstructedObject) and ref_ds == arg[0]:\n return arg[1]\n narg = [_check_nested_args(a, ref_ds) for a in arg]\n return narg\n\ndef _get_ds_by_hash(hash):\n from yt.data_objects.static_output import Dataset\n if isinstance(hash, Dataset):\n return hash\n from yt.data_objects.static_output import _cached_datasets\n for ds in _cached_datasets.values():\n if ds._hash() == hash: return ds\n return None\n\ndef _reconstruct_object(*args, **kwargs):\n dsid = args[0]\n dtype = args[1]\n ds = _get_ds_by_hash(dsid)\n if not ds:\n datasets = ParameterFileStore()\n ds = datasets.get_ds_hash(dsid)\n field_parameters = args[-1]\n # will be much nicer when we can do dsid, *a, fp = args\n args = args[2:-1]\n new_args = [_check_nested_args(a, ds) for a in args]\n cls = getattr(ds, dtype)\n obj = cls(*new_args)\n obj.field_parameters.update(field_parameters)\n return ReconstructedObject((ds, obj))\n"
]
| [
[
"numpy.concatenate",
"numpy.max",
"numpy.array",
"numpy.ones_like",
"numpy.empty",
"numpy.zeros",
"pandas.DataFrame",
"numpy.min",
"numpy.log10",
"numpy.linspace",
"numpy.unique"
]
]
|
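The `extract_connected_sets` method in the record above builds its contour levels with `numpy.logspace`/`numpy.linspace` (two of the APIs listed for this file). Below is a minimal standalone sketch of just that level construction, not code from the repo; the bounds and level counts are invented for illustration.

```python
import numpy as np

# Level construction as in extract_connected_sets: log-spaced or linear
# level edges between min_val and max_val, one extra edge for num_levels bins.
def contour_levels(min_val, max_val, num_levels, log_space=True):
    if log_space:
        return np.logspace(np.log10(min_val), np.log10(max_val), num_levels + 1)
    return np.linspace(min_val, max_val, num_levels + 1)

print(contour_levels(1e-3, 1e3, 6))        # log-spaced level edges
print(contour_levels(0.0, 1.0, 4, False))  # linear level edges
```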
MauroLuzzatto/algorithmic-explanations | [
"4a362ae9576cc68ecf4b61dd6bad2105ff62bf57"
]
| [
"src/model/ModelClass.py"
]
| [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 24 21:29:13 2020\n\n@author: mauro\n\"\"\"\n\n\nimport datetime\nimport json\nimport os\nimport pickle\nimport random\n\nimport matplotlib.pyplot as plt # type: ignore\nimport pandas as pd # type: ignore\nimport sklearn # type: ignore\nfrom LoggerClass import LoggerClass\nfrom sklearn.base import is_classifier, is_regressor # type: ignore\nfrom sklearn.datasets import load_diabetes # type: ignore\nfrom sklearn.metrics import f1_score # type: ignore\nfrom sklearn.metrics import (\n accuracy_score,\n mean_absolute_error,\n mean_absolute_percentage_error,\n mean_squared_error,\n precision_score,\n r2_score,\n)\nfrom sklearn.model_selection import RandomizedSearchCV # type: ignore\nfrom sklearn.model_selection import train_test_split # type: ignore\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom xgboost import XGBRegressor # type: ignore\n\nfrom src.model.config import path_base\nfrom src.model.utils import create_folder, get_dataset\n\n\nclass ModelClass(object):\n \"\"\"\n This class provides the funktionality to train a model using\n a random grid search and evaluate the results\n \"\"\"\n\n def __init__(\n self,\n estimator: sklearn.base.BaseEstimator,\n X: pd.DataFrame,\n y: pd.DataFrame,\n path_model: str,\n folder: str = None,\n ) -> None:\n \"\"\"\n Initialize the class and setup the logger and define the paths to save the results to\n\n Args:\n estimator (sklearn.BaseEstimator): [description]\n X (pd.DataFrame): [description]\n y (pd.DataFrame): [description]\n path_model (str): [description]\n folder (str): add a folder extension in the save folder\n \"\"\"\n self.X = X.values\n self.y = y.values\n\n self.column_names = list(X)\n self.path_model = path_model\n self.estimator = estimator\n self.save_name = estimator.__class__.__name__\n\n self.folder = folder\n\n self.set_paths()\n\n Logger = LoggerClass()\n self.logger = Logger(self.path_save, stage=\"training\")\n\n self.get_train_test_split()\n\n def set_paths(self):\n \"\"\"\n Define the neceneeded paths for saving the results\n \"\"\"\n self.time_stamp = datetime.datetime.now().strftime(\"%Y-%m-%d--%H-%M-%S\")\n\n if self.folder:\n folder_name = f\"{self.time_stamp} - {self.folder}\"\n else:\n folder_name = self.time_stamp\n\n self.path_model = create_folder(os.path.join(self.path_model, folder_name))\n self.path_save = create_folder(os.path.join(self.path_model, \"results\"))\n\n def get_train_test_split(\n self, test_size: float = 0.2, random_state: float = None\n ) -> None:\n \"\"\"\n Get the train and test split of the features and target values\n\n Args:\n test_size (float, optional): [description]. Defaults to 0.2.\n random_state ([type], optional): [description]. 
Defaults to None.\n \"\"\"\n\n if not random_state:\n random_state = random.randint(0, 1000)\n\n self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(\n self.X, self.y, random_state=random_state, test_size=test_size\n )\n\n self.logger.info(f\"self.X: {self.X.shape}\")\n self.logger.info(f\"self.y: {self.y.shape}\")\n self.logger.info(f\"self.X_train: {self.X_train.shape}\")\n self.logger.info(f\"self.y_train: {self.y_train.shape}\")\n self.logger.info(f\"self.X_test: {self.X_test.shape}\")\n self.logger.info(f\"self.y_test: {self.y_test.shape}\")\n self.logger.info(f\"test_size: {test_size}\")\n self.logger.info(f\"random_state: {test_size}\")\n self.logger.info(f\"column_names: {self.column_names}\")\n\n def hyperparamter_tuning(\n self, param_distributions: dict, cv_settings: dict\n ) -> None:\n \"\"\"\n Execute a random grid search using the distribution of hyperparemeter values\n and CV values\n\n Args:\n param_distributions (dict): dictionary with distribution of values per hyperparameter\n cv_settings (dict): dictionary CV settings\n \"\"\"\n random_search = self.build_CV_search(param_distributions, cv_settings)\n random_search.fit(self.X_train, self.y_train)\n self.get_CV_results(random_search, sort_by=\"rank_test_score\")\n\n self.best_estimator = random_search.best_estimator_\n self.best_params = random_search.best_params_\n\n self.save_parameters(param_distributions, \"param_distributions\")\n self.save_parameters(cv_settings, \"cv_settings\")\n\n def save_parameters(self, variable: dict, name: str) -> None:\n \"\"\"\n Save dictionary to json using the provided name\n \"\"\"\n with open(os.path.join(self.path_save, f\"{name}.json\"), \"w\") as fp:\n json.dump(variable, fp)\n\n def build_pipeline(self, estimator=None):\n \"\"\"\n Build the pipeline for processing the data before model training\n \"\"\"\n return Pipeline(\n steps=[\n (\"scale\", StandardScaler(with_mean=True, with_std=True)),\n (\"estimator\", estimator),\n ]\n )\n\n def build_CV_search(\n self, param_distributions: dict, param_cv: dict\n ) -> sklearn.model_selection.RandomizedSearchCV:\n \"\"\"\n Setup the random search cross validation object\n\n Args:\n param_distributions (dict): [description]\n param_cv (dict): [description]\n\n Returns:\n sklearn.RandomizedSearchCV: [description]\n \"\"\"\n random_search = RandomizedSearchCV(\n estimator=self.estimator,\n param_distributions=param_distributions,\n n_iter=param_cv[\"n_iter\"],\n scoring=param_cv[\"scoring\"],\n cv=param_cv[\"cv\"],\n return_train_score=False,\n n_jobs=param_cv[\"n_jobs\"],\n verbose=param_cv[\"verbose\"],\n random_state=param_cv[\"random_state\"],\n )\n\n return random_search\n\n def get_CV_results(\n self,\n random_search: sklearn.model_selection.RandomizedSearchCV,\n sort_by: str,\n ascending: bool = True,\n n_rows: int = 1000,\n ) -> None:\n \"\"\"\n Extract the results from the random search Cross Validation\n\n Args:\n random_search (sklearn.model_selection.RandomizedSearchCV): DESCRIPTION.\n sort_by (str): DESCRIPTION.\n ascending (bool, optional): DESCRIPTION. Defaults to True.\n n_rows (int, optional): DESCRIPTION. 
Defaults to 1000.\n (TYPE): DESCRIPTION.\n\n Returns:\n None: DESCRIPTION.\n\n \"\"\"\n df_results = (\n pd.DataFrame(random_search.cv_results_)\n .sort_values(by=sort_by, ascending=ascending)\n .head(n_rows)\n )\n\n df_results.to_csv(\n os.path.join(self.path_save, \"cv_results.csv\"),\n index=False,\n sep=\";\",\n float_format=\"%.3f\",\n )\n\n self.logger.info(f\"Training score: \\n{random_search.best_score_:.2f}\")\n self.logger.info(f\"Best hyperparameters: \\n{random_search.best_params_}\")\n\n def full_data_training(self) -> None:\n \"\"\"\n Train the model on the the full dataset and on the best hyperparameters\n\n Returns:\n None: DESCRIPTION.\n\n \"\"\"\n self.final_model = self.estimator.set_params(**self.best_params)\n self.final_model.fit(self.X, self.y)\n\n def evaluate(self) -> None:\n \"\"\"\n Evaluate the model with the best performing hyperparamters,\n use the test set to the metrics for the model\n\n Returns:\n None: DESCRIPTION.\n\n \"\"\"\n self.y_pred = self.best_estimator.predict(self.X_test)\n\n if is_regressor(self.estimator):\n methods = [\n r2_score,\n mean_absolute_error,\n mean_squared_error,\n mean_absolute_percentage_error,\n ]\n else:\n methods = [accuracy_score, precision_score, f1_score]\n\n results = {}\n for method in methods:\n score = method(self.y_test, self.y_pred)\n results[method.__name__] = score\n self.logger.info(f\"{method.__name__}: {score:.2f}\")\n\n pd.DataFrame(results, index=[self.time_stamp]).to_csv(\n os.path.join(self.path_save, \"best_score.csv\"), float_format=\"%.2f\", sep=\";\"\n )\n\n def save_pickle(self) -> None:\n \"\"\"\n save the estimator into a pickle file\n\n Returns:\n None: DESCRIPTION.\n\n \"\"\"\n name = f\"{self.save_name}.pickle\"\n with open(os.path.join(self.path_model, name), \"wb\") as handle:\n pickle.dump(self.final_model, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n self.logger.info(f\"Save: {os.path.join(self.path_model, name)}\")\n\n def load_pickle(self, name: str) -> None:\n \"\"\"\n Load the estimator from a pickle file\n\n Args:\n name (str): DESCRIPTION.\n\n Returns:\n None: DESCRIPTION.\n\n \"\"\"\n assert name.endswith(\".pickle\")\n\n with open(os.path.join(self.path_model, name), \"rb\") as handle:\n estimator = pickle.load(handle)\n\n self.logger.info(f\"Load: {os.path.join(self.path_model, name)}\")\n return estimator\n\n def visualize(self, image_name: str = \"results.png\"):\n \"\"\"\n plot the predictions versus the true values\n\n Args:\n image_name (str, optional): DESCRIPTION. 
Defaults to \"results.png\".\n\n Returns:\n None.\n\n \"\"\"\n fig = plt.figure(figsize=(4, 5))\n # Plot Real vs Predict\n plt.scatter(self.y_pred, self.y_test, alpha=0.5)\n plt.xlabel(\"y_pred\")\n plt.ylabel(\"y_test\")\n plt.show(block=False)\n fig.savefig(os.path.join(self.path_save, image_name))\n\n def save_config(self, config):\n \"\"\"\n save the configurations of the dataset\n\n Args:\n config (TYPE): DESCRIPTION.\n\n Returns:\n None.\n\n \"\"\"\n with open(\n os.path.join(self.path_save, \"config.json\"), \"w\", encoding=\"utf-8\"\n ) as f:\n json.dump(config, f, ensure_ascii=False, indent=4)\n\n def train(self, param_distributions, cv_settings, config):\n \"\"\"\n wrapper function to execute the full training process end-to-end,\n including hyperparameter tuning, evaluation, visualization\n and model saving\n\n Args:\n param_distributions (TYPE): DESCRIPTION.\n cv_settings (TYPE): DESCRIPTION.\n config (TYPE): DESCRIPTION.\n\n Returns:\n None.\n\n \"\"\"\n self.hyperparamter_tuning(param_distributions, cv_settings)\n self.evaluate()\n self.visualize()\n self.full_data_training()\n self.save_pickle()\n self.save_config(config)\n\n\nparam_distributions = {\n \"learning_rate\": [0.001, 0.01, 0.05, 0.1, 0.25, 0.5],\n \"max_depth\": [3, 5, 7, 9],\n \"min_child_weight\": [1, 3, 5, 10],\n \"subsample\": [0.5, 0.7, 0.8, 1.0],\n \"colsample_bytree\": [0.25, 0.5, 0.7, 1.0],\n \"n_estimators\": [100, 200],\n \"objective\": [\"reg:squarederror\"],\n}\n\ncv_settings = {\n \"n_iter\": 400, # total combinations testes\n \"scoring\": \"r2\",\n \"cv\": 4,\n \"random_state\": 0,\n \"n_jobs\": -1,\n \"verbose\": 3,\n}\n\nif __name__ == \"__main__\":\n\n path_load = os.path.join(path_base, r\"dataset\", \"training\")\n path_model = os.path.join(path_base, r\"model\")\n\n name = \"training_data_v2.csv\"\n\n source = \"training\"\n\n if source == \"demo\":\n diabetes = load_diabetes()\n X = diabetes.data\n y = diabetes.target\n elif source == \"training\":\n X, y = get_dataset(\n path_load=path_load,\n name=name,\n )\n else:\n raise\n\n estimator = XGBRegressor()\n\n config = {\"target\": list(y)[0], \"features\": list(X)}\n\n model = ModelClass(estimator, X, y, path_model)\n model.train(param_distributions, cv_settings, config)\n"
]
| [
[
"sklearn.datasets.load_diabetes",
"sklearn.preprocessing.StandardScaler",
"matplotlib.pyplot.xlabel",
"pandas.DataFrame",
"sklearn.base.is_regressor",
"sklearn.model_selection.RandomizedSearchCV",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.scatter"
]
]
|
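`ModelClass.build_CV_search` above wires a `RandomizedSearchCV` from a settings dict. The sketch below shows the same pattern end to end on the diabetes demo data that the script's "demo" branch loads; it is not the repo's code. To keep it self-contained it swaps `XGBRegressor` for sklearn's `RandomForestRegressor`, and the parameter grid is illustrative only.

```python
from sklearn.datasets import load_diabetes
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import RandomizedSearchCV

X, y = load_diabetes(return_X_y=True)
cv_settings = {"n_iter": 4, "scoring": "r2", "cv": 4, "random_state": 0}

# Same construction pattern as build_CV_search: settings dict -> search object.
search = RandomizedSearchCV(
    estimator=RandomForestRegressor(random_state=0),
    param_distributions={"n_estimators": [50, 100], "max_depth": [3, 5, None]},
    n_iter=cv_settings["n_iter"],
    scoring=cv_settings["scoring"],
    cv=cv_settings["cv"],
    random_state=cv_settings["random_state"],
)
search.fit(X, y)
print(search.best_params_, round(search.best_score_, 3))
```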
tkys/text-detection-ctpn | [
"b3f20a4255a871512e9bd59a97bb761f05d9be39"
]
| [
"utils/text_connector/text_proposal_connector_oriented.py"
]
| [
"# coding:utf-8\nimport numpy as np\n\nfrom utils.text_connector.text_proposal_graph_builder import TextProposalGraphBuilder\n\n\nclass TextProposalConnector:\n \"\"\"\n Connect text proposals into text lines\n \"\"\"\n\n def __init__(self):\n self.graph_builder = TextProposalGraphBuilder()\n\n def group_text_proposals(self, text_proposals, scores, im_size):\n graph = self.graph_builder.build_graph(text_proposals, scores, im_size)\n return graph.sub_graphs_connected()\n\n def fit_y(self, X, Y, x1, x2):\n len(X) != 0\n # if X only include one point, the function will get line y=Y[0]\n if np.sum(X == X[0]) == len(X):\n return Y[0], Y[0]\n p = np.poly1d(np.polyfit(X, Y, 1))\n return p(x1), p(x2)\n\n def get_text_lines(self, text_proposals, scores, im_size):\n \"\"\"\n text_proposals:boxes\n \n \"\"\"\n # tp=text proposal\n tp_groups = self.group_text_proposals(text_proposals, scores, im_size) # まず画像を作成し、どの小さなボックスで構成されるテキスト行を取得します\n\n text_lines = np.zeros((len(tp_groups), 8), np.float32)\n\n for index, tp_indices in enumerate(tp_groups):\n text_line_boxes = text_proposals[list(tp_indices)] # 各テキスト行のすべての小さなボックス\n X = (text_line_boxes[:, 0] + text_line_boxes[:, 2]) / 2 # 各小さなボックスの中心のx、y座標を見つける\n Y = (text_line_boxes[:, 1] + text_line_boxes[:, 3]) / 2\n\n z1 = np.polyfit(X, Y, 1) # 多項式フィッティング,以前に求めた中心点に基づいて直線(最小二乗)を当てはめる\n\n x0 = np.min(text_line_boxes[:, 0]) # テキスト行x座標の最小値\n x1 = np.max(text_line_boxes[:, 2]) # テキスト行のx座標の最大値\n\n offset = (text_line_boxes[0, 2] - text_line_boxes[0, 0]) * 0.5 # boxの半分の幅\n\n # すべての小さなボックスの左上隅の点に線を合わせてから、テキスト行のx座標の左端と右端のy座標を計算します。\n lt_y, rt_y = self.fit_y(text_line_boxes[:, 0], text_line_boxes[:, 1], x0 + offset, x1 - offset)\n # すべての小さなボックスの左下隅に線を合わせてから、テキスト行のx座標の左端と右端のy座標を計算します。\n lb_y, rb_y = self.fit_y(text_line_boxes[:, 0], text_line_boxes[:, 3], x0 + offset, x1 - offset)\n\n score = scores[list(tp_indices)].sum() / float(len(tp_indices)) # すべての小さなボックススコアの平均をテキスト行の平均として見つける\n\n text_lines[index, 0] = x0\n text_lines[index, 1] = min(lt_y, rt_y) # テキスト行の上部にあるラインセグメントのy座標の小さな値\n text_lines[index, 2] = x1\n text_lines[index, 3] = max(lb_y, rb_y) # テキスト行の下部にあるラインセグメントのy座標の大きな値\n text_lines[index, 4] = score # テキスト行スコア\n text_lines[index, 5] = z1[0] # 中心点に従って適合された線のk、b\n text_lines[index, 6] = z1[1]\n height = np.mean((text_line_boxes[:, 3] - text_line_boxes[:, 1])) # 小さなフレーム平均高さ\n text_lines[index, 7] = height + 2.5\n\n text_recs = np.zeros((len(text_lines), 9), np.float)\n index = 0\n for line in text_lines:\n b1 = line[6] - line[7] / 2 # テキスト行の高さと中心線に応じて、テキスト行の上下の行のb値を見つけます\n b2 = line[6] + line[7] / 2\n x1 = line[0]\n y1 = line[5] * line[0] + b1 # 左上\n x2 = line[2]\n y2 = line[5] * line[2] + b1 # 右上\n x3 = line[0]\n y3 = line[5] * line[0] + b2 # 左下\n x4 = line[2]\n y4 = line[5] * line[2] + b2 # 右下\n disX = x2 - x1\n disY = y2 - y1\n width = np.sqrt(disX * disX + disY * disY) # テキスト行幅\n\n fTmp0 = y3 - y1 # テキスト行の高さ\n fTmp1 = fTmp0 * disY / width\n x = np.fabs(fTmp1 * disX / width) # 補償する\n y = np.fabs(fTmp1 * disY / width)\n if line[5] < 0:\n x1 -= x\n y1 += y\n x4 += x\n y4 -= y\n else:\n x2 += x\n y2 += y\n x3 -= x\n y3 -= y\n text_recs[index, 0] = x1\n text_recs[index, 1] = y1\n text_recs[index, 2] = x2\n text_recs[index, 3] = y2\n text_recs[index, 4] = x4\n text_recs[index, 5] = y4\n text_recs[index, 6] = x3\n text_recs[index, 7] = y3\n text_recs[index, 8] = line[4]\n index = index + 1\n\n return text_recs\n"
]
| [
[
"numpy.max",
"numpy.sum",
"numpy.min",
"numpy.mean",
"numpy.fabs",
"numpy.polyfit",
"numpy.sqrt"
]
]
|
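The `fit_y` helper above reduces to a degree-1 `np.polyfit` evaluated at the text line's x extents. A tiny standalone illustration of that step, with made-up box coordinates (not data from the repo):

```python
import numpy as np

# Degree-1 least-squares fit through box-edge points, as in fit_y.
X = np.array([10.0, 30.0, 50.0, 70.0])   # left edges of the small boxes
Y = np.array([12.0, 13.5, 15.2, 16.8])   # top edges of the small boxes
p = np.poly1d(np.polyfit(X, Y, 1))       # y = k * x + b
print(p(np.min(X)), p(np.max(X)))        # y at the leftmost/rightmost x
```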
assistent-cat/catotron-cpu | [
"a247b4197b14b7e173018c292fa07114792a4422"
]
| [
"waveglow/denoiser.py"
]
| [
"#import sys\n#sys.path.append('tacotron2')\n\nimport torch\n#from waveglow.tacotron2.layers import STFT\nfrom layers import STFT\n\n\nclass Denoiser(torch.nn.Module):\n \"\"\" Removes model bias from audio produced with waveglow \"\"\"\n\n def __init__(self, waveglow, filter_length=1024, n_overlap=4,\n win_length=1024, mode='zeros'):\n super(Denoiser, self).__init__()\n self.stft = STFT(filter_length=filter_length,\n hop_length=int(filter_length/n_overlap),\n win_length=win_length) #.cuda()\n if mode == 'zeros':\n mel_input = torch.zeros(\n (1, 80, 88),\n dtype=waveglow.upsample.weight.dtype,\n device=waveglow.upsample.weight.device)\n elif mode == 'normal':\n mel_input = torch.randn(\n (1, 80, 88),\n dtype=waveglow.upsample.weight.dtype,\n device=waveglow.upsample.weight.device)\n else:\n raise Exception(\"Mode {} if not supported\".format(mode))\n\n with torch.no_grad():\n bias_audio = waveglow.infer(mel_input, sigma=0.0).float()\n bias_spec, _ = self.stft.transform(bias_audio)\n\n self.register_buffer('bias_spec', bias_spec[:, :, 0][:, :, None])\n\n def forward(self, audio, strength=0.1):\n #audio_spec, audio_angles = self.stft.transform(audio.cuda().float())\n audio_spec, audio_angles = self.stft.transform(audio.float())\n audio_spec_denoised = audio_spec - self.bias_spec * strength\n audio_spec_denoised = torch.clamp(audio_spec_denoised, 0.0)\n audio_denoised = self.stft.inverse(audio_spec_denoised, audio_angles)\n return audio_denoised\n"
]
| [
[
"torch.zeros",
"torch.no_grad",
"torch.randn",
"torch.clamp"
]
]
|
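The core of `Denoiser.forward` above is a scaled bias subtraction followed by a clamp at zero. A toy sketch of just that tensor arithmetic; the shapes and values are invented stand-ins, whereas the real class derives `bias_spec` from `waveglow.infer` on an all-zeros mel input.

```python
import torch

audio_spec = torch.rand(1, 513, 100)      # stand-in STFT magnitudes
bias_spec = torch.rand(1, 513, 1) * 0.05  # stand-in stored bias spectrum
strength = 0.1

# Subtract the (broadcast) bias and clamp negatives to zero,
# mirroring the two lines at the heart of Denoiser.forward.
denoised = torch.clamp(audio_spec - bias_spec * strength, 0.0)
print(denoised.shape, float(denoised.min()))
```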
tchayintr/bilstm-crf-topk-wordseg | [
"eeaa8efa50643d610203ea87e51221c70f0ba4c7"
]
| [
"src/models/util.py"
]
| [
"from enum import Enum, auto\nimport numpy as np\nimport sys\nimport torch\nimport torch.nn as nn\n\nfrom models.common import Embedding, GRU, RNNTanh, LSTM\n\n\nclass ModelUsage(Enum):\n NONE = auto()\n ADD = auto()\n CONCAT = auto()\n INIT = auto()\n\n def get_instance(key):\n if key.lower() == 'concat':\n return ModelUsage.CONCAT\n elif key.lower() == 'add':\n return ModelUsage.ADD\n elif key.lower() == 'init':\n return ModelUsage.INIT\n else:\n return ModelUsage.NONE\n\n\ndef construct_embeddings(n_vocab,\n rand_size,\n pretrained_size=0,\n usage=ModelUsage.INIT,\n ignore_label=None):\n if pretrained_size <= 0 or usage == ModelUsage.NONE:\n rand_embedding = nn.Embedding(n_vocab,\n rand_size,\n padding_idx=ignore_label)\n pretrained_embedding = None\n\n elif usage == ModelUsage.CONCAT or usage == ModelUsage.ADD:\n rand_embedding = nn.Embedding(n_vocab,\n rand_size,\n padding_idx=ignore_label)\n pretrained_embedding = nn.Embedding(n_vocab,\n pretrained_size,\n padding_idx=ignore_label)\n\n elif usage == ModelUsage.INIT:\n rand_embedding = nn.Embedding(n_vocab,\n pretrained_size,\n padding_idx=ignore_label)\n pretrained_embedding = None\n\n return rand_embedding, pretrained_embedding\n\n\ndef construct_RNN(\n unit_type,\n embed_size,\n hidden_size,\n n_layers,\n batch_first,\n dropout,\n bidirectional,\n):\n rnn = None\n\n if unit_type == 'lstm':\n rnn = LSTM(embed_size, hidden_size, n_layers, batch_first, dropout,\n bidirectional)\n\n elif unit_type == 'gru':\n rnn = GRU(embed_size, hidden_size, n_layers, batch_first, dropout,\n bidirectional)\n\n else:\n rnn = RNNTanh(\n embed_size,\n hidden_size,\n n_layers,\n batch_first,\n dropout,\n bidirectional,\n nonlinearity='tanh',\n )\n\n print('# RNN unit: {}'.format(rnn), file=sys.stderr)\n\n return rnn\n\n\ndef load_pretrained_embedding_layer(id2unigram,\n embed,\n external_model,\n finetuning=False):\n xp = torch\n n_vocab = len(id2unigram)\n size = external_model.wv.syn0[0].shape[0]\n\n weight = []\n count = 0\n\n for i in range(n_vocab):\n key = id2unigram[i]\n if key in external_model.wv.vocab:\n vec = external_model.wv[key]\n count += 1\n else:\n if finetuning:\n vec = nn.init.normal_(torch.zeros((size, ), dtype=torch.float))\n else:\n vec = xp.zeros(size, dtype=torch.float)\n weight.append(vec)\n\n weight = xp.reshape(weight, (n_vocab, size))\n embed.weight = torch.nn.Parameter(weight)\n\n if count >= 1:\n print('Use {} pretrained embedding vectors\\n'.format(count),\n file=sys.stderr)\n\n\ndef grow_embedding_layers(n_vocab_org,\n n_vocab_grown,\n rand_embed,\n pretrained_embed=None,\n external_model=None,\n id2unigram_grown=None,\n pretrained_model_usage=ModelUsage.NONE,\n train=False,\n fasttext=False):\n if n_vocab_org == n_vocab_grown:\n return\n\n if external_model and pretrained_model_usage != ModelUsage.NONE:\n if pretrained_model_usage == ModelUsage.INIT:\n grow_embedding_layer_with_pretrained_model(n_vocab_org,\n n_vocab_grown,\n rand_embed,\n external_model,\n id2unigram_grown,\n train=train,\n fasttext=fasttext)\n\n else:\n grow_embedding_layers_with_pretrained_model(n_vocab_org,\n n_vocab_grown,\n rand_embed,\n pretrained_embed,\n external_model,\n id2unigram_grown,\n train=train)\n\n else:\n grow_embedding_layer_without_pretrained_model(n_vocab_org,\n n_vocab_grown,\n rand_embed,\n train=train)\n\n\ndef grow_embedding_layer_without_pretrained_model(n_vocab_org,\n n_vocab_grown,\n rand_embed,\n train=False):\n xp = torch\n device = rand_embed.weight.device\n diff = n_vocab_grown - n_vocab_org\n d_rand = 
rand_embed.weight.shape[1]\n\n if train:\n w2_rand = nn.init.normal_(\n xp.zeros((diff, d_rand), dtype=torch.float, device=device))\n else:\n w2_rand = xp.zeros((diff, d_rand), dtype=torch.float, device=device)\n\n w_rand = torch.cat((rand_embed.weight, w2_rand), axis=0)\n rand_embed.weight = torch.nn.Parameter(w_rand)\n print('Grow embedding matrix: {} -> {}'.format(n_vocab_org,\n rand_embed.weight.shape[0]),\n file=sys.stderr)\n\n\n# rand model -> grow using external model\ndef grow_embedding_layer_with_pretrained_model(n_vocab_org,\n n_vocab_grown,\n rand_embed,\n external_model,\n id2unigram_grown,\n train=False,\n fasttext=False):\n diff = n_vocab_grown - n_vocab_org\n d_rand = rand_embed.weight.shape[1]\n\n count = 0\n w2_rand = []\n\n # [MEMO] the following error happened if fasttext=True:\n # cupy.cuda.cudnn.CuDNNError: CUDNN_STATUS_INTERNAL_ERROR: b'CUDNN_STATUS_INTERNAL_ERROR'\n wv_vocab = external_model.wv if fasttext else external_model.wv.vocab\n for i in range(n_vocab_org, n_vocab_grown):\n key = id2unigram_grown[i]\n if key in wv_vocab:\n vec_rand = torch.tensor(external_model.wv[key], dtype=torch.float)\n count += 1\n elif train:\n vec_rand = nn.init.normal_(\n torch.zeros((d_rand, ), dtype=torch.float))\n else:\n vec_rand = rand_embed.weight[\n 0].data # use pretrained vector of unknown token\n w2_rand.append(vec_rand)\n\n # w2_rand = np.reshape(w2_rand, (diff, d_rand))\n # if cuda.get_array_module(rand_embed.W) == cuda.cupy:\n # w2_rand = chainer.Variable(w2_rand)\n # w2_rand.to_gpu()\n w2_rand = torch.stack(w2_rand)\n assert w2_rand.size() == (diff, d_rand)\n\n w_rand = torch.cat((rand_embed.weight, w2_rand), axis=0)\n rand_embed.weight = torch.nn.Parameter(w_rand)\n\n print('Grow embedding matrix: {} -> {}'.format(n_vocab_org,\n rand_embed.weight.shape[0]),\n file=sys.stderr)\n if count >= 1:\n print('Add {} pretrained embedding vectors'.format(count),\n file=sys.stderr)\n\n\n# rand model -> grow\n# pretrained model -> grow using external model\ndef grow_embedding_layers_with_pretrained_model(n_vocab_org,\n n_vocab_grown,\n rand_embed,\n pretrained_embed,\n external_model,\n id2unigram_grown,\n train=False):\n diff = n_vocab_grown - n_vocab_org\n d_rand = rand_embed.weight.shape[1]\n d_pretrained = pretrained_embed.weight.shape[\n 1] # external_model.wv.syn0[0].shape[0]\n\n count = 0\n w2_rand = []\n w2_pretrained = []\n\n for i in range(n_vocab_org, n_vocab_grown):\n if train: # resume training\n vec_rand = nn.init.normal_(\n torch.zeros((d_rand, ), dtype=torch.float))\n else: # test\n vec_rand = rand_embed.weight[\n 0].data # use pretrained vector of unknown token\n w2_rand.append(vec_rand)\n\n key = id2unigram_grown[i]\n if key in external_model.wv.vocab:\n vec_pretrained = torch.tensor(external_model.wv[key],\n dtype=torch.float)\n count += 1\n else:\n vec_pretrained = torch.zeros(d_pretrained, dtype=torch.float)\n w2_pretrained.append(vec_pretrained)\n\n # w2_rand = np.reshape(w2_rand, (diff, d_rand))\n # if cuda.get_array_module(rand_embed.W) == cuda.cupy:\n # w2_rand = chainer.Variable(w2_rand)\n # w2_rand.to_gpu()\n w2_rand = torch.stack(w2_rand)\n assert w2_rand.size() == (diff, d_rand)\n w_rand = torch.cat((rand_embed.weight, w2_rand), axis=0)\n rand_embed.weight = torch.nn.Parameter(w_rand)\n\n # w2_pretrained = np.reshape(w2_pretrained, (diff, d_pretrained))\n # if cuda.get_array_module(rand_embed.W) == cuda.cupy:\n # w2_pretrained = chainer.Variable(w2_pretrained)\n # w2_pretrained.to_gpu()\n w2_pretrained = torch.stack(w2_pretrained)\n assert 
w2_pretrained.size() == (diff, d_pretrained)\n assert w2_rand.size() == (diff, d_rand)\n w_pretrained = torch.cat((pretrained_embed.weight, w2_pretrained), 0)\n pretrained_embed.weight = torch.nn.Parameter(w_pretrained)\n\n print('Grow embedding matrix: {} -> {}'.format(n_vocab_org,\n rand_embed.weight.shape[0]),\n file=sys.stderr)\n print('Grow pretrained embedding matrix: {} -> {}'.format(\n n_vocab_org, pretrained_embed.weight.shape[0]),\n file=sys.stderr)\n if count >= 1:\n print('Add {} pretrained embedding vectors'.format(count),\n file=sys.stderr)\n\n\ndef grow_crf_layer(n_labels_org, n_labels_grown, crf, file=sys.stderr):\n diff = n_labels_grown - n_labels_org\n if diff <= 0:\n return\n\n c_org = crf.cost\n c_diff1 = torch.tensor(np.zeros((n_labels_org, diff), dtype=np.float32))\n c_diff2 = torch.tensor(np.zeros((diff, n_labels_grown), dtype=np.float32))\n c_tmp = torch.cat((c_org, c_diff1), 1)\n c_new = torch.cat((c_tmp, c_diff2), 0)\n crf.cost = torch.nn.Parameter(c_new)\n\n print('Grow CRF layer: {} -> {}'.format(c_org.shape,\n crf.cost.shape,\n file=sys.stderr))\n\n\ndef grow_MLP(n_labels_org, n_labels_grown, out_layer, file=sys.stderr):\n diff = n_labels_grown - n_labels_org\n if diff <= 0:\n return\n\n w_org = out_layer.weight\n w_org_shape = w_org.shape\n\n size = w_org.shape[1]\n w_diff_array = np.zeros((diff, size), dtype=np.float32)\n w_diff_array[:] = np.random.normal(scale=1.0, size=(diff, size))\n w_diff = torch.nn.Parameter(w_diff_array)\n w_new = torch.cat((w_org, w_diff), 0)\n out_layer.weight = torch.nn.Parameter(w_new)\n w_shape = out_layer.weight.shape\n\n if 'b' in out_layer.__dict__:\n b_org = out_layer.b\n b_org_shape = b_org.shape\n b_diff_array = np.zeros((diff, ), dtype=np.float32)\n b_diff_array[:] = np.random.normal(scale=1.0, size=(diff, ))\n b_diff = torch.nn.Parameter(b_diff_array)\n b_new = torch.cat((b_org, b_diff), 0)\n out_layer.b = torch.nn.Parameter(b_new)\n b_shape = out_layer.b.shape\n else:\n b_org_shape = b_shape = None\n\n print('Grow MLP output layer: {}, {} -> {}, {}'.format(\n w_org_shape, b_org_shape, w_shape, b_shape),\n file=sys.stderr)\n\n\ndef grow_biaffine_layer(n_labels_org,\n n_labels_grown,\n biaffine,\n file=sys.stderr):\n diff = n_labels_grown - n_labels_org\n if diff <= 0:\n return\n\n w_org = biaffine.weight\n w_org_shape = w_org.shape\n\n size = w_org.shape[1]\n\n w_diff_array = np.zeros((diff, size), dtype=np.float32)\n w_diff_array[:] = np.random.normal(scale=1.0, size=(diff, size))\n w_diff = torch.nn.Parameter(w_diff_array)\n w_new = torch.cat((w_org, w_diff), 0)\n biaffine.weight = torch.nn.Parameter(w_new)\n w_shape = biaffine.weight.shape\n\n if 'b' in biaffine.__dict__:\n b_org = biaffine.b\n b_org_shape = b_org.shape\n b_diff_array = np.zeros((diff, ), dtype=np.float32)\n b_diff_array[:] = np.random.normal(scale=1.0, size=(diff, ))\n b_diff = torch.nn.Parameter(b_diff_array)\n b_new = torch.cat((b_org, b_diff), 0)\n biaffine.b = torch.nn.Parameter(b_new)\n b_shape = biaffine.b.shape\n else:\n b_org_shape = b_shape = None\n\n print('Grow biaffine layer: {}, {} -> {}, {}'.format(\n w_org_shape, b_org_shape, w_shape, b_shape),\n file=sys.stderr)\n\n\ndef inverse_indices(indices):\n indices = indices.cpu().numpy()\n r = np.empty_like(indices)\n r[indices] = np.arange(len(indices))\n return r\n"
]
| [
[
"torch.zeros",
"numpy.random.normal",
"torch.cat",
"torch.stack",
"numpy.zeros",
"torch.nn.Parameter",
"torch.tensor",
"torch.nn.Embedding",
"numpy.empty_like"
]
]
|
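The `grow_embedding_layer*` functions above all follow one pattern: build rows for the new vocabulary entries, `torch.cat` them under the old weight, and rewrap the result as a `Parameter`. A minimal sketch of that pattern with invented sizes, not the repo's code:

```python
import torch
import torch.nn as nn

n_vocab_org, n_vocab_grown, dim = 5, 8, 4
embed = nn.Embedding(n_vocab_org, dim)

# New rows for the added vocabulary items, normally initialized in place.
extra = nn.init.normal_(torch.zeros(n_vocab_grown - n_vocab_org, dim))

# Append below the existing rows and rewrap as a trainable Parameter.
grown = torch.cat((embed.weight.data, extra), 0)
embed.weight = nn.Parameter(grown)
print(embed.weight.shape)  # torch.Size([8, 4])
```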
dlitvak/sdcnd-term1-proj3-behavioral-cloning | [
"b57c0a8f18af4ac484cf95d1e73d9e2c889ebfaa"
]
| [
"utils.py"
]
| [
"import csv\nimport os\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.image as mpimg\n\n\ndef read_data(dir=\"data\", csv_file=\"driving_log.csv\", field_names=(\"center\",\"left\",\"right\",\"steering\")):\n \"\"\"Read data from csv_file per field_names columns. Obtain the image size.\"\"\"\n data = []\n with open(os.path.join(dir, csv_file)) as f:\n csvReader = csv.DictReader(f)\n for row in csvReader:\n data.append(list(row[k] for k in field_names))\n\n X, y = [], []\n img_shape = None\n for row in data:\n centerImgUrl = row[0].strip()\n X.append(os.path.join(dir, centerImgUrl))\n steering = row[3]\n y.append(steering)\n\n # leftImgUrl = row[1].strip()\n # X.append(os.path.join(dir, leftImgUrl))\n # y.append(float(steering) - 0.2)\n #\n # rightImgUrl = row[2].strip()\n # X.append(os.path.join(dir, rightImgUrl))\n # y.append(float(steering) + 0.2)\n\n if img_shape is None:\n im = mpimg.imread(os.path.join(dir, centerImgUrl), format=\"RGB\")\n img_shape = im.shape\n\n return np.array(X), np.array(y), img_shape\n\n\ndef resize_images_in_dir(dir=\"data\", img_dir=\"IMG\"):\n \"\"\"Function used to resize all the images in dir once.\"\"\"\n os.chdir(dir)\n orig_dir = img_dir + \"_orig\"\n os.rename(img_dir, orig_dir)\n os.mkdir(img_dir)\n\n imgs = os.listdir(orig_dir)\n for img_name in imgs:\n if not img_name.endswith(\"jpg\"):\n continue\n\n img = Image.open(os.path.join(orig_dir, img_name))\n img.thumbnail((img.size[0] / 2, img.size[1] / 2), Image.ANTIALIAS) # resizes image in-place\n resized_img = np.asarray(img, dtype=np.uint8)\n mpimg.imsave(os.path.join(img_dir, img_name), resized_img)\n\n os.chdir(\"..\")\n\n# resize_images_in_dir(dir=\"data\")\n# resize_images_in_dir(dir=\"col_data2\")\n# resize_images_in_dir(dir=\"col_data4_rev\")\n# resize_images_in_dir(dir=\"col_data3\")\n\n# resize_images_in_dir(dir=\"2nd_track1\")"
]
| [
[
"numpy.array",
"numpy.asarray"
]
]
|
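`read_data` above selects columns from `driving_log.csv` through `csv.DictReader`. A hypothetical two-row example of just that selection step; the file names and steering values are invented for illustration:

```python
import csv
import io

# In-memory stand-in for a driving_log.csv file.
sample = (
    "center,left,right,steering\n"
    "img_c0.jpg,img_l0.jpg,img_r0.jpg,0.05\n"
    "img_c1.jpg,img_l1.jpg,img_r1.jpg,-0.10\n"
)
field_names = ("center", "steering")
rows = [[row[k] for k in field_names] for row in csv.DictReader(io.StringIO(sample))]
print(rows)  # [['img_c0.jpg', '0.05'], ['img_c1.jpg', '-0.10']]
```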
kishwarshafin/signalAlign | [
"c9b7b9232ef6fb76aa427670981c969b887f4860",
"c9b7b9232ef6fb76aa427670981c969b887f4860"
]
| [
"src/signalalign/visualization/compare_trained_models.py",
"src/signalalign/fast5.py"
]
| [
"#!/usr/bin/env python\n\"\"\"Compare multiple hdp and ont trained models\"\"\"\n########################################################################\n# File: compare_trained_models.py\n# executable: compare_trained_models.py\n#\n# Author: Andrew Bailey\n# History: 01/24/18 Created\n########################################################################\n\nimport os\nimport numpy as np\nimport csv\nimport matplotlib as mpl\nimport platform\nif os.environ.get('DISPLAY', '') == '':\n print('no display found. Using non-interactive Agg backend')\n mpl.use('Agg')\nif platform.system() == \"Darwin\":\n mpl.use(\"macosx\")\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\n\nfrom argparse import ArgumentParser\nfrom itertools import zip_longest\nimport itertools\nfrom scipy.stats import norm\nfrom sklearn.neighbors import KernelDensity\nfrom scipy.stats import norm, invgauss, entropy\nfrom scipy.spatial.distance import euclidean\n\nfrom py3helpers.utils import load_json, create_dot_dict, save_json\nfrom signalalign.hiddenMarkovModel import HmmModel, parse_assignment_file, parse_alignment_file, hellinger2\n\n\ndef parse_args():\n parser = ArgumentParser(description=__doc__)\n # required arguments\n parser.add_argument('--config', '-c', action='store',\n dest='config', required=True, type=str, default=None,\n help=\"Path to json config file\")\n\n args = parser.parse_args()\n return args\n\n\nclass MultipleModelHandler(object):\n\n def __init__(self, models, strands, assignment_data=None, savefig_dir=None):\n assert len(models) == len(strands), \"Must have strand with each model. models = {} :: strands = {}\".format(\n models, strands)\n if savefig_dir is not None:\n assert os.path.isdir(savefig_dir), \"savefig_dir must be a directory. 
{}\".format(savefig_dir)\n self.models = models\n self.assignment_data = assignment_data\n if self.assignment_data is None:\n self.assignment_data = [None]\n self.strands = strands\n self.savefig_dir = savefig_dir\n\n def plot_kmer_distribution(self, kmer_list_list):\n \"\"\"Plot multiple kmer distribution onto a single plot with ONT and/or HDP distributions\n :param kmer_list_list: list of kmers for plotting each model\n \"\"\"\n if self.savefig_dir:\n assert os.path.exists(self.savefig_dir), \"Save figure directory does not exist: {}\".format(self.savefig_dir)\n assert len(kmer_list_list) == len(self.models), \\\n \"Must have same number of kmer lists: {} and models: {}\".format(len(kmer_list_list), len(self.models))\n # keep track of handles and text depending on which models are loaded\n handles1 = []\n legend_text1 = []\n handles2 = []\n legend_text2 = []\n plt.figure(figsize=(20, 9))\n panel1 = plt.axes([0.1, 0.5, .8, .45])\n panel1.set_xlabel('pA')\n panel1.set_ylabel('Density')\n panel1.grid(color='black', linestyle='-', linewidth=1, alpha=0.5)\n panel1.xaxis.set_major_locator(ticker.AutoLocator())\n panel1.xaxis.set_minor_locator(ticker.AutoMinorLocator())\n min_x = 1000\n max_x = 0\n titles = []\n colors = ['r', 'g', 'b', 'c', 'm', 'y', 'k']\n markers = [\"+\", '^', 'o', '1', 's']\n marker_index = 0\n color_index = 0\n\n for kmer, model, model_assignment_data, strand in zip_longest(kmer_list_list, self.models,\n self.assignment_data, self.strands):\n if kmer is not None:\n nuc_type = \"RNA\" if model.rna else \"DNA\"\n strand = \"t\" if strand is None else strand\n name = \"_\".join([model.name, nuc_type, strand, kmer])\n normal_mean, normal_sd = model.get_event_mean_gaussian_parameters(kmer)\n\n tmp_min_x = normal_mean - (5 * normal_sd)\n tmp_max_x = normal_mean + (5 * normal_sd)\n if min_x > tmp_min_x:\n min_x = tmp_min_x\n if max_x < tmp_max_x:\n max_x = tmp_max_x\n\n # plot ont normal distribution\n x = np.linspace(normal_mean - 4 * normal_sd, normal_mean + 4 * normal_sd, 200)\n ont_handle, = panel1.plot(x, norm.pdf(x, normal_mean, normal_sd), label=kmer, color=colors[color_index])\n color_index += 1\n if color_index > 6:\n color_index = 0\n # panel1.plot([normal_mean, normal_mean], [0, norm.pdf(normal_mean, normal_mean, normal_sd)], lw=2)\n ont_model_name = os.path.basename(model.ont_model_file)\n txt_handle1, = panel1.plot([], [], ' ')\n txt_handle2, = panel1.plot([], [], ' ')\n\n handles1.append(ont_handle)\n legend_text1.append(\"{} ONT Normal\".format(name))\n\n handles2.extend([txt_handle1, txt_handle2])\n print(\"{} ONT Model: {}\".format(name, ont_model_name))\n print(\"{} ONT Event Mean: {}\".format(name, normal_mean))\n print(\"{} ONT Event SD: {}\".format(name, normal_sd))\n legend_text2.extend([\"{} ONT Model: {}\".format(name, ont_model_name),\n \"{} ONT Event Mean: {}\".format(name, normal_mean)])\n\n if model.has_hdp_model:\n # plot HDP predicted distribution\n kmer_id = model.get_kmer_index(kmer)\n x = model.linspace\n if min_x > min(x):\n min_x = min(x)\n if max_x < max(x):\n max_x = max(x)\n hdp_y = model.all_posterior_pred[kmer_id]\n if len(hdp_y) == len(x):\n hdp_handle, = panel1.plot(x, hdp_y, '--', color=colors[color_index])\n color_index += 1\n if color_index > 6:\n color_index = 0\n\n handles1.append(hdp_handle)\n legend_text1.append(\"{} HDP Distribution\".format(name))\n\n if model.has_nanopolish_model:\n # plot HDP predicted distribution\n normal_mean, normal_sd = model.get_event_mean_gaussian_parameters(kmer, nanopolish=True)\n\n tmp_min_x = 
normal_mean - (5 * normal_sd)\n tmp_max_x = normal_mean + (5 * normal_sd)\n if min_x > tmp_min_x:\n min_x = tmp_min_x\n if max_x < tmp_max_x:\n max_x = tmp_max_x\n\n # plot ont normal distribution\n x = np.linspace(normal_mean - 4 * normal_sd, normal_mean + 4 * normal_sd, 200)\n nanopolish_handle, = panel1.plot(x, norm.pdf(x, normal_mean, normal_sd), label=kmer, color=colors[color_index])\n color_index += 1\n if color_index > 6:\n color_index = 0\n # panel1.plot([normal_mean, normal_mean], [0, norm.pdf(normal_mean, normal_mean, normal_sd)], lw=2)\n nanopolish_model_name = os.path.basename(model.nanopolish_model_file)\n txt_handle1, = panel1.plot([], [], ' ')\n txt_handle2, = panel1.plot([], [], ' ')\n\n handles1.append(nanopolish_handle)\n legend_text1.append(\"{} Nanopolish Normal\".format(name))\n\n handles2.extend([txt_handle1, txt_handle2])\n print(\"{} Nanopolish Model: {}\".format(name, nanopolish_model_name))\n print(\"{} Nanopolish Event Mean: {}\".format(name, normal_mean))\n print(\"{} Nanopolish Event SD: {}\".format(name, normal_sd))\n legend_text2.extend([\"{} Nanopolish Model: {}\".format(name, nanopolish_model_name),\n \"{} Nanopolish Event Mean: {}\".format(name, normal_mean)])\n\n if model_assignment_data is not None:\n kmer_assignments = model_assignment_data.loc[model_assignment_data['kmer'] == kmer]\n kmer_assignments = kmer_assignments.loc[kmer_assignments['strand'] == strand]\n kmer_data = kmer_assignments[\"level_mean\"]\n kmer_prob = kmer_assignments[\"prob\"]\n # get event means and linspace in correct format\n x = np.asarray(kmer_data).reshape(len(kmer_data), 1)\n alphas = np.asarray(kmer_prob).reshape(len(kmer_prob), 1)\n x_plot = model.linspace[:, np.newaxis]\n rgba_colors = np.zeros((len(kmer_data), 4))\n # for red the first column needs to be one\n if 0 < len(titles) < 4:\n rgba_colors[:, len(titles)] = 1.0\n\n # the fourth column needs to be your alphas\n rgba_colors[:, 3] = alphas[:, 0]\n\n # get estimate for data\n if len(kmer_data) > 0:\n\n kde = KernelDensity(kernel=\"gaussian\", bandwidth=0.5).fit(x)\n # estimate across the linspace\n log_dens = kde.score_samples(x_plot)\n kde_handle, = panel1.plot(x_plot[:, 0], np.exp(log_dens), '-')\n raw_data_handle = panel1.scatter(x[:, 0], -0.005 - 0.01 * np.random.random(x.shape[0]),\n marker=markers[marker_index],\n c=rgba_colors)\n marker_index += 1\n if marker_index > 4:\n marker_index = 0\n\n # add to legend\n handles1.extend([kde_handle, raw_data_handle])\n legend_text1.extend([\"Gaussian KDE Estimate: {}\".format(name),\n \"Event Means: {} points\\nProb: mu: {}, sd:{}\".format(len(kmer_data),\n np.mean(alphas[:, 0]),\n np.std(\n alphas[:, 0]))])\n\n else:\n print(\"{} not found in alignment file\".format(kmer))\n\n titles.append(name)\n # create legend\n first_legend = panel1.legend(handles1, legend_text1, bbox_to_anchor=(-0.1, -0.1), loc='upper left')\n ax = plt.gca().add_artist(first_legend)\n\n panel1.legend(handles2, legend_text2, bbox_to_anchor=(0.5, -.1), loc='upper left')\n\n panel1.set_xlim(min_x, max_x)\n panel1.set_title(\"Kmer distribution comparisons\")\n\n # option to save figure or just show it\n if self.savefig_dir:\n base_name = \"-\".join(titles)\n name = \"{}.png\".format(base_name)\n out_path = os.path.join(self.savefig_dir, name)\n plt.savefig(out_path)\n else:\n plt.show()\n\n def plot_all_model_comparisons(self, write_log_file=True):\n \"\"\"Plot every comparison between each model\"\"\"\n plt.figure(figsize=(10, 8))\n panel1 = plt.axes([0.1, 0.08, .85, .2])\n 
panel1.set_title(\"Kullback–Leibler Divergence between distributions\", x=0.5, y=1.0)\n panel1.set_xlabel('KL Divergence Distance')\n panel1.set_ylabel('Count')\n panel1.grid(color='black', linestyle='-', linewidth=1, alpha=0.5)\n\n panel2 = plt.axes([0.1, 0.4, .85, .2])\n panel2.set_title(\"Hellinger Distance between distributions\")\n panel2.set_xlabel('Hellinger Distance')\n panel2.set_ylabel('Count')\n panel2.grid(color='black', linestyle='-', linewidth=1, alpha=0.5)\n\n panel3 = plt.axes([0.1, 0.72, .85, .2])\n panel3.set_title(\"abs(Median Delta) between distributions\")\n panel3.set_xlabel('abs(Median Delta)')\n panel3.set_ylabel('Count')\n panel3.grid(color='black', linestyle='-', linewidth=1, alpha=0.5)\n\n all_hellinger_distances = []\n all_kl_divergences = []\n all_median_deltas = []\n\n for model_pair in itertools.combinations(self.models, 2):\n hellinger_distances, kl_divergences, median_deltas = \\\n self.compare_distributions_between_models(model_pair[0], model_pair[1])\n\n if write_log_file and self.savefig_dir:\n kmers = self.get_overlap_kmers(model_pair[0], model_pair[1])\n model_names = \"{}_{}\".format(model_pair[0].name, model_pair[1].name)\n hellinger_outpath = os.path.join(self.savefig_dir,\n \"{}_{}\".format(model_names, \"kl_hellinger_delta_distances.tsv\"))\n # write kmer_differences\n self.write_kmer_distribution_comparison_logfile(kmers, kl_divergences, hellinger_distances,\n median_deltas, outfile=hellinger_outpath)\n\n kl_divergences = [x for x in kl_divergences if x is not None if x > 0]\n hellinger_distances = [x for x in hellinger_distances if x > 0]\n median_deltas = [x for x in median_deltas if x > 0]\n\n if len(hellinger_distances) > 0:\n all_hellinger_distances.append(hellinger_distances)\n else:\n all_hellinger_distances.append([0])\n\n if len(kl_divergences) > 0:\n all_kl_divergences.append(kl_divergences)\n else:\n all_kl_divergences.append([0])\n\n if len(median_deltas) > 0:\n all_median_deltas.append(median_deltas)\n else:\n all_median_deltas.append([0])\n\n max_hellinger = max([max(x) for x in all_hellinger_distances])\n max_kl = max([max(x) for x in all_kl_divergences])\n max_delta = max([max(x) for x in all_median_deltas])\n panel1_bins = np.linspace(0, max_kl, num=30)\n panel2_bins = np.linspace(0, max_hellinger, num=30)\n panel3_bins = np.linspace(0, max_delta, num=30)\n\n for i, model_pair in enumerate(itertools.combinations(self.models, 2)):\n n_kmers = len(self.get_overlap_kmers(model_pair[0], model_pair[1]))\n panel1.hist(all_kl_divergences[i], bins=panel1_bins,\n label=\"KL divergences: {} vs {} | {}/{}\".format(model_pair[0].name,\n model_pair[1].name, len(all_kl_divergences[i]),\n n_kmers), alpha=0.6)\n panel2.hist(all_hellinger_distances[i], bins=panel2_bins,\n label=\"Hellinger distances: {} vs {} | {}/{}\".format(model_pair[0].name,\n model_pair[1].name,\n len(all_hellinger_distances[i]),\n n_kmers), alpha=0.6)\n panel3.hist(all_median_deltas[i], bins=panel3_bins,\n label=\"Median Deltas: {} vs {} | {}/{}\".format(model_pair[0].name,\n model_pair[1].name, len(all_median_deltas[i]),\n n_kmers), alpha=0.6)\n\n panel1.legend(loc='upper right', fancybox=True, shadow=True)\n panel2.legend(loc='upper right', fancybox=True, shadow=True)\n panel3.legend(loc='upper right', fancybox=True, shadow=True)\n\n if self.savefig_dir:\n plt.savefig(os.path.join(self.savefig_dir, \"model_comparisons.png\"))\n else:\n plt.show()\n\n @staticmethod\n def write_kmer_distribution_comparison_logfile(kmers, kl_divergences, hellinger_distances, median_deltas, 
outfile):\n \"\"\"Write a sorted by divergence tsv of kmers\"\"\"\n assert len(kmers) == len(kl_divergences), \\\n \"Number of kmers and divergences must match. \" \\\n \"n_kmers : {} != n_divergences: {}\".format(len(kmers), len(kl_divergences))\n assert len(kmers) == len(hellinger_distances), \\\n \"Number of kmers and hellinger_distances must match. n_kmers : \" \\\n \"{} != n_hellinger_distances: {}\".format(len(kmers), len(hellinger_distances))\n assert len(kmers) == len(median_deltas), \\\n \"Number of kmers and median_deltas must match. \" \\\n \"n_kmers : {} != n_median_deltas: {}\".format(len(kmers), len(median_deltas))\n\n zipped_kmers = [(k, d1, d2, d3) for k, d1, d2, d3 in\n zip(kmers, kl_divergences, hellinger_distances, median_deltas)\n if d1 is not None]\n\n zipped_kmers.sort(key=lambda x: x[1], reverse=True)\n none_zipped_kmers = [(k, d1, d2, d3) for k, d1, d2, d3 in\n zip(kmers, kl_divergences, hellinger_distances, median_deltas)\n if d1 is None]\n\n with open(outfile, 'w') as tsvfile:\n writer = csv.writer(tsvfile, delimiter='\\t')\n writer.writerows(zipped_kmers)\n writer.writerows(none_zipped_kmers)\n\n return outfile\n\n @staticmethod\n def read_kmer_distribution_comparison_logfile(infile):\n \"\"\"Read in kmer distribution comparison tsv logfile\"\"\"\n data = []\n with open(infile, newline='\\n') as csvfile:\n spamreader = csv.reader(csvfile, delimiter='\\t')\n for row in spamreader:\n # catch None's in tsv\n d1 = None if row[1] == '' else float(row[1])\n d2 = None if row[2] == '' else float(row[2])\n d3 = None if row[3] == '' else float(row[3])\n data.append([row[0], d1, d2, d3])\n return data\n\n def compare_distributions_between_models(self, model1, model2, hdp=True):\n \"\"\"Calculate hellinger divergence and kl divergence between the hdp or hmm model between two models.\"\"\"\n hellinger_distances = []\n kl_divergences = []\n median_deltas = []\n get_new_linspace = False\n if model1.has_hdp_model and model2.has_hdp_model and hdp:\n if np.array_equal(model1.linspace, model2.linspace):\n linspace = model1.linspace\n else:\n get_new_linspace = True\n linspace_min = max([model1.linspace[0], model2.linspace[0]])\n linspace_max = min([model1.linspace[-1], model2.linspace[-1]])\n linspace = np.linspace(linspace_min, linspace_max, 3000)\n elif model1.has_hdp_model:\n linspace = model1.linspace\n else:\n linspace = model2.linspace\n\n for kmer in self.get_overlap_kmers(model1, model2):\n # if statements used if the HDP model does not have information on the kmer distribution\n if hdp and model1.has_hdp_model:\n m1_dist = self.get_hdp_kmer_posterior_prediction(model1, kmer, linspace, get_new_linspace)\n if m1_dist is None:\n m1_dist = self.get_ont_kmer_posterior_prediction(model1, kmer, linspace)\n else:\n m1_dist = self.get_ont_kmer_posterior_prediction(model1, kmer, linspace)\n\n if hdp and model2.has_hdp_model:\n m2_dist = self.get_hdp_kmer_posterior_prediction(model2, kmer, linspace, get_new_linspace)\n if m2_dist is None:\n m2_dist = self.get_ont_kmer_posterior_prediction(model2, kmer, linspace)\n else:\n m2_dist = self.get_ont_kmer_posterior_prediction(model2, kmer, linspace)\n\n kl_divergences.append(self.get_kl_divergence(m1_dist, m2_dist))\n hellinger_distances.append(self.get_hellinger_distance(m1_dist, m2_dist))\n median_deltas.append(self.get_median_delta(m1_dist, m2_dist, linspace))\n\n return hellinger_distances, kl_divergences, median_deltas\n\n @staticmethod\n def get_overlap_kmers(model1, model2):\n \"\"\"Get the kmers that are in both models\n :param 
model1: HmmModel\n :param model2: HmmModel\n \"\"\"\n kmers = set(model1.sorted_kmer_tuple) & set(model2.sorted_kmer_tuple)\n if len(kmers) < len(model1.sorted_kmer_tuple) or len(kmers) < len(model2.sorted_kmer_tuple):\n print(\"[Warning] Not including kmers that do not exist in both models\")\n return kmers\n\n @staticmethod\n def get_hdp_kmer_posterior_prediction(model, kmer, linspace, get_new_linspace=False):\n \"\"\"For a given model, grab the posterior prediction distribution\"\"\"\n if model.has_hdp_model:\n if get_new_linspace:\n posterior_pred = model.get_new_linspace_hdp_probability_distribution(kmer, linspace)\n else:\n kmer_id = model.get_kmer_index(kmer)\n posterior_pred = model.all_posterior_pred[kmer_id]\n # print(\"[Kullback–Leibler divergence] No HDP data for {}\".format(kmer))\n if posterior_pred is None:\n return None\n elif len(posterior_pred) == 0:\n return None\n return posterior_pred\n else:\n return None\n\n @staticmethod\n def get_ont_kmer_posterior_prediction(model, kmer, linspace):\n \"\"\"For a given model, grab the posterior prediction distribution\"\"\"\n # print(\"[Kullback–Leibler divergence] No HDP data for {}\".format(kmer))\n normal_mean, normal_sd = model.get_event_mean_gaussian_parameters(kmer)\n posterior_pred = norm.pdf(linspace, normal_mean, normal_sd)\n\n return posterior_pred\n\n @staticmethod\n def get_kl_divergence(dist1, dist2):\n \"\"\"Get Kullback–Leibler divergence between the HDP and ONT models for a specific kmer\"\"\"\n if min(dist1) == 0:\n dist1[dist1 == 0] = 0.000001\n # np.nextafter(0, 1)\n if min(dist2) == 0:\n dist2[dist2 == 0] = 0.000001\n\n kl_divergence = entropy(pk=dist1, qk=dist2, base=2)\n if kl_divergence == np.inf:\n # print(\"[Kullback–Leibler divergence] Zero probability for {}\".format(kmer))\n return None\n return kl_divergence\n\n @staticmethod\n def get_hellinger_distance(dist1, dist2):\n \"\"\"Get Hellinger distance between the HDP and ONT models for a specific kmer\"\"\"\n h_distance = hellinger2(p=dist1, q=dist2)\n return h_distance\n\n @staticmethod\n def get_median_delta(dist1, dist2, linspace):\n \"\"\"Calculate the difference between the max value of HDP and ONT kmer distributions\"\"\"\n dist1 = list(dist1)\n dist2 = list(dist2)\n delta = linspace[dist1.index(max(dist1))] - linspace[dist2.index(max(dist2))]\n return abs(delta)\n\n\ndef main(config=None):\n if config is None:\n args = parse_args()\n # load model files\n assert os.path.exists(args.config), \"Config file does not exist: {}\".format(args.config)\n config = load_json(args.config)\n\n args = create_dot_dict(config)\n # load model files\n models = []\n kmer_lists = []\n assignment_data = []\n strands = []\n max_plots = 0\n # create models and grab kmer lists\n for model in args.models:\n models.append(HmmModel(ont_model_file=model.ont_model,\n hdp_model_file=model.hdp_model,\n nanopolish_model_file=model.nanopolish_model,\n rna=model.rna,\n name=model.name))\n model_kmer_list = model.kmers\n n_kmers_to_plot = len(model_kmer_list)\n kmer_lists.append(model_kmer_list)\n max_plots = n_kmers_to_plot if n_kmers_to_plot > max_plots else max_plots\n\n if model.builtAlignment_tsv is not None:\n assert os.path.exists(model.builtAlignment_tsv), \\\n \"builtAlignment_tsv does not exist: {}\".format(model.builtAlignment_tsv)\n # read in both types of data\n try:\n assignment_data.append(parse_assignment_file(model.builtAlignment_tsv))\n except ValueError:\n assignment_data.append(parse_alignment_file(model.builtAlignment_tsv))\n else:\n 
assignment_data.append(None)\n strands.append(model.strand)\n\n    mmh = MultipleModelHandler(models, strands=strands, assignment_data=assignment_data, savefig_dir=args.save_fig_dir)\n if args.summary_distance:\n mmh.plot_all_model_comparisons()\n # Start plotting\n for kmer_list in zip_longest(*kmer_lists):\n mmh.plot_kmer_distribution(kmer_list)\n\n if args.save_fig_dir:\n save_json(args, os.path.join(args.save_fig_dir, \"compare_trained_models_config.json\"))\n\n\nif __name__ == \"__main__\":\n main()\n",
"#!/usr/bin/env python\n\"\"\"Fast5 python handler\"\"\"\n########################################################################\n# File: fast5.py\n# executable: fast5.py\n#\n# Authors: Most of the initial work for this class was completed by the people at Nanonet\n# https://github.com/nanoporetech/nanonet.\n#\n# Further additions done by Andrew Bailey\n#\n# History: 12/19/17\n########################################################################\n\nimport os\nimport sys\nfrom glob import glob\nimport subprocess\nimport shutil\nimport re\nimport h5py\nimport numpy as np\nimport numpy.lib.recfunctions as nprf\nfrom copy import deepcopy\nfrom py3helpers.utils import check_numpy_table\nfrom py3helpers.seq_tools import check_fastq_line\n\n\ndef short_names(fname):\n filename_short = os.path.splitext(os.path.basename(fname))[0]\n short_name_match = re.search(re.compile(r'ch\\d+_file\\d+'), filename_short)\n name_short = filename_short\n if short_name_match:\n name_short = short_name_match.group()\n return filename_short, name_short\n\n\nclass Fast5(h5py.File):\n \"\"\"Class for grabbing data from single read fast5 files. Many attributes/\n groups are assumed to exist currently (we're concerned mainly with reading).\n Needs some development to make robust and for writing.\n\n \"\"\"\n __base_analysis__ = '/Analyses'\n __event_detect_name__ = 'EventDetection'\n __default_event_path__ = 'Reads'\n __raw_path__ = '/Raw/Reads'\n __raw_name_old__ = 'RawData'\n __raw_path_old__ = '{}/{}/'.format(__base_analysis__, __raw_name_old__)\n __raw_signal_path_old__ = '{}/Signal'.format(__raw_path_old__)\n __raw_meta_path_old__ = '{}/Meta'.format(__raw_path_old__)\n __channel_meta_path__ = '/UniqueGlobalKey/channel_id'\n __tracking_id_path__ = 'UniqueGlobalKey/tracking_id'\n __context_tags_path__ = 'UniqueGlobalKey/context_tags'\n\n __default_basecall_2d_analysis__ = 'Basecall_2D'\n __default_basecall_1d_analysis__ = 'Basecall_1D'\n\n __default_seq_section__ = '2D'\n __default_basecall_fastq__ = 'BaseCalled_{}/Fastq'\n __default_basecall_1d_events__ = 'BaseCalled_{}/Events'\n __default_basecall_1d_model__ = 'BaseCalled_{}/Model'\n __default_basecall_1d_summary__ = 'Summary/basecall_1d_{}'\n __default_basecall_1d__ = 'BaseCalled_{}'\n\n __default_alignment_analysis__ = 'Alignment'\n\n __default_hairpin_split_analysis__ = 'Hairpin_Split'\n __template_section__ = 'template'\n __complement_section__ = 'complement'\n __default_section__ = __template_section__\n\n __default_mapping_analysis__ = 'Squiggle_Map'\n __default_mapping_events__ = 'SquiggleMapped_{}/Events'\n __default_mapping_model__ = 'SquiggleMapped_{}/Model'\n __default_mapping_summary__ = 'Summary/squiggle_map_{}'\n\n __default_substep_mapping_analysis__ = 'Substate_Map'\n __default_substep_mapping_events__ = '/Events'\n\n __default_basecall_mapping_analysis__ = 'AlignToRef'\n __default_basecall_mapping_events__ = 'CurrentSpaceMapped_{}/Events/'\n __default_basecall_mapping_summary__ = '/Summary/current_space_map_{}/' # under AlignToRef analysis\n __default_basecall_alignment_summary__ = '/Summary/genome_mapping_{}/' # under Alignment analysis\n\n #todo fix the form of these\n __default_corrected_genome__ = '/Analyses/RawGenomeCorrected_00{}/BaseCalled_template' # nanoraw\n __default_signalalign_events__ = '/Analyses/SignalAlign_00{}' # signalalign events\n __default_eventalign_events__ = '/Analyses/EventAlign_00{}'\n __default_template_1d_basecall_events__ = '/Analyses/Basecall_1D_00{}/BaseCalled_template/Events'\n\n __default_event_table_fields__ 
= ('start', 'length', 'mean', 'stdv')\n\n __default_engine_state_path__ = '/EngineStates/'\n __temp_fields__ = ('heatsink', 'asic')\n\n def __init__(self, fname, read='r'):\n super(Fast5, self).__init__(fname, read)\n\n # Attach channel_meta as attributes, slightly redundant\n for k, v in self[self.__channel_meta_path__].attrs.items():\n setattr(self, k, v)\n # Backward compat.\n self.sample_rate = self.sampling_rate\n\n self.filename_short, self.name_short = short_names(self.filename)\n\n @classmethod\n def New(cls, fname, read='a', tracking_id={}, context_tags={}, channel_id={}):\n \"\"\"Construct a fresh single-read file, with meta data written to\n standard locations.\n\n \"\"\"\n # TODO: tracking_id and channel_id checks, do we care for these?\n # simply copies the data \"verbatim\" (it doesn't copy the group\n # directly, but reconstructs it in a parallel layout). channel_id\n # is enough for most purposes I (cjw) think, if we take filenames\n # to be a key to filter by.\n req_fields = ['channel_number', 'offset', 'range', 'digitisation', 'sampling_rate']\n if not set(req_fields).issubset(set(channel_id.keys())):\n raise KeyError(\n 'channel_id does not contain required fields: {},\\ngot {}.'.format(req_fields, channel_id.keys())\n )\n\n # Start a new file, populate it with meta\n with h5py.File(fname, 'w') as h:\n h.attrs['file_version'] = 1.0\n for data, location in zip(\n [tracking_id, context_tags],\n [cls.__tracking_id_path__, cls.__context_tags_path__]\n ):\n # cjw: no idea why these must be str, just following ossetra\n cls.__add_attrs(h, data, location, convert=str)\n # These aren't forced to be str\n cls.__add_attrs(h, channel_id, cls.__channel_meta_path__)\n\n # return instance from new file\n return cls(fname, read)\n\n def _add_attrs(self, data, location, convert=None):\n \"\"\"Convenience method for adding attrs to a possibly new group.\n :param data: dict of attrs to add\n :param location: hdf path\n :param convert: function to apply to all dictionary values\n \"\"\"\n self.__add_attrs(self, data, location, convert=convert)\n\n @staticmethod\n def __add_attrs(self, data, location, convert=None):\n \"\"\"Implementation of _add_attrs as staticmethod. This allows\n functionality to be used in .New() constructor but is otherwise nasty!\n \"\"\"\n if location not in self:\n self.create_group(location)\n attrs = self[location].attrs\n for k, v in data.items():\n if convert is not None:\n attrs[k] = convert(v)\n else:\n attrs[k] = v\n\n def _add_string_dataset(self, data, location):\n assert type(data) == str, 'Need to supply a string'\n self.create_dataset(location, data=data)\n\n def _add_numpy_table(self, data, location):\n self.create_dataset(location, data=data, compression=True)\n\n def _add_event_table(self, data, location):\n if not isinstance(data, np.ndarray):\n raise TypeError('Table is not a ndarray.')\n\n req_fields = ['mean', 'stdv', 'start', 'length']\n if not set(req_fields).issubset(data.dtype.names):\n raise KeyError(\n 'Array does not contain fields for event array: {}, got {}.'.format(\n req_fields, data.dtype.names\n )\n )\n self._add_numpy_table(data, location)\n\n def _join_path(self, *args):\n return '/'.join(args)\n\n @staticmethod\n def bytes_to_string(string):\n \"\"\"Check string. 
If bytes, convert to string and return string\n\n :param string: string or bytes\n \"\"\"\n if string is None or type(string) == str:\n return string\n elif 'bytes' in str(type(string)):\n return string.decode()\n else:\n raise AssertionError(\"String needs to be bytes or string\")\n\n @property\n def writable(self):\n \"\"\"Can we write to the file.\"\"\"\n if self.mode == 'r':\n return False\n else:\n return True\n\n def assert_writable(self):\n assert self.writable, \"File not writable, opened with {}.\".format(self.mode)\n\n @property\n def channel_meta(self):\n \"\"\"Channel meta information as python dict\"\"\"\n return dict(self[self.__channel_meta_path__].attrs)\n\n @property\n def tracking_id(self):\n \"\"\"Tracking id meta information as python dict\"\"\"\n return dict(self[self.__tracking_id_path__].attrs)\n\n @property\n def raw_attributes(self):\n \"\"\"Attributes for a read, assumes one read in file\"\"\"\n return dict(self.get_read(group=True, raw=True).attrs)\n\n @property\n def event_attributes(self):\n \"\"\"Attributes for a read, assumes one read in file\"\"\"\n return dict(self.get_read(group=True).attrs)\n\n def summary(self, rename=True, delete=True, scale=True):\n \"\"\"A read summary, assumes one read in file\"\"\"\n to_rename = zip(\n ('start_mux', 'abasic_found', 'duration', 'median_before'),\n ('mux', 'abasic', 'strand_duration', 'pore_before')\n )\n to_delete = ('read_number', 'scaling_used')\n\n data = deepcopy(self.event_attributes)\n data['filename'] = os.path.basename(self.filename)\n data['run_id'] = self.tracking_id['run_id']\n data['channel'] = self.channel_meta['channel_number']\n if scale:\n data['duration'] /= self.channel_meta['sampling_rate']\n data['start_time'] /= self.channel_meta['sampling_rate']\n\n if rename:\n for i, j in to_rename:\n try:\n data[j] = data[i]\n del data[i]\n except KeyError:\n pass\n if delete:\n for i in to_delete:\n try:\n del data[i]\n except KeyError:\n pass\n\n for key in data:\n if isinstance(data[key], float):\n data[key] = np.round(data[key], 4)\n\n return data\n\n def strip_analyses(self, keep=('{}_000'.format(__event_detect_name__), __raw_path__)):\n \"\"\"Remove all analyses from file\n\n :param keep: whitelist of analysis groups to keep\n\n \"\"\"\n analyses = self[self.__base_analysis__]\n for name in analyses.keys():\n if name not in keep:\n del analyses[name]\n\n def repack(self):\n \"\"\"Run h5repack on the current file. Returns a fresh object.\"\"\"\n path = os.path.abspath(self.filename)\n path_tmp = '{}.tmp'.format(path)\n mode = self.mode\n self.close()\n subprocess.check_call(['h5repack', path, path_tmp], stderr=subprocess.STDOUT, shell=False)\n shutil.move(path_tmp, path)\n return Fast5(path, mode)\n\n def create_copy(self, copy_path):\n \"\"\"Run h5repack on the current file. 
Returns a fresh object.\"\"\"\n path = os.path.abspath(self.filename)\n path_tmp = copy_path\n mode = self.mode\n self.close()\n subprocess.check_call(['h5repack', path, path_tmp], stderr=subprocess.STDOUT, shell=False)\n return Fast5(path_tmp, mode)\n\n ###\n # Extracting read event data\n\n def get_reads(self, group=False, raw=False, read_numbers=None, scale=True):\n \"\"\"Iterator across event data for all reads in file\n\n :param group: return hdf group rather than event data\n \"\"\"\n if not raw:\n event_group = self.get_analysis_latest(self.__event_detect_name__)\n event_path = self._join_path(event_group, self.__default_event_path__)\n reads = self[event_path]\n else:\n try:\n reads = self[self.__raw_path__]\n except:\n raise KeyError('No raw data available in file {}.'.format(self.filename))\n\n if read_numbers is None:\n it = reads.keys()\n else:\n it = (k for k in reads.keys()\n if reads[k].attrs['read_number'] in read_numbers)\n\n if group == 'all':\n for read in it:\n yield reads[read], read\n elif group:\n for read in it:\n yield reads[read]\n else:\n for read in it:\n if not raw:\n yield self._get_read_data(reads[read])\n else:\n yield self._get_read_data_raw(reads[read], scale=scale)\n\n def get_read(self, group=False, raw=False, read_number=None, scale=False):\n \"\"\"Like get_reads, but only the first read in the file\n\n :param group: return hdf group rather than event/raw data\n \"\"\"\n if read_number is None:\n return self.get_reads(group, raw, scale=scale).__next__()\n else:\n return self.get_reads(group, raw, read_numbers=[read_number], scale=scale).__next__()\n\n def get_corrected_events(self, number=0):\n \"\"\"Returns corrected events table along with the start relative to raw data\"\"\"\n try:\n reads = self[self.__default_corrected_genome__.format(number)]\n events = reads['Events']\n attributes = dict(events.attrs.items())\n corr_start_rel_to_raw = attributes['read_start_rel_to_raw']\n except KeyError:\n raise KeyError('Read does not contain required fields: {}'.format(self.__default_corrected_genome__.format(number)))\n return np.asarray(events), corr_start_rel_to_raw\n\n def get_corrected_events_attr(self, number=0):\n \"\"\"Returns corrected events table along with the start relative to raw data\"\"\"\n try:\n reads = self[self.__default_corrected_genome__.format(number)]\n alignment = reads['Alignment']\n attributes = dict(alignment.attrs.items())\n except KeyError:\n raise KeyError('Read does not contain required fields: {}'.format(self.__default_corrected_genome__.format(number)))\n return attributes\n\n\n #todo fix path creation\n def get_custom_analysis_events(self, name):\n \"\"\"Get events stored in a custom path\"\"\"\n path = None\n try:\n path = self.get_analysis_events_path_latest(name)\n events = np.asarray(self[path])\n except KeyError:\n raise KeyError('File does not contain events at: {}'.format(path))\n except IndexError:\n raise IndexError('File does not contain analysis with name: {}'.format(name))\n return events\n\n def get_signalalign_events(self, mea=False, sam=False, override_path=None, complement=False, variant=False):\n \"\"\"Get signal align events, sam or mea alignment\n :param mea: boolean option to grab the MEA_alignment_labels\n :param sam: boolean option to grab sam file\n :param override_path: if passed, will look for alignment events at that path\n :param complement: get mea complement\n \"\"\"\n assert (not mea or not sam), \"Both mea and sam cannot be set to True\"\n try:\n field = \"\"\n if override_path:\n path = 
override_path\n else:\n path = self.check_path(self.__default_signalalign_events__, latest=True)\n reads = self[path]\n if mea:\n if complement:\n field = \"MEA_alignment_labels_complement\"\n events = np.asarray(reads[field])\n else:\n field = \"MEA_alignment_labels\"\n events = np.asarray(reads[field])\n elif sam:\n field = \"sam\"\n events = str(np.asarray(reads[field]))\n elif variant:\n field = \"variantCaller\"\n events = np.asarray(reads[field])\n else:\n field = \"full\"\n events = np.asarray(reads[field])\n\n except KeyError:\n raise KeyError('Read does not contain required fields: {}'.format(os.path.join(path, field)))\n return events\n\n def get_signalalign_basecall_path(self, override_path=None):\n \"\"\"Get basecalled events used for signalAlign input\n :param override_path: if passed, will look for alignment events at that path\n \"\"\"\n try:\n if override_path:\n path = override_path\n else:\n path = self.check_path(self.__default_signalalign_events__, latest=True)\n attributes = self[path].attrs\n basecall_path = attributes[\"basecall_events\"]\n except KeyError:\n raise KeyError('Read does not contain required fields: {}'.format(path))\n return basecall_path\n\n # todo fix path creation\n def get_eventalign_events(self, section=__default_section__):\n \"\"\"Get signal align events, sam or mea alignment\"\"\"\n assert section in [self.__template_section__, self.__complement_section__], \\\n \"Section must be template or complement: {}\".format(section)\n try:\n path = self.check_path(self.__default_eventalign_events__, latest=True)\n reads = self[path]\n events = np.asarray(reads['BaseCalled_{}/Events'.format(section)])\n\n except KeyError:\n raise KeyError('Read does not contain required fields: {}'.format(path))\n return events\n\n def _get_read_data(self, read, indices=None):\n \"\"\"Private accessor to read event data\"\"\"\n # We choose the following to always be floats\n float_fields = ('start', 'length', 'mean', 'stdv')\n\n events = read['Events']\n\n # We assume that if start is an int or uint the data is in samples\n # else it is in seconds already.\n needs_scaling = False\n if events['start'].dtype.kind in ['i', 'u']:\n needs_scaling = True\n\n dtype = np.dtype([(d[0], 'float') if d[0] in float_fields else d\n for d in events.dtype.descr\n ])\n data = None\n with events.astype(dtype):\n if indices is None:\n data = events[()]\n else:\n try:\n data = events[indices[0]:indices[1]]\n except:\n raise ValueError(\n 'Cannot retrieve events using {} as indices'.format(indices)\n )\n\n # File spec mentions a read.attrs['scaling_used'] attribute,\n # its not clear what this is. 
We'll ignore it and hope for\n # the best.\n if needs_scaling:\n data['start'] /= self.sample_rate\n data['length'] /= self.sample_rate\n return data\n\n def _get_read_data_raw(self, read, indices=None, scale=True):\n \"\"\"Private accessor to read raw data\"\"\"\n raw = read['Signal']\n dtype = float if scale else int\n\n data = None\n with raw.astype(dtype):\n if indices is None:\n data = raw[()]\n else:\n try:\n data = raw[indices[0]:indices[1]]\n except:\n raise ValueError(\n 'Cannot retrieve events using {} as indices'.format(indices)\n )\n\n # Scale data to pA\n if scale:\n meta = self.channel_meta\n raw_unit = meta['range'] / meta['digitisation']\n data = (data + meta['offset']) * raw_unit\n return data\n\n #todo fix path creation\n def set_read(self, data, meta, scale=True):\n \"\"\"Write event data to file\n\n :param data: event data\n :param meta: meta data to attach to read\n :param read_number: per-channel read counter\n \"\"\"\n req_fields = [\n 'start_time', 'duration', 'read_number',\n 'start_mux', 'read_id', 'scaling_used'\n ]\n if not set(req_fields).issubset(meta.keys()):\n raise KeyError(\n 'Read meta does not contain required fields: {}, got {}'.format(\n req_fields, meta.keys()\n )\n )\n self.test_event_table(data)\n path = self._join_path(\n self.__event_path__, 'Read_{}'.format(meta['read_number'])\n )\n self._add_attrs(meta, path)\n\n # (see _get_read_data()). If the data is not an int or uint\n # we assume it is in seconds and scale appropriately\n if scale:\n data['start'] *= self.sample_rate\n data['length'] *= self.sample_rate\n\n self._add_event_table(data, self._join_path(path, 'Events'))\n\n def set_fastq(self, destination_root, data, section=__default_section__, overwrite=False):\n \"\"\"Write new fasta file to file\n\n :param destination_root: root directory; data will be stored in {destination_root}/Basecalled_{section}/Fastq\n :param data: fastq file\n :param section: name of basecall analysis default (template)\n \"\"\"\n check_fastq_line(data)\n\n # get location and sanity check\n path = self._join_path(destination_root, self.__default_basecall_fastq__.format(section))\n if path in self:\n if overwrite:\n self.delete(path, ignore=True)\n else:\n raise Exception(\"Destination {} already exists in {}\".format(path, self.filename))\n\n # save\n self._add_string_dataset(data, path)\n\n def set_event_table(self, destination_root, data, meta, section=__default_section__, scale=False, overwrite=False):\n \"\"\"Write new event data to file\n\n :param destination_root: root directory; data will be stored in {destination_root}/Basecalled_{section}/Events\n :param data: event data\n :param meta: meta data to attach to read\n :param section: name of basecall analysis default (template)\n :param scale: scale the the start and length by the sample rate\n :param overwrite: overwrite most recent path\n \"\"\"\n\n self.assert_writable()\n self.test_event_table(data)\n\n # modification to data\n if meta:\n #todo add attrs to dest_root or dest_events?\n self._add_attrs(meta, destination_root)\n if scale:\n data['start'] *= self.sample_rate\n data['length'] *= self.sample_rate\n\n # get location and sanity check\n destination_events = self._join_path(destination_root, self.__default_basecall_1d_events__.format(section))\n if destination_events in self:\n if overwrite:\n self.delete(destination_events, ignore=True)\n else:\n raise Exception(\"Destination {} already exists in {}\".format(destination_events, self.filename))\n\n # save\n self._add_event_table(data, 
destination_events)\n\n #todo fix path creation\n def set_eventalign_table(self, template, complement, meta, overwrite=False):\n \"\"\"Write eventalign table to fast5 file\n\n :param template: template dataset\n :param complement: complement dataset\n :param meta: meta data to attach to read\n :param overwrite: overwrite most recent path\n \"\"\"\n assert template or complement, \"Must set template and/or complement dataset\"\n self.assert_writable()\n path = \"EventAlign_00{}\"\n path = self._join_path(self.__base_analysis__, path)\n path = self.check_path(path, latest=overwrite)\n if overwrite:\n self.delete(path, ignore=True)\n if meta:\n self._add_attrs(meta, path)\n if not isinstance(template, np.ndarray):\n raise TypeError('Table is not a ndarray.')\n if not isinstance(complement, np.ndarray):\n raise TypeError('Table is not a ndarray.')\n\n if template is not None:\n self._add_numpy_table(template, self._join_path(path, \"BaseCalled_{}\".format(\"template\"), 'Events'))\n if complement is not None:\n self._add_numpy_table(complement, self._join_path(path, \"BaseCalled_{}\".format(\"complement\"), 'Events'))\n\n return True\n\n #todo change to look like get_analysis_latest\n def check_path(self, path, latest=False):\n \"\"\"Check if path exists, if it does increment numbering\n\n :param path: path to fast5 object. Needs to have a field where string.format can work! \"\"\"\n highest = 0\n while highest < 20:\n if path.format(highest) in self:\n highest += 1\n continue\n else:\n if latest and highest > 0:\n return path.format(highest-1) # the last base-called version we saw\n else:\n return path.format(highest) # the new base-called version\n\n def get_read_stats(self):\n \"\"\"Combines stats based on events with output of .summary, assumes a\n one read file.\n\n \"\"\"\n data = deepcopy(self.summary())\n read = self.get_read()\n sorted_means = np.sort(read['mean'])\n n_events = len(sorted_means)\n n10 = int(0.1*n_events)\n n90 = int(0.9*n_events)\n data['range_current'] = sorted_means[n90] - sorted_means[n10]\n data['median_current'] = sorted_means[int(0.5*n_events)] # could be better\n data['num_events'] = n_events\n data['median_sd'] = np.median(read['stdv'])\n return data\n\n @staticmethod\n def test_event_table(data, req_fields=__default_event_table_fields__):\n \"\"\"Wrapper function to test if event tables have required fields\n :param data: numpy array\n :param req_fields: required fields for event table \"\"\"\n return check_numpy_table(data, req_fields)\n\n ###\n # Raw Data\n\n def set_raw(self, raw, meta=None, read_number=None):\n \"\"\"Set the raw data in file.\n\n :param raw: raw data to add\n :param read_number: read number (as usually given in filename and\n contained within HDF paths, viz. Reads/Read_<>/). If not given\n attempts will be made to guess the number (assumes single read\n per file).\n \"\"\"\n # Attempt to guess read_number\n if read_number is None:\n if sum(1 for _ in self.get_reads()) == 1:\n read_number = self.get_read(group=True).attrs['read_number']\n else:\n raise RuntimeError(\"'read_number' not given and cannot guess.\")\n\n # Attempt to guess meta\n if meta is None:\n try:\n meta = dict(self.get_read(group=True, read_number=read_number).attrs)\n except KeyError:\n raise RuntimeError(\"'meta' not given and cannot guess.\")\n\n # Clean up keys as per spec. Note: TANG-281 found that 'read_id' is not\n # always present on the read event data, such that if we have copied\n # meta from there, we won't have 'read_id'. 
The following:\n # https://wiki/display/OFAN/Single-read+fast5+file+format\n # notes that prior to MinKNOW version 49.2 'read_id' was not present.\n # Why do we have a specification?\n req_keys = ['start_time', 'duration', 'read_number', 'start_mux'] #'read_id'\n meta = {k: v for k, v in meta.items() if k in req_keys}\n if len(meta.keys()) != len(req_keys):\n raise KeyError(\n 'Raw meta data must contain keys: {}.'.format(req_keys)\n )\n # Check meta is same as that for event data, if any\n try:\n event_meta = dict(self.get_read(group=True, read_number=read_number).attrs)\n except:\n pass\n else:\n if sum(meta[k] != event_meta[k] for k in meta.keys()) > 0:\n raise ValueError(\n \"Attempted to set raw meta data as {} \"\n \"but event meta is {}\".format(meta, event_meta)\n )\n\n # Good to go!\n read_path = self._join_path(self.__raw_path__, 'Read_{}'.format(read_number))\n data_path = self._join_path(read_path, 'Signal')\n self._add_attrs(meta, read_path)\n self[data_path] = raw\n\n ###\n\n def get_read_id(self):\n reads = list(self[self.__raw_path__])\n if len(reads) != 1:\n return False\n read_loc = os.path.join(self.__raw_path__, reads[0])\n read_label = self.bytes_to_string(self[read_loc].attrs.get('read_id'))\n return read_label\n # Analysis path resolution\n\n def get_analysis_latest(self, name):\n \"\"\"Get group of latest (present) analysis with a given base path.\n\n :param name: Get the (full) path of newest analysis with a given base\n name.\n \"\"\"\n try:\n return self._join_path(\n self.__base_analysis__,\n sorted(filter(\n lambda x: x.startswith(name), self[self.__base_analysis__].keys()\n ))[-1]\n )\n except (IndexError, KeyError):\n raise IndexError('No analyses with name {} present.'.format(name))\n\n def get_analysis_new(self, name):\n \"\"\"Get group path for new analysis with a given base name.\n\n :param name: desired analysis name\n \"\"\"\n\n # Formatted as 'base/name_000'\n try:\n latest = self.get_analysis_latest(name)\n root, counter = latest.rsplit('_', 1)\n counter = int(counter) + 1\n except IndexError:\n # Nothing present\n root = self._join_path(\n self.__base_analysis__, name\n )\n counter = 0\n return '{}_{:03d}'.format(root, counter)\n\n def get_analysis_events_path_new(self, name, section=__default_section__):\n return self._join_path(self.get_analysis_new(name), self.__default_basecall_1d_events__.format(section))\n\n def get_analysis_path_new(self, name, section=__default_section__):\n return self._join_path(self.get_analysis_new(name), self.__default_basecall_1d__.format(section))\n\n def get_analysis_events_path_latest(self, name, section=__default_section__):\n return self._join_path(self.get_analysis_latest(name), self.__default_basecall_1d_events__.format(section))\n\n def ensure_path(self, path, include_last_element=False):\n # get directory parts we want to find or create\n parts = path.lstrip(\"/\").split(\"/\")\n if not include_last_element:\n parts = parts[:-1]\n\n # iterate over all parts, ensuring directory structure exists\n curr_path = \"/\"\n for part in parts:\n curr_path = os.path.join(curr_path, part)\n if curr_path not in self:\n self.create_group(curr_path)\n\n # The remaining are methods to read and write data as chimaera produces\n # It is necessarily all a bit nasty, but should provide a more\n # consistent interface to the files. 
Paths are defaulted\n\n ###\n # Temperature etc.\n\n def get_engine_state(self, state, time=None):\n \"\"\"Retrieve engine state from {}, either across the whole read\n (default) or at a given time.\n\n :param state: name of engine state\n :param time: time (in seconds) at which to retrieve temperature\n\n \"\"\"\n location = self._join_path(\n self.__default_engine_state_path__, state\n )\n states = self[location][()]\n if time is None:\n return states\n else:\n i = np.searchsorted(states['time'], time) - 1\n return states[state][i]\n\n def get_temperature(self, time=None, field=__temp_fields__[0]):\n \"\"\"Retrieve temperature data from {}, either across the whole read\n (default) or at a given time.\n\n :param time: time at which to get temperature\n :param field: one of {}\n\n \"\"\"\n if field not in self.__temp_fields__:\n raise RuntimeError(\"'field' argument must be one of {}.\".format(self.__temp_fields__))\n\n return self.get_engine_state('minion_{}_temperature'.format(field), time)\n\n def set_engine_state(self, data):\n \"\"\"Set the engine state data.\n\n :param data: a 1D-array containing two fields, the first of which\n must be named 'time'. The name of the second field will be used\n to name the engine state and be used in the dataset path.\n \"\"\"\n fields = data.dtype.names\n if fields[0] != 'time':\n raise ValueError(\"First field of engine state data must be 'time'.\")\n if len(fields) != 2:\n raise ValueError(\"Engine state data must contain exactly two fields.\")\n\n state = fields[1]\n location = self._join_path(\n self.__default_engine_state_path__, state\n )\n self[location] = data\n\n\n ###\n # Template/complement splitting data\n __split_summary_location__ = '/Summary/split_hairpin'\n\n def set_split_data(self, data, analysis=__default_hairpin_split_analysis__):\n \"\"\"Write a dict containing split point data.\n\n :param data: `dict`-like object containing attrs to add\n :param analysis: Base analysis name (under {})\n\n .. warning::\n Not checking currently for required fields.\n \"\"\"\n\n location = self._join_path(\n self.get_analysis_new(analysis), self.__split_summary_location__\n )\n self._add_attrs(data, location)\n\n def get_split_data(self, analysis=__default_hairpin_split_analysis__):\n \"\"\"Get template-complement segmentation data.\n\n :param analysis: Base analysis name (under {})\n \"\"\"\n\n location = self._join_path(\n self.get_analysis_latest(analysis), self.__split_summary_location__\n )\n try:\n return dict(self[location].attrs)\n except:\n raise ValueError(\n 'Could not retrieve template-complement split point data from attributes of {}'.format(location)\n )\n\n def get_section_indices(self, analysis=__default_hairpin_split_analysis__):\n \"\"\"Get two tuples indicating the event indices for the template and\n complement boundaries.\n\n :param analysis: Base analysis path (under {})\n \"\"\"\n\n # TODO: if the below fails, calculating the values on the fly would be\n # a useful feature. Which brings to mind could we do such a thing\n # in all cases of missing data? 
Probably not reasonble.\n attrs = self.get_split_data(analysis)\n try:\n return (\n (attrs['start_index_temp'], attrs['end_index_temp']),\n (attrs['start_index_comp'], attrs['end_index_comp'])\n )\n except:\n raise ValueError('Could not retrieve template-complement segmentation data.')\n\n def get_section_events(self, section, analysis=__default_hairpin_split_analysis__):\n \"\"\"Get the template event data.\n\n :param analysis: Base analysis path (under {})\n \"\"\"\n\n indices = self.get_section_indices(analysis)\n read = self.get_read(group=True)\n events = None\n if section == 'template':\n events = self._get_read_data(read, indices[0])\n elif section == 'complement':\n events = self._get_read_data(read, indices[1])\n else:\n raise ValueError(\n '\"section\" parameter for fetching events must be \"template\" or \"complement\".'\n )\n return events\n\n ###\n # 1D Basecalling data\n\n\n def has_basecall_data(self, section=__default_section__, analysis=__default_basecall_1d_analysis__):\n \"\"\"\n Determines whether events table exists\n\n :param section: String to use in paths, e.g. 'template' or 'complement'.\n :param analysis: Base analysis name (under {})\n \"\"\"\n\n try:\n self.get_basecall_data(section=section, analysis=analysis)\n return True\n except:\n return False\n\n\n def get_basecall_data(self, section=__default_section__, analysis=__default_basecall_1d_analysis__):\n \"\"\"Read the annotated basecall_1D events from the fast5 file.\n\n :param section: String to use in paths, e.g. 'template' or 'complement'.\n :param analysis: Base analysis name (under {})\n \"\"\"\n\n base = self.get_analysis_latest(analysis)\n events_path = self._join_path(base, self.__default_basecall_1d_events__.format(section))\n try:\n return self[events_path][()]\n except:\n raise ValueError('Could not retrieve basecall_1D data from {}'.format(events_path))\n\n\n def get_alignment_attrs(self, section=__default_section__, analysis=__default_alignment_analysis__):\n \"\"\"Read the annotated alignment meta data from the fast5 file.\n\n :param section: String to use in paths, e.g. 'template' or 'complement'.\n :param analysis: Base analysis name (under {})\n\n \"\"\"\n\n attrs = None\n base = self.get_analysis_latest(analysis)\n attr_path = self._join_path(base,\n self.__default_basecall_alignment_summary__.format(section))\n try:\n attrs = dict(self[attr_path].attrs)\n except:\n raise ValueError('Could not retrieve alignment attributes from {}'.format(attr_path))\n\n return attrs\n\n ###\n # Mapping data\n\n def get_mapping_data(self, section=__default_section__, analysis=__default_mapping_analysis__):\n \"\"\"Read the annotated mapping events from the fast5 file.\n\n .. note::\n The seq_pos column for the events table returned from basecall_mapping is\n adjusted to be the genome position (consistent with squiggle_mapping)\n\n :param section: String to use in paths, e.g. 'template' or 'complement'.\n :param analysis: Base analysis name (under {}). 
For basecall mapping\n use analysis = 'AlignToRef'.\n \"\"\"\n\n events = None\n if analysis == self.__default_mapping_analysis__:\n # squiggle_mapping\n base = self.get_analysis_latest(analysis)\n event_path = self._join_path(base, self.__default_mapping_events__.format(section))\n try:\n events = self[event_path][()]\n except:\n raise ValueError('Could not retrieve squiggle_mapping data from {}'.format(event_path))\n attrs = self.get_mapping_attrs(section=section)\n\n elif analysis == self.__default_substep_mapping_analysis__:\n # substep mapping\n base = self.get_analysis_latest(analysis)\n event_path = self._join_path(base, self.__default_substep_mapping_events__.format(section))\n try:\n events = self[event_path][()]\n except:\n raise ValueError('Could not retrieve substep_mapping data from {}'.format(event_path))\n attrs=None\n\n else:\n # basecall_mapping\n base = self.get_analysis_latest(analysis)\n event_path = self._join_path(base, self.__default_basecall_mapping_events__.format(section))\n try:\n events = self[event_path][()]\n except:\n raise ValueError('Could not retrieve basecall_mapping data from {}'.format(event_path))\n\n # Modify seq_pos to be the actual genome position (consistent with squiggle_map)\n attrs = self.get_mapping_attrs(section=section, analysis=self.__default_alignment_analysis__)\n if attrs['direction'] == '+':\n events['seq_pos'] = events['seq_pos'] + attrs['ref_start']\n else:\n events['seq_pos'] = attrs['ref_stop'] - events['seq_pos']\n\n # add transition field\n if attrs:\n move = np.ediff1d(events['seq_pos'], to_begin=0)\n if attrs['direction'] == '-':\n move *= -1\n events = nprf.append_fields(events, 'move', move)\n\n return events\n\n\n def get_mapping_attrs(self, section=__default_section__, analysis=__default_mapping_analysis__):\n \"\"\"Read the annotated mapping meta data from the fast5 file.\n Names which are inconsistent between squiggle_mapping and basecall_mapping are added to\n basecall_mapping (thus duplicating the attributes in basecall mapping).\n\n :param section: String to use in paths, e.g. 
'template' or 'complement'.\n :param analysis: Base analysis name (under {})\n For basecall mapping use analysis = 'Alignment'\n \"\"\"\n\n attrs = None\n if analysis == self.__default_mapping_analysis__:\n # squiggle_mapping\n base = self.get_analysis_latest(analysis)\n attr_path = self._join_path(base, self.__default_mapping_summary__.format(section))\n try:\n attrs = dict(self[attr_path].attrs)\n except:\n raise ValueError('Could not retrieve squiggle_mapping meta data from {}'.format(attr_path))\n else:\n # basecall_mapping\n\n # AligToRef attributes (set AlignToRef first so that Alignment attrs are not overwritten)\n base = self.get_analysis_latest(self.__default_basecall_mapping_analysis__)\n attr_path = self._join_path(base, self.__default_basecall_mapping_summary__.format(section))\n try:\n attrs = dict(self[attr_path].attrs)\n except:\n raise ValueError('Could not retrieve basecall_mapping meta data from {}'.format(attr_path))\n\n # Rename some of the fields\n rename = [\n ('genome_start', 'ref_start'),\n ('genome_end', 'ref_stop'),\n ]\n for old, new in rename:\n attrs[new] = attrs.pop(old)\n\n # Alignment attributes\n base = self.get_analysis_latest(analysis)\n attr_path = self._join_path(\n base, self.__default_basecall_alignment_summary__.format(section))\n try:\n genome = self[attr_path].attrs.get('genome')\n except:\n raise ValueError('Could not retrieve basecall_mapping genome field from {}'.format(attr_path))\n try:\n attrs['reference'] = (self.get_reference_fasta(section = section)).split('\\n')[1]\n except:\n raise ValueError('Could not retrieve basecall_mapping fasta from Alignment analysis')\n\n # Add attributes with keys consistent with Squiggle_map\n rc = '_rc'\n is_rc = genome.endswith(rc)\n attrs['ref_name'] = genome[:-len(rc)] if is_rc else genome\n attrs['direction'] = '-' if is_rc else '+'\n\n # Trim any other fields, the allowed are those produced by\n # squiggle_mapping. We allow strand_score but do not require\n # it since our writer does not require it.\n required = [\n 'direction', 'ref_start', 'ref_stop', 'ref_name',\n 'num_skips', 'num_stays', 'reference'\n ]\n additional = ['strand_score', 'shift', 'scale', 'drift', 'var', 'scale_sd', 'var_sd']\n keep = required + additional\n assert set(required).issubset(set(attrs)), 'Required mapping attributes not found'\n for key in (set(attrs) - set(keep)):\n del(attrs[key])\n\n return attrs\n\n\n def get_any_mapping_data(self, section=__default_section__, attrs_only=False):\n \"\"\"Convenience method for extracting whatever mapping data might be\n present, favouring squiggle_mapping output over basecall_mapping.\n\n :param section: (Probably) one of '2D', 'template', or 'complement'\n :param attrs_only: Use attrs_only=True to return mapping attributes without events\n\n :returns: the tuple (events, attrs) or attrs only\n \"\"\"\n events = None\n attrs = None\n try:\n if not attrs_only:\n events = self.get_mapping_data(section=section)\n attrs = self.get_mapping_attrs(section=section)\n except Exception as e:\n try:\n if not attrs_only:\n events = self.get_mapping_data(section=section,\n analysis=self.__default_basecall_mapping_analysis__)\n attrs = self.get_mapping_attrs(section=section,\n analysis=self.__default_alignment_analysis__)\n except Exception as e:\n raise ValueError(\n \"Cannot find any mapping data at paths I know about in {}. 
\"\n \"Consider using get_mapping_data() with analysis argument.\"\n .format(self.filename)\n )\n if not attrs_only:\n return events, attrs\n else:\n return attrs\n\n ###\n # Sequence data\n\n def get_fastq(self, analysis=__default_basecall_2d_analysis__, section=__default_seq_section__, custom=None):\n \"\"\"Get the fastq (sequence) data.\n\n :param analysis: Base analysis name (under {})\n :param section: (Probably) one of '2D', 'template', or 'complement'\n :param custom: Custom hdf path overriding all of the above.\n \"\"\"\n\n err_msg = 'Could not retrieve sequence data from {}'\n\n if custom is not None:\n location = custom\n else:\n location = self._join_path(\n self.get_analysis_latest(analysis), self.__default_basecall_fastq__.format(section)\n )\n try:\n return self.bytes_to_string(self[location][()])\n except:\n # Did we get given section != 2D and no analysis, that's\n # more than likely incorrect. Try alternative analysis\n if section != self.__default_seq_section__ and analysis == self.__default_basecall_2d_analysis__:\n location = self._join_path(\n self.get_analysis_latest(self.__default_basecall_1d_analysis__),\n self.__default_basecall_fastq__.format(section)\n )\n try:\n return self.bytes_to_string(self[location][()])\n except:\n raise ValueError(err_msg.format(location))\n else:\n raise ValueError(err_msg.format(location))\n\n def get_sam(self, analysis=__default_alignment_analysis__, section=__default_seq_section__, custom=None):\n \"\"\"Get SAM (alignment) data.\n\n :param analysis: Base analysis name (under {})\n :param section: (Probably) one of '2D', 'template', or 'complement'\n :param custom: Custom hdf path overriding all of the above.\n \"\"\"\n\n if custom is not None:\n location = custom\n else:\n location = self._join_path(\n self.get_analysis_latest(analysis), 'Aligned_{}'.format(section), 'SAM'\n )\n try:\n return self[location][()]\n except:\n raise ValueError('Could not retrieve SAM data from {}'.format(location))\n\n def get_reference_fasta(self, analysis=__default_alignment_analysis__, section=__default_seq_section__, custom=None):\n \"\"\"Get fasta sequence of known DNA fragment for the read.\n\n :param analysis: Base analysis name (under {})\n :param section: (Probably) one of '2D', 'template', or 'complement'\n :param custom: Custom hdf path overriding all of the above.\n \"\"\"\n\n if custom is not None:\n location = custom\n else:\n location = self._join_path(\n self.get_analysis_latest(analysis), 'Aligned_{}'.format(section), 'Fasta'\n )\n try:\n return self[location][()]\n except:\n raise ValueError('Could not retrieve sequence data from {}'.format(location))\n\n def delete(self, section, ignore=False):\n \"\"\"Delete a section of the H5file\"\"\"\n self.assert_writable()\n try:\n del self[section]\n except KeyError:\n if ignore:\n pass\n else:\n raise KeyError(\"{} not found in Fast5 file\".format(section))\n\n def is_read_rna(self):\n \"\"\"\n Determine if a read is RNA or DNA\n source: https://github.com/nanoporetech/tombo/blob/master/tombo/tombo_helper.py\n \"\"\"\n # check both experiment type and kit slots for \"rna\"\n exp_type, exp_kit = None, None\n try:\n exp_type = self.bytes_to_string(self['UniqueGlobalKey/context_tags'].attrs[\n 'experiment_type'])\n # remove the word internal since it contains rna.\n exp_type = exp_type.replace('internal', '')\n except:\n pass\n try:\n exp_kit = self.bytes_to_string(self['UniqueGlobalKey/context_tags'].attrs[\n 'experiment_kit'])\n # remove the word internal since it contains rna.\n exp_kit = 
exp_kit.replace('internal', '')\n except:\n pass\n\n if exp_type is None and exp_kit is None:\n rna = False\n else:\n rna = (\n (exp_type is not None and re.search('rna', exp_type) is not None) or\n (exp_kit is not None and re.search('rna', exp_kit) is not None))\n\n return rna\n\n\ndef iterate_fast5(path, strand_list=None, paths=False, mode='r', limit=None, files_group_pattern=None, sort_by_size=None):\n \"\"\"Iterate over directory or list of .fast5 files.\n\n :param path: Directory in which single read fast5 are located or filename.\n :param strand_list: list of filenames to iterate, will be combined with path.\n :param paths: yield file paths instead of fast5 objects.\n :param mode: mode for opening files.\n :param limit: limit number of files to consider.\n :param files_group_pattern: yield file paths in groups of specified pattern\n :param sort_by_size: 'desc' - from largest to smallest, 'asc' - opposite\n \"\"\"\n if strand_list is None:\n # Could make glob more specific to filename pattern expected\n if os.path.isdir(path):\n files = glob(os.path.join(path, '*.fast5'))\n else:\n files = [path]\n elif os.path.isfile(strand_list):\n names = np.genfromtxt(\n strand_list, delimiter='\\t', dtype=None, names=True\n )['filename']\n files = [os.path.join(path, x) for x in names]\n else:\n files = [os.path.join(path, x) for x in strand_list]\n\n if sort_by_size is not None:\n reverse = True if sort_by_size == 'desc' else False\n files.sort(reverse=reverse, key=lambda x: os.path.getsize(x))\n\n for f in files[:limit]:\n if not os.path.exists(f):\n sys.stderr.write('File {} does not exist, skipping\\n'.format(f))\n continue\n if not paths:\n fh = Fast5(f, read=mode)\n yield fh\n fh.close()\n else:\n yield os.path.abspath(f)\n\n\ndef main():\n fast5_file = \"/Users/andrewbailey/CLionProjects/nanopore-RNN/nanotensor/tests/test_files/minion-reads/canonical/miten_PC_20160820_FNFAD20259_MN17223_sequencing_run_AMS_158_R9_WGA_Ecoli_08_20_16_43623_ch100_read214_strand.fast5\"\n f5fh = Fast5(fast5_file, read='r+')\n print(f5fh.raw_attributes)\n print(f5fh.get_read_stats())\n\nif __name__ == '__main__':\n main()\n"
]
| [
[
"matplotlib.use",
"scipy.stats.norm.pdf",
"numpy.array_equal",
"numpy.asarray",
"matplotlib.ticker.AutoMinorLocator",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.savefig",
"scipy.stats.entropy",
"matplotlib.ticker.AutoLocator",
"numpy.exp",
"numpy.mean",
"matplotlib.pyplot.figure",
"numpy.std",
"sklearn.neighbors.KernelDensity",
"matplotlib.pyplot.show",
"numpy.linspace",
"matplotlib.pyplot.axes",
"numpy.random.random"
],
[
"numpy.asarray",
"numpy.lib.recfunctions.append_fields",
"numpy.median",
"numpy.round",
"numpy.genfromtxt",
"numpy.ediff1d",
"numpy.sort",
"numpy.searchsorted",
"numpy.dtype"
]
]
|
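The `compare_distributions_between_models` routine in the row above reduces each kmer to two scalar summaries: a KL divergence via `scipy.stats.entropy` and a Hellinger distance via an imported `hellinger2` helper that is not defined in this file. A minimal sketch of both, assuming `hellinger2` follows the standard discrete Hellinger definition (an assumption; the repository's actual helper may differ):

```python
import numpy as np
from scipy.stats import entropy


def kl_divergence(dist1, dist2):
    # Guard zero bins the same way the script does (0 -> 1e-6),
    # then let scipy normalize and compute KL in bits (base 2).
    dist1 = np.array(dist1, dtype=float)
    dist2 = np.array(dist2, dtype=float)
    dist1[dist1 == 0] = 0.000001
    dist2[dist2 == 0] = 0.000001
    kl = entropy(pk=dist1, qk=dist2, base=2)
    return None if np.isinf(kl) else kl


def hellinger2(p, q):
    # Discrete Hellinger distance between two normalized densities (assumed form).
    p = np.asarray(p, dtype=float)
    q = np.asarray(q, dtype=float)
    p, q = p / p.sum(), q / q.sum()
    return np.sqrt(np.sum((np.sqrt(p) - np.sqrt(q)) ** 2)) / np.sqrt(2)


x = np.linspace(-5, 5, 3000)
d1 = np.exp(-0.5 * x ** 2)            # two toy unnormalized Gaussian densities
d2 = np.exp(-0.5 * (x - 1) ** 2)
print(kl_divergence(d1, d2), hellinger2(d1, d2))
```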
bispmri/Ultra-low-field-MRI-scanner | [
"a3b5b09d7301bde2f6c9b74216f4f00169c3cc00"
]
| [
"Deep Learning based EMI Elimination Codes and Data/EMIPrediction_test.py"
]
| [
"import numpy as np\nimport pandas as pd\nimport datetime\nimport time\nfrom model import Net1\nimport torch.optim as optim\nfrom scipy import io\nimport argparse\nimport os # nn.BatchNorm2d(2,affine=False),\nimport torch\nfrom torch import nn\nfrom torch.utils.data import Dataset, DataLoader\nimport h5py \nimport matplotlib.pyplot as plt\nimport h5py \nimport matplotlib\nfrom PIL import Image\nimport math\nfrom sklearn.metrics import confusion_matrix\nimport pylab as pl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport itertools\nimport ParaSetting\n\ntorch.cuda.synchronize()\nstarttime = time.time()\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\" \ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nprint(device)\nread_h5 = True\nnum_workers = 0\n\n\n## Working path define\nexpname = ParaSetting.expname\nroot_ws = ParaSetting.root_ws\n\nepoch_num_char = str(ParaSetting.epoch_num)\n\ndatapath = root_ws+expname+'data/'\nmodelpath_test = root_ws+expname+'model/'+'epoch-'+epoch_num_char+'.pth'\nsavepath = root_ws+expname+'results/'\nos.makedirs(savepath, exist_ok=True)\n\n## Hyperparameters\nNx = ParaSetting.Nx\nbs = ParaSetting.bs # batch size\n\nclass prepareData_test(Dataset):\n def __init__(self, train_or_test):\n\n self.files = os.listdir(datapath+train_or_test)\n self.files.sort(key=lambda x:int(x[:-4])) \n self.train_or_test= train_or_test\n\n def __len__(self):\n return len(self.files)\n\n def __getitem__(self, idx):\n \n data = torch.load(datapath+self.train_or_test+'/'+self.files[idx])\n return data['k-space'], data['label']\n \n\ntestset = prepareData_test('test')\ntestloader = torch.utils.data.DataLoader(testset, batch_size=1,shuffle=False, num_workers=num_workers)\n\n\n# Testing\nfilename = os.listdir(datapath+'/test/')\nlength = len(filename)\n\ndin = np.empty((length,2,Nx,10))\ndout = np.empty((length,2,Nx,1))\ndlab = np.empty((length,2,Nx,1))\n\nmodel = torch.load(modelpath_test)\ncriterion1 = nn.MSELoss()\n\nmodel.eval()\nloss_validation_list = []\nloss_batch = []\nloss = []\ndata1 = []\nprint('\\n testing...')\nfor i, data in enumerate(testloader, 0):\n inputs = data[0].reshape(-1,2,Nx,10).to(device)\n labels = data[1].reshape(-1,2,Nx,1).to(device)\n \n with torch.no_grad():\n outs = model(inputs)\n \n loss = criterion1(outs, labels)\n loss_batch.append(loss.item())\n loss_validation_list.append(round(sum(loss_batch) / len(loss_batch),10))\n print(loss_validation_list)\n output = outs.cuda().data.cpu()\n labelo =labels.cuda().data.cpu()\n inputo = inputs.cuda().data.cpu()\n \n dout[i] = output[0:1,:,:,:]\n dlab[i] = labelo[0:1,:,:,:]\n \nf = h5py.File(savepath+'output.h5','w')\nf['k-space'] = dout\nk = h5py.File(savepath+'label.h5','w')\nk['k-space'] = dlab\nf.close()\nk.close()\n"
]
| [
[
"numpy.empty",
"torch.nn.MSELoss",
"torch.cuda.synchronize",
"torch.no_grad",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.load"
]
]
|
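The test script in the row above collects its predictions and labels into `(N, 2, Nx, 1)` arrays (real/imaginary k-space channels) and writes each under a `k-space` key. A minimal sketch of that HDF5 round-trip, with a placeholder `Nx` (an assumption; the real value comes from `ParaSetting.Nx`):

```python
import h5py
import numpy as np

Nx = 256                              # assumed readout length for illustration
dout = np.zeros((4, 2, Nx, 1))        # placeholder predictions

# write predictions under the same key the script uses
with h5py.File("output.h5", "w") as f:
    f["k-space"] = dout

# read the full dataset back and confirm the shape survived
with h5py.File("output.h5", "r") as f:
    restored = f["k-space"][()]
assert restored.shape == dout.shape
```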
dmsquare/CalendarGNN | [
"dc071b2a901d25909d8ef7bcf467dda58fb22c3c"
]
| [
"data_manager.py"
]
| [
"\"\"\"\nManage dataset\n\"\"\"\n\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.data\n\n\ndef prepare_dataset(dataset, lb, _top=0):\n assert lb in {'gender', 'age', 'income'}\n ''' Load dataset file(s) '''\n uids, uid2u = dataset['uids'], dataset['uid2u']\n # sids, sid2s = dataset['sids'], dataset['sid2s']\n lids, lid2l = dataset['lids'], dataset['lid2l']\n vids, vid2v = dataset['vids'], dataset['vid2v']\n u2ss, s2l = dataset['u2ss'], dataset['s2l']\n s2vs, s_v2dt = dataset['s2vs'], dataset['s_v2dt']\n u2lbs = dataset['u2lbs']\n ''' Prepare labels '''\n assert all(isinstance(lb, int) for lbs in u2lbs for lb in lbs)\n [u2gender, u2age, u2income] = zip(*u2lbs)\n u2lb = {'gender': u2gender, 'age': u2age, 'income': u2income}[lb]\n ''' Item and location initial embeddings '''\n v_embs_np, l_embs_np = np.eye(len(vids), dtype=np.float), np.eye(len(lids), dtype=np.float)\n assert np.sum(v_embs_np) == len(vids) and np.sum(l_embs_np) == len(lids)\n ''' Split train/valid/test '''\n _ds_size = len(uids) if _top == 0 else _top\n _train_size, _valid_size = int(_ds_size * 0.8), int(_ds_size * 0.1)\n _test_size = _ds_size - _train_size - _valid_size\n _perm_ds_idxs = np.random.permutation(_ds_size)\n train_us = _perm_ds_idxs[: _train_size]\n valid_us = _perm_ds_idxs[_train_size: -_test_size]\n test_us = _perm_ds_idxs[-_test_size:]\n assert (not set(train_us).intersection(valid_us)) and (not set(train_us).intersection(test_us))\n print(f' - Train/valid/test: {len(train_us):,}/{len(valid_us):,}/{len(test_us):,}')\n ''' Pack loaders'''\n train_loader = _build_loader(train_us, u2ss, s2vs, u2lb, s_v2dt, s2l, shuffle=True)\n valid_loader = _build_loader(valid_us, u2ss, s2vs, u2lb, s_v2dt, s2l, shuffle=False)\n test_loader = _build_loader(test_us, u2ss, s2vs, u2lb, s_v2dt, s2l, shuffle=False)\n\n return train_loader, valid_loader, test_loader, v_embs_np, l_embs_np\n\n\ndef _build_loader(us, u2ss, s2vs, u2lb, s_v2dt, s2l, shuffle, max_s=200, max_v=200):\n assert all([(s, s2vs[s][0]) in s_v2dt for ss in [u2ss[u] for u in us] for s in ss]) # all session times are known\n assert all([s < len(s2l) for ss in [u2ss[u] for u in us] for s in ss]) # all session locations are known\n ''' Sessions of items '''\n # truncate each user's sessions of items into size [<= max_s, <= max_v]\n _u_s_vs = [[s2vs[s][:max_v] for s in ss[:max_s]] for ss in [u2ss[u] for u in us]]\n assert all([len(s_vs) <= max_s for s_vs in _u_s_vs]) and all([len(vs) <= max_v for s_vs in _u_s_vs for vs in s_vs])\n u_s_vs = [nn.utils.rnn.pad_sequence([torch.tensor([v + 1 for v in vs]) for vs in s_vs], batch_first=True)\n for s_vs in _u_s_vs]\n assert all([_ten.size()[0] <= max_s and _ten.size()[1] <= max_v for _ten in u_s_vs])\n ''' Temporal signals '''\n _u_s_ts = [[s_v2dt[(s, s2vs[s][0])] for s in ss[:max_s]] for ss in [u2ss[u] for u in us]]\n assert all([len(s_ts) <= max_s for s_ts in _u_s_ts]) and all([len(ts) == 4 for s_ts in _u_s_ts for ts in s_ts])\n u_s_ts = [torch.tensor(s_ts, dtype=torch.long) for s_ts in _u_s_ts]\n assert all([_ten.size()[0] <= max_s and _ten.size()[1] == 4 for _ten in u_s_ts])\n ''' Location signals '''\n _u_s_l = [[s2l[s] for s in ss[:max_s]] for ss in [u2ss[u] for u in us]]\n assert all([len(s_l) <= max_s for s_l in _u_s_l])\n u_s_l = [torch.tensor(s_l, dtype=torch.long) for s_l in _u_s_l]\n ''' User labels '''\n u_lb = [torch.tensor(u2lb[u], dtype=torch.long) for u in us]\n assert len(u_s_vs) == len(u_s_ts) and len(u_s_vs) == len(u_s_l) and len(u_s_vs) == len(u_lb)\n dataset = 
list(zip(u_s_vs, u_s_ts, u_s_l, u_lb))\n # noinspection PyTypeChecker\n loader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=shuffle)\n return loader\n"
]
| [
[
"numpy.sum",
"numpy.random.permutation",
"torch.tensor",
"torch.utils.data.DataLoader"
]
]
|
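`_build_loader` in the row above packs each user's variable-length item sessions with `nn.utils.rnn.pad_sequence`, shifting item ids by +1 so that 0 is free to serve as the padding value. A minimal standalone sketch of that step with toy ids:

```python
import torch
import torch.nn as nn

sessions = [[3, 1, 4], [1, 5], [9, 2, 6, 5]]      # toy item ids per session

# the +1 shift reserves 0 for padding, mirroring `v + 1` in _build_loader
padded = nn.utils.rnn.pad_sequence(
    [torch.tensor([v + 1 for v in vs]) for vs in sessions],
    batch_first=True,
)
print(padded)
# tensor([[ 4,  2,  5,  0],
#         [ 2,  6,  0,  0],
#         [10,  3,  7,  6]])
```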
colleenjg/OpenScope_CA_Analysis | [
"a429f212068040729614d29d90aaac3d8d90e813"
]
| [
"plot_fcts/glm_plots.py"
]
| [
"\"\"\"\nglm_plots.py\n\nThis script contains functions to plot results of GLM analyses (glm.py) from \ndictionaries.\n\nAuthors: Colleen Gillon\n\nDate: October, 2019\n\nNote: this code uses python 3.7.\n\n\"\"\"\n\nimport copy\nimport logging\nimport os\nimport warnings\n\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\nfrom util import file_util, gen_util, logger_util, math_util, plot_util\nfrom sess_util import sess_plot_util, sess_str_util\n\nlogger = logging.getLogger(__name__)\n\n\n#############################################\ndef plot_from_dict(dict_path, plt_bkend=None, fontdir=None, datetime=True):\n \"\"\"\n plot_from_dict(dict_path)\n\n Plots data from dictionaries containing analysis parameters and results.\n\n Required args:\n - dict_path (str): path to dictionary to plot data from\n \n Optional_args:\n - plt_bkend (str): mpl backend to use for plotting (e.g., \"agg\")\n default: None\n - fontdir (str) : path to directory where additional fonts are stored\n default: None\n - datetime (bool): figpar[\"save\"] datatime parameter (whether to \n place figures in a datetime folder)\n default: True\n \"\"\"\n\n logger.info(f\"Plotting from dictionary: {dict_path}\", \n extra={\"spacing\": \"\\n\"})\n\n\n figpar = sess_plot_util.init_figpar(\n plt_bkend=plt_bkend, fontdir=fontdir, datetime=datetime)\n plot_util.manage_mpl(cmap=False, **figpar[\"mng\"])\n\n info = file_util.loadfile(dict_path)\n savedir = os.path.dirname(dict_path)\n\n analysis = info[\"extrapar\"][\"analysis\"]\n\n # 0. Plots the explained variance\n if analysis == \"v\": # difference correlation\n plot_glm_expl_var(figpar=figpar, savedir=savedir, **info)\n else:\n warnings.warn(f\"No plotting function for analysis {analysis}\")\n\n plt.close(\"all\")\n\n#############################################\ndef plot_glm_expl_var(analyspar, sesspar, stimpar, extrapar, glmpar,\n sess_info, all_expl_var, figpar=None, savedir=None):\n \"\"\"\n plot_pup_diff_corr(analyspar, sesspar, stimpar, extrapar, \n sess_info, all_expl_var)\n\n From dictionaries, plots explained variance for different variables for \n each ROI.\n\n Required args:\n - analyspar (dict) : dictionary with keys of AnalysPar namedtuple\n - sesspar (dict) : dictionary with keys of SessPar namedtuple \n - stimpar (dict) : dictionary with keys of StimPar namedtuple\n - glmpar (dict) : dictionary with keys of GLMPar namedtuple\n - extrapar (dict) : dictionary containing additional analysis \n parameters\n [\"analysis\"] (str): analysis type (e.g., \"c\")\n - sess_info (dict) : dictionary containing information from each\n session \n [\"mouse_ns\"] (list) : mouse numbers\n [\"sess_ns\"] (list) : session numbers \n [\"lines\"] (list) : mouse lines\n [\"planes\"] (list) : imaging planes\n [\"nrois\"] (list) : number of ROIs in session\n [\"nanrois_{}\"] (list) : list of ROIs with NaNs/Infs in raw or dF/F \n traces (\"raw\", \"dff\")\n - all_expl_var (list) : list of dictionaries with explained variance \n for each session set, with each glm \n coefficient as a key:\n [\"full\"] (list) : list of full explained variance stats for \n every ROI, structured as ROI x stats\n [\"coef_all\"] (dict): max explained variance for each ROI with each\n coefficient as a key, structured as ROI x stats\n [\"coef_uni\"] (dict): unique explained variance for each ROI with \n each coefficient as a key, \n structured as ROI x stats\n [\"rois\"] (list) : ROI numbers (-1 for GLMs fit to \n mean/median ROI activity)\n \n Optional args:\n - figpar (dict): dictionary containing the 
following figure parameter \n dictionaries\n default: None\n [\"init\"] (dict): dictionary with figure initialization parameters\n [\"save\"] (dict): dictionary with figure saving parameters\n [\"dirs\"] (dict): dictionary with additional figure parameters\n - savedir (str): path of directory in which to save plots.\n default: None\n \n Returns:\n - fulldir (str) : final name of the directory in which the figure is \n saved (may differ from input savedir, if datetime \n subfolder is added.)\n - savename (str): name under which the figure is saved\n \"\"\"\n stimstr_pr = sess_str_util.stim_par_str(\n stimpar[\"stimtype\"], stimpar[\"bri_dir\"], stimpar[\"bri_size\"], \n stimpar[\"gabk\"], \"print\")\n dendstr_pr = sess_str_util.dend_par_str(\n analyspar[\"dend\"], sesspar[\"plane\"], \"roi\", \"print\")\n\n sessstr = sess_str_util.sess_par_str(\n sesspar[\"sess_n\"], stimpar[\"stimtype\"], sesspar[\"plane\"], \n stimpar[\"bri_dir\"],stimpar[\"bri_size\"], stimpar[\"gabk\"]) \n dendstr = sess_str_util.dend_par_str(\n analyspar[\"dend\"], sesspar[\"plane\"], \"roi\")\n\n # extract some info from sess_info\n keys = [\"mouse_ns\", \"sess_ns\", \"lines\", \"planes\"]\n [mouse_ns, sess_ns, lines, planes] = [sess_info[key] for key in keys]\n\n n_sess = len(mouse_ns)\n \n nroi_strs = sess_str_util.get_nroi_strs(\n sess_info, analyspar[\"remnans\"], analyspar[\"fluor\"], style=\"par\")\n\n plot_bools = [ev[\"rois\"] != [-1] for ev in all_expl_var]\n n_sess = sum(plot_bools)\n\n if stimpar[\"stimtype\"] == \"gabors\":\n xyzc_dims = [\"surp\", \"gabfr\", \"pup_diam_data\", \"run_data\"]\n log_dims = xyzc_dims + [\"gab_ori\"]\n elif stimpar[\"stimtype\"] == \"bricks\":\n xyzc_dims = [\"surp\", \"bri_dir\", \"pup_diam_data\", \"run_data\"]\n log_dims = xyzc_dims\n\n logger.info(\"Plotting GLM full and unique explained variance for \"\n \"{}.\".format(\", \".join(xyzc_dims)))\n\n if n_sess > 0:\n if figpar is None:\n figpar = sess_plot_util.init_figpar()\n\n figpar = copy.deepcopy(figpar)\n cmap = plot_util.manage_mpl(cmap=True, nbins=100, **figpar[\"mng\"])\n\n if figpar[\"save\"][\"use_dt\"] is None:\n figpar[\"save\"][\"use_dt\"] = gen_util.create_time_str()\n figpar[\"init\"][\"ncols\"] = n_sess\n figpar[\"save\"][\"fig_ext\"] = \"png\"\n \n fig, ax = plot_util.init_fig(2 * n_sess, **figpar[\"init\"], proj=\"3d\")\n\n fig.suptitle(\"Explained variance per ROI\", y=1)\n else:\n logger.info(\"No plots, as only results across ROIs are included\")\n fig = None\n\n i = 0\n for e, expl_var in enumerate(all_expl_var):\n if expl_var[\"rois\"] == [\"all\"]:\n plot_bools[e] = False\n\n # collect info for plotting and logging results across ROIs\n rs = np.where(np.asarray(expl_var[\"rois\"]) != -1)[0]\n all_rs = np.where(np.asarray(expl_var[\"rois\"]) == -1)[0]\n if len(all_rs) != 1:\n raise ValueError(\"Expected only one results for all ROIs.\")\n else:\n all_rs = all_rs[0]\n full_ev = expl_var[\"full\"][all_rs]\n\n title = (f\"Mouse {mouse_ns[i]} - {stimstr_pr}\\n(sess {sess_ns[i]}, \"\n f\"{lines[i]} {planes[i]}{dendstr_pr},{nroi_strs[i]})\")\n logger.info(f\"{title}\", extra={\"spacing\": \"\\n\"})\n\n math_util.log_stats(full_ev, stat_str=\"\\nFull explained variance\")\n\n\n for v, var_type in enumerate([\"coef_all\", \"coef_uni\"]):\n if var_type == \"coef_all\":\n sub_title = \"Explained variance per coefficient\"\n elif var_type == \"coef_uni\":\n sub_title = \"Unique explained variance per coefficient\"\n logger.info(sub_title, extra={\"spacing\": \"\\n\"})\n\n dims_all = []\n for key in log_dims:\n 
if key in xyzc_dims:\n # get mean/med\n dims_all.append(np.asarray(expl_var[var_type][key])[rs, 0])\n math_util.log_stats(\n expl_var[var_type][key][all_rs], stat_str=key)\n\n if not plot_bools[-1]:\n continue\n\n [x, y, z, c] = dims_all\n \n if v == 0:\n subpl_title = f\"{title}\\n{sub_title}\"\n else:\n subpl_title = sub_title\n\n sub_ax = ax[v, i]\n im = sub_ax.scatter(x, y, z, c=c, cmap=cmap, vmin=0, vmax=1)\n sub_ax.set_title(subpl_title)\n # sub_ax.set_zlim3d(0, 1.0)\n sub_ax.set_xlabel(xyzc_dims[0])\n sub_ax.set_ylabel(xyzc_dims[1])\n sub_ax.set_zlabel(xyzc_dims[2])\n if v == 0:\n full_ev_lab = math_util.log_stats(\n full_ev, stat_str=\"Full EV\", ret_str_only=True)\n sub_ax.plot([], [], c=\"k\", label=full_ev_lab)\n sub_ax.legend()\n\n i += 1\n\n if savedir is None:\n savedir = os.path.join(\n figpar[\"dirs\"][\"roi\"],\n figpar[\"dirs\"][\"glm\"])\n\n savename = (f\"roi_glm_ev_{sessstr}{dendstr}\")\n\n if n_sess > 0:\n plot_util.add_colorbar(fig, im, n_sess, label=xyzc_dims[3])\n\n fulldir = plot_util.savefig(fig, savename, savedir, **figpar[\"save\"])\n\n return fulldir, savename \n\n"
]
| [
[
"numpy.asarray",
"matplotlib.pyplot.close"
]
]
|
PEM-Humboldt/caracterizacion_paisajes_sonoros_ppii | [
"2b99a69faeb5cc094e582a2b6929ef18bd4a3c4e"
]
| [
"aguas_bajas/preprocessing/audio_to_spectro_image.py"
]
| [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nLoad data from pickle files and save images of spectrogram\nThe pipeline includes:\n - A low Butterworth pass filter\n - Spectrogram computation\n - A gaussian smoothing of the spectrogram\n - Nomalization of the spectrogram accoring to vmin, vmax values\n\n\n@author: jsulloa\n\"\"\"\nimport numpy as np\nimport pickle\nfrom maad import sound, util\nfrom preprocessing_utils import listdir_pattern\nfrom skimage import io\nfrom skimage.filters import gaussian\nimport os\n\n#%% settings\nfs = 192000\nopt_spec = {'wl': 4096, 'ovlp': 0.5, 'fcrop': [10,60000], 'db_range': 250}\nfpath = '/Volumes/PAPAYA/pkl_data/'\npath_save = '/Volumes/PAPAYA/pkl_data/'\nfmt = '.png'\ntlims = [00,24] # tiempo en horas\nwrite_wav = True\n\n#%%\n\nim_dict= dict()\n# load elements\nflist_dir = listdir_pattern(fpath, ends_with='pkl')\nfor fname_open in flist_dir:\n print('Processing file:', fname_open)\n pickle_in = open(fpath+fname_open,'rb')\n s_dict = pickle.load(pickle_in)\n flist = s_dict['flist']\n # filter flist\n idx_time = (flist.date_fmt.dt.hour >= tlims[0]) & (flist.date_fmt.dt.hour <= tlims[1])\n flist = flist.loc[idx_time,:]\n flist_days = flist.groupby(flist.date_fmt.dt.dayofyear)\n \n # iterate by day \n for day, flist_day in flist_days:\n date = flist_day.date_fmt.iloc[0].strftime('%y-%m-%d')\n print('Processing date: ', date)\n # concat audio into array\n s_sum = list()\n for index, row in flist_day.iterrows():\n s = s_dict[row.date]['s']\n s_sum.append(s)\n \n # crossfade and high pass filtering\n #s_sum = crossfade_list(s_sum, fs)\n #s_sum = butter_filter(s_sum,cutoff=200, fs=fs, order=2, ftype='high')\n s_sum = np.concatenate(s_sum, axis=0)\n \n # compute spectrogram\n im, dt, df, ext = sound.spectrogram(s_sum, fs, nperseg=opt_spec['wl'],\n overlap=opt_spec['ovlp'], flims=opt_spec['fcrop'])\n im = util.power2dB(im, 90) + 90\n # Apply gaussian smoothing\n im = gaussian(im, sigma=0.5, mode='reflect')\n \n # Normalize spectrogram according to sensor model \n vmin, vmax = 0, 66 # Audiomoth\n im[im<vmin] = vmin\n im[im>vmax] = vmax\n im = (im - im.min())/(im.max() - im.min())\n # save to file\n im = np.flip(im, axis=0)\n key = fname_open[0:-4]+'_'+date\n io.imsave(path_save+key+fmt, im)\n if write_wav:\n sound.write(path_save+key+'.wav', fs, s_sum, bit_depth=16)\n else:\n pass\n \n#%% Remove pkl files to free disk usage\nfor fname_open in flist_dir:\n print('Removing file:', fname_open)\n os.remove(fpath+fname_open)\n"
]
| [
[
"numpy.concatenate",
"numpy.flip"
]
]
|
Sowul/fc | [
"bc4f42a555a3db78fb733761bf9443108e88f32a"
]
| [
"tests/test_ga.py"
]
| [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom collections import Counter\nfrom copy import deepcopy\nfrom filecmp import cmp\nfrom operator import attrgetter\nfrom os import remove\n\nimport numpy as np\nimport pytest\nfrom sklearn.datasets import load_iris\nfrom sklearn.ensemble import RandomForestClassifier\n\nfrom ga import GeneticAlgorithm\n\[email protected]\ndef ga():\n np.random.seed(10)\n iris = load_iris()\n clf = RandomForestClassifier(max_depth=3, random_state=2222)\n ga = GeneticAlgorithm(clf, cv=5, duration=0.5)\n ga.X = np.asarray(iris.data)\n ga.y = np.asarray(iris.target)\n ga.y = ga.y.reshape(ga.y.shape[0], )\n ga.n_features = np.random.random_integers(10)\n return ga\n\[email protected](scope='module')\ndef ga_fitted():\n np.random.seed(10)\n clf = RandomForestClassifier(max_depth=3, random_state=2222)\n ga = GeneticAlgorithm(clf, 5, duration=0.5)\n iris = load_iris()\n ga.fit(iris.data, iris.target)\n return ga\n\ndef test_ga_init():\n clf = RandomForestClassifier(max_depth=3, random_state=2222)\n with pytest.raises(ValueError):\n ga = GeneticAlgorithm(clf, 5)\n with pytest.raises(ValueError):\n ga = GeneticAlgorithm(clf, 5, duration='s')\n with pytest.raises(ValueError):\n ga = GeneticAlgorithm(clf, 5, max_iter=0.5)\n with pytest.raises(ValueError):\n ga = GeneticAlgorithm(clf, 5, 1, 1)\n with pytest.raises(ValueError):\n ga = GeneticAlgorithm(clf, 5, base_included='s')\n \ndef test_ga_create_individual(ga):\n print(ga._create_individual().shape)\n assert ga._create_individual().shape == (1, ga.n_features)\n\ndef test_ga_create_population(ga):\n ga.pop_members = 5\n assert ga._create_population().shape == (ga.pop_members, ga.n_features)\n\ndef test_ga_apply_function(ga):\n ga.pop_members = 5\n population = ga._create_population()\n z = np.zeros((ga.X.shape[0], ga.n_features), dtype=ga.X.dtype)\n for i, member in enumerate(population):\n for col, feature in enumerate(member):\n z[:, col] = ga._apply_function(i, col, feature)\n assert ga.X.shape[0] == z.shape[0]\n assert ga.n_features == z.shape[1]\n\ndef test_ga_transform(ga):\n ga.pop_members = 5\n population = ga._create_population()\n for j, member in enumerate(population):\n new_X = ga._transform(j, member)\n assert ga.X.shape[0] == new_X.shape[0]\n assert ga.X.shape[1]+ga.n_features == new_X.shape[1]\n\ndef test_ga_get_fitness(ga):\n ga.pop_members = 5\n population = ga._create_population()\n scores = []\n for j, member in enumerate(population):\n new_X = ga._transform(j, member)\n score = ga._get_fitness(ga.clf, new_X, ga.y)\n scores.append(score)\n assert all(elem <= 0 for elem in scores) == True\n\ndef test_ga_select_parents(ga):\n parents = ga._select_parents()\n assert parents.shape == ((ga.pop_members-ga.migrants-ga.elite) // 2, 2)\n assert np.all(np.logical_and(parents >= 0, parents < ga.pop_members)) == True\n\ndef test_ga_crossover(ga):\n population = ga._create_population()\n for j, member in enumerate(population):\n new_X = ga._transform(j, member)\n score = ga._get_fitness(ga.clf, new_X, ga.y)\n ga._individuals.append(ga._Individual(member,\n ga._columns[j], score))\n population = sorted(ga._individuals, key=attrgetter('score'), reverse=True)\n best_ind = deepcopy(population[0])\n parents = ga._select_parents()\n first_parent = np.reshape(population[parents[2][0]].transformations,\n (-1, ga.n_features))\n second_parent = np.reshape(population[parents[2][1]].transformations,\n (-1, ga.n_features))\n new_population = ga._crossover(parents, population)\n best_ind_after_cross = population[0]\n x = 
[first_parent[0][0], first_parent[0][1], second_parent[0][0], second_parent[0][1]]\n y = [new_population[4][0], new_population[4][1], new_population[5][0], new_population[5][1]]\n assert len(new_population) == ga.pop_members-ga.elite-ga.migrants\n assert np.array_equal(best_ind.transformations,\n best_ind_after_cross.transformations) == True\n assert Counter(x) == Counter(y)\n\n\ndef test_ga_mutate(ga):\n population = ga._create_population()\n pop_shape = population.shape\n pop_shape = (pop_shape[0] - ga.elite - ga.migrants, pop_shape[1])\n for j, member in enumerate(population):\n new_X = ga._transform(j, member)\n score = ga._get_fitness(ga.clf, new_X, ga.y)\n ga._individuals.append(ga._Individual(member,\n ga._columns[j], score))\n population = sorted(ga._individuals, key=attrgetter('score'), reverse=True)\n parents = ga._select_parents()\n new_population = ga._crossover(parents, population)\n mutated_population = ga._mutate(new_population,\n np.std([ind[0] for ind in population]))\n assert np.all(np.logical_and(mutated_population >= 0,\n mutated_population < ga.n_operators)) == True\n assert pop_shape == new_population.shape == mutated_population.shape\n\ndef test_ga_create_next_generation(ga):\n population = ga._create_population()\n for j, member in enumerate(population):\n new_X = ga._transform(j, member)\n score = ga._get_fitness(ga.clf, new_X, ga.y)\n ga._individuals.append(ga._Individual(member,\n ga._columns[j], score))\n new_population = ga._create_next_generation(ga._individuals)\n assert population.shape == new_population.shape\n\ndef test_ga_fit(ga_fitted):\n assert ga_fitted._base_score < ga_fitted._best_score.score\n\ndef test_ga_transform(ga_fitted):\n iris = load_iris()\n new_X = ga_fitted.transform(iris.data, ga_fitted._best_score)\n assert new_X.shape[0] == iris.data.shape[0]\n assert new_X.shape[1] == iris.data.shape[1] + ga_fitted.n_features\n\ndef test_ga_save_load(ga_fitted):\n ga_fitted.save('tests/ga_saved_ind.json')\n loaded_ind = ga_fitted.load('tests/ga_saved_ind.json')\n assert np.array_equal(ga_fitted._most_freq.transformations.tolist(), loaded_ind.transformations) == True\n assert np.array_equal([x.tolist() for x in ga_fitted._most_freq.columns], loaded_ind.columns) == True\n remove('tests/ga_saved_ind.json')\n ga_fitted.save('tests/ga_saved_ind.json', 'best')\n loaded_ind = ga_fitted.load('tests/ga_saved_ind.json')\n assert np.array_equal(ga_fitted._best_score.transformations.tolist(), loaded_ind.transformations) == True\n assert np.array_equal([x.tolist() for x in ga_fitted._best_score.columns], loaded_ind.columns) == True\n remove('tests/ga_saved_ind.json')\n ga_fitted.save('tests/ga_saved_ind.json', 'most_freq')\n loaded_ind = ga_fitted.load('tests/ga_saved_ind.json')\n assert np.array_equal(ga_fitted._most_freq.transformations.tolist(), loaded_ind.transformations) == True\n assert np.array_equal([x.tolist() for x in ga_fitted._most_freq.columns], loaded_ind.columns) == True\n remove('tests/ga_saved_ind.json')\n with pytest.raises(ValueError):\n ga_fitted.save(1)\n with pytest.raises(ValueError):\n loaded_ind = ga_fitted.load(1)"
]
| [
[
"numpy.random.random_integers",
"numpy.asarray",
"numpy.zeros",
"numpy.reshape",
"numpy.random.seed",
"sklearn.ensemble.RandomForestClassifier",
"numpy.array_equal",
"numpy.logical_and",
"numpy.std",
"sklearn.datasets.load_iris"
]
]
|