code                      string
signature                 string
docstring                 string
loss_without_docstring    float64
loss_with_docstring       float64
factor                    float64
return normalise(zeromean(zpad(bcrop(x, dsz, dimN), Nv), dsz), dimN + dimC)
def _Pcn_zm(x, dsz, Nv, dimN=2, dimC=1)
Projection onto dictionary update constraint set: support projection, mean subtraction, and normalisation. The result has the full spatial dimensions of the input. Parameters ---------- x : array_like Input array dsz : tuple Filter support size(s), specified using the same format as the `dsz` parameter of :func:`bcrop` Nv : tuple Sizes of problem spatial indices dimN : int, optional (default 2) Number of problem spatial indices dimC : int, optional (default 1) Number of problem channel indices Returns ------- y : ndarray Projection of input onto constraint set
12.108153
15.073215
0.803289
return normalise(bcrop(x, dsz, dimN), dimN + dimC)
def _Pcn_crp(x, dsz, Nv, dimN=2, dimC=1)
Projection onto dictionary update constraint set: support projection and normalisation. The result is cropped to the support of the largest filter in the dictionary. Parameters ---------- x : array_like Input array dsz : tuple Filter support size(s), specified using the same format as the `dsz` parameter of :func:`bcrop` Nv : tuple Sizes of problem spatial indices dimN : int, optional (default 2) Number of problem spatial indices dimC : int, optional (default 1) Number of problem channel indices Returns ------- y : ndarray Projection of input onto constraint set
14.653001
18.913567
0.774735
return normalise(zeromean(bcrop(x, dsz, dimN), dsz, dimN), dimN + dimC)
def _Pcn_zm_crp(x, dsz, Nv, dimN=2, dimC=1)
Projection onto dictionary update constraint set: support projection, mean subtraction, and normalisation. The result is cropped to the support of the largest filter in the dictionary. Parameters ---------- x : array_like Input array dsz : tuple Filter support size(s), specified using the same format as the `dsz` parameter of :func:`bcrop`. Nv : tuple Sizes of problem spatial indices dimN : int, optional (default 2) Number of problem spatial indices dimC : int, optional (default 1) Number of problem channel indices Returns ------- y : ndarray Projection of input onto constraint set
11.68358
16.422693
0.711429
return np.asarray((np.hstack([col for col in ntpl]), ntpl._fields, ntpl.__class__.__name__))
def ntpl2array(ntpl)
Convert a :func:`collections.namedtuple` object to a :class:`numpy.ndarray` object that can be saved using :func:`numpy.savez`. Parameters ---------- ntpl : collections.namedtuple object Named tuple object to be converted to ndarray Returns ------- arr : ndarray Array representation of input named tuple
12.805281
13.090806
0.978189
cls = collections.namedtuple(arr[2], arr[1])
return cls(*tuple(arr[0]))
def array2ntpl(arr)
Convert a :class:`numpy.ndarray` object constructed by :func:`ntpl2array` back to the original :func:`collections.namedtuple` representation. Parameters ---------- arr : ndarray Array representation of named tuple constructed by :func:`ntpl2array` Returns ------- ntpl : collections.namedtuple object Named tuple object with the same name and fields as the original named tuple object provided to :func:`ntpl2array`
9.68559
9.549939
1.014204
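A minimal round-trip sketch for the ntpl2array/array2ntpl pair above; the Stats record type is purely illustrative, and with recent NumPy versions the heterogeneous tuple inside ntpl2array may need an explicit dtype=object.

import collections
import numpy as np

# Hypothetical record type used only for this example
Stats = collections.namedtuple('Stats', ('Iter', 'ObjFun'))
st = Stats(Iter=1, ObjFun=3.2)

arr = ntpl2array(st)    # object ndarray: (values, field names, type name)
rec = array2ntpl(arr)   # reconstruct the namedtuple
assert rec._fields == st._fields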
if not lst:
    return None
else:
    cls = collections.namedtuple(lst[0].__class__.__name__,
                                 lst[0]._fields)
    return cls(*[[lst[k][l] for k in range(len(lst))]
                 for l in range(len(lst[0]))])
def transpose_ntpl_list(lst)
Transpose a list of named tuple objects (of the same type) into a named tuple of lists. Parameters ---------- lst : list of collections.namedtuple object List of named tuple objects of the same type Returns ------- ntpl : collections.namedtuple object Named tuple object with each entry consisting of a list of the corresponding fields of the named tuple objects in list ``lst``
3.302344
3.301109
1.000374
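A brief usage sketch for transpose_ntpl_list, assuming it is available as defined above (e.g. from sporco.util):

import collections

Point = collections.namedtuple('Point', ('x', 'y'))
pts = [Point(0, 1), Point(2, 3), Point(4, 5)]
tp = transpose_ntpl_list(pts)
print(tp.x)   # [0, 2, 4]
print(tp.y)   # [1, 3, 5]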
# Handle standard 2D (non-convolutional) dictionary
if D.ndim == 2:
    D = D.reshape((sz + (D.shape[1],)))
    sz = None
dsz = D.shape

if D.ndim == 4:
    axisM = 3
    szni = 3
else:
    axisM = 2
    szni = 2

# Construct dictionary atom size vector if not provided
if sz is None:
    sz = np.tile(np.array(dsz[0:2]).reshape([2, 1]), (1, D.shape[axisM]))
else:
    sz = np.array(sum(tuple((x[0:2],) * x[szni] for x in sz), ())).T

# Compute the maximum atom dimensions
mxsz = np.amax(sz, 1)

# Shift and scale values to [0, 1]
D = D - D.min()
D = D / D.max()

# Construct tiled image
N = dsz[axisM]
Vr = int(np.floor(np.sqrt(N)))
Vc = int(np.ceil(N / float(Vr)))
if D.ndim == 4:
    im = np.ones((Vr*mxsz[0] + Vr - 1, Vc*mxsz[1] + Vc - 1, dsz[2]))
else:
    im = np.ones((Vr*mxsz[0] + Vr - 1, Vc*mxsz[1] + Vc - 1))
k = 0
for l in range(0, Vr):
    for m in range(0, Vc):
        r = mxsz[0]*l + l
        c = mxsz[1]*m + m
        if D.ndim == 4:
            im[r:(r+sz[0, k]), c:(c+sz[1, k]), :] = \
                D[0:sz[0, k], 0:sz[1, k], :, k]
        else:
            im[r:(r+sz[0, k]), c:(c+sz[1, k])] = \
                D[0:sz[0, k], 0:sz[1, k], k]
        k = k + 1
        if k >= N:
            break
    if k >= N:
        break

return im
def tiledict(D, sz=None)
Construct an image allowing visualization of dictionary content. Parameters ---------- D : array_like Dictionary matrix/array. sz : tuple Size of each block in dictionary. Returns ------- im : ndarray Image tiled with dictionary entries.
2.613962
2.569242
1.017406
if wnm is None:
    wnm = tuple(np.array(x.shape) - np.array(wsz) + 1)
else:
    over = np.clip(np.array(wsz) + np.array(wnm) - np.array(x.shape) - 1,
                   0, np.iinfo(int).max)
    if np.any(over > 0):
        psz = [(0, p) for p in over]
        x = np.pad(x, psz, mode=pad)
outsz = wsz + wnm
outstrd = x.strides + x.strides
return np.lib.stride_tricks.as_strided(x, outsz, outstrd)
def rolling_window(x, wsz, wnm=None, pad='wrap')
Use :func:`numpy.lib.stride_tricks.as_strided` to construct a view of the input array that represents different positions of a rolling window as additional axes of the array. If the number of shifts requested is such that the window extends beyond the boundary of the input array, it is padded before the view is constructed. For example, if ``x`` is a 4 x 5 array, the output of ``y = rolling_window(x, (3, 3))`` is a 3 x 3 x 2 x 3 array, with the first window position indexed as ``y[..., 0, 0]``. Parameters ---------- x : ndarray Input array wsz : tuple Window size wnm : tuple, optional (default None) Number of shifts of window on each axis. If None, the number of shifts is set so that the end sample in the array is also the end sample in the final window position. pad : string, optional (default 'wrap') A pad mode specification for :func:`numpy.pad` Returns ------- xw : ndarray An array of shape wsz + wnm representing all requested shifts of the window within the input array
2.881546
2.927196
0.984405
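A usage sketch matching the docstring example, assuming rolling_window as defined above:

import numpy as np

x = np.arange(20).reshape(4, 5)
y = rolling_window(x, (3, 3))
print(y.shape)                                     # (3, 3, 2, 3)
print(np.array_equal(y[..., 0, 0], x[0:3, 0:3]))   # True: top-left window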
if np.any(np.greater_equal(step, x.shape)):
    raise ValueError('Step size must be less than array size on each axis')
sbsz, dvmd = np.divmod(x.shape, step)
if pad and np.any(dvmd):
    sbsz += np.clip(dvmd, 0, 1)
    psz = np.subtract(np.multiply(sbsz, step), x.shape)
    pdt = [(0, p) for p in psz]
    x = np.pad(x, pdt, mode=mode)
outsz = step + tuple(sbsz)
outstrd = x.strides + tuple(np.multiply(step, x.strides))
return np.lib.stride_tricks.as_strided(x, outsz, outstrd)
def subsample_array(x, step, pad=False, mode='reflect')
Use :func:`numpy.lib.stride_tricks.as_strided` to construct a view of the input array that represents a subsampling of the array by the specified step, with different offsets of the subsampling as additional axes of the array. If the input array shape is not evenly divisible by the subsampling step, it is padded before the view is constructed. For example, if ``x`` is a 6 x 6 array, the output of ``y = subsample_array(x, (2, 2))`` is a 2 x 2 x 3 x 3 array, with the first subsampling offset indexed as ``y[0, 0]``. Parameters ---------- x : ndarray Input array step : tuple Subsampling step size pad : bool, optional (default False) Flag indicating whether the input array should be padded when its size is not integer divisible by the step size mode : string, optional (default 'reflect') A pad mode specification for :func:`numpy.pad` Returns ------- xs : ndarray An array representing different subsampling offsets in the input array
3.398026
3.386631
1.003365
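A usage sketch matching the docstring example, assuming subsample_array as defined above:

import numpy as np

x = np.arange(36).reshape(6, 6)
y = subsample_array(x, (2, 2))
print(y.shape)                                  # (2, 2, 3, 3)
print(np.array_equal(y[0, 0], x[0::2, 0::2]))   # True
print(np.array_equal(y[1, 1], x[1::2, 1::2]))   # True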
# See http://stackoverflow.com/questions/16774148 and
# sklearn.feature_extraction.image.extract_patches_2d
if isinstance(img, tuple):
    img = np.stack(img, axis=-1)

if stpsz is None:
    stpsz = (1,) * len(blksz)

imgsz = img.shape

# Calculate the number of blocks that can fit in each dimension of
# the images
numblocks = tuple(int(np.floor((a - b) / c) + 1) for a, b, c in
                  zip_longest(imgsz, blksz, stpsz, fillvalue=1))

# Calculate the strides for blocks
blockstrides = tuple(a * b for a, b in
                     zip_longest(img.strides, stpsz, fillvalue=1))

new_shape = blksz + numblocks
new_strides = img.strides[:len(blksz)] + blockstrides
blks = np.lib.stride_tricks.as_strided(img, new_shape, new_strides)
return np.reshape(blks, blksz + (-1,))
def extractblocks(img, blksz, stpsz=None)
Extract blocks from an ndarray signal into an ndarray. Parameters ---------- img : ndarray or tuple of ndarrays nd array of images, or tuple of images blksz : tuple tuple of block sizes, blocks are taken starting from the first index of img stpsz : tuple, optional (default None, corresponds to steps of 1) tuple of step sizes between neighboring blocks Returns ------- blks : ndarray image blocks
2.751114
2.678293
1.027189
blksz = blks.shape[:-1]
if stpsz is None:
    stpsz = tuple(1 for _ in blksz)

# Calculate the number of blocks that can fit in each dimension of
# the images
numblocks = tuple(int(np.floor((a - b) / c) + 1) for a, b, c in
                  zip_longest(imgsz, blksz, stpsz, fillvalue=1))
new_shape = blksz + numblocks
blks = np.reshape(blks, new_shape)

# Construct arrays accumulating the block values and the per-pixel
# coverage count
imgs = np.zeros(imgsz, dtype=blks.dtype)
normalizer = np.zeros(imgsz, dtype=blks.dtype)

# Iterate over each block and accumulate its values into the
# corresponding imgs region
for pos in np.ndindex(numblocks):
    slices = tuple(slice(a * c, a * c + b)
                   for a, b, c in zip(pos, blksz, stpsz))
    imgs[slices + pos[len(blksz):]] += blks[(Ellipsis,) + pos]
    normalizer[slices + pos[len(blksz):]] += blks.dtype.type(1)

return np.where(normalizer > 0, (imgs / normalizer).astype(blks.dtype),
                np.nan)
def averageblocks(blks, imgsz, stpsz=None)
Average blocks together from an ndarray to reconstruct ndarray signal. Parameters ---------- blks : ndarray nd array of blocks of a signal imgsz : tuple tuple of the signal size stpsz : tuple, optional (default None, corresponds to steps of 1) tuple of step sizes between neighboring blocks Returns ------- imgs : ndarray reconstructed signal, unknown pixels are returned as np.nan
3.049489
2.973892
1.02542
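A round-trip sketch combining extractblocks and averageblocks as defined above; with this geometry every pixel is covered, so the reconstruction should match the input.

import numpy as np

img = np.random.randn(8, 8)
blks = extractblocks(img, (4, 4), stpsz=(2, 2))
rec = averageblocks(blks, img.shape, stpsz=(2, 2))
assert np.allclose(rec, img)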
# Construct a vectorized append function
def listapp(x, y):
    x.append(y)
veclistapp = np.vectorize(listapp, otypes=[np.object_])

blksz = blks.shape[:-1]
if stpsz is None:
    stpsz = tuple(1 for _ in blksz)

# Calculate the number of blocks that can fit in each dimension of
# the images
numblocks = tuple(int(np.floor((a - b) / c) + 1) for a, b, c in
                  zip_longest(imgsz, blksz, stpsz, fillvalue=1))
new_shape = blksz + numblocks
blks = np.reshape(blks, new_shape)

# Construct an imgs matrix of empty lists
imgs = np.empty(imgsz, dtype=np.object_)
imgs.fill([])
imgs = np.frompyfunc(list, 1, 1)(imgs)

# Iterate over each block and append the values to the corresponding
# imgs cell
for pos in np.ndindex(numblocks):
    slices = tuple(slice(a * c, a * c + b) for a, b, c in
                   zip_longest(pos, blksz, stpsz, fillvalue=1))
    veclistapp(imgs[slices].squeeze(), blks[(Ellipsis,) + pos].squeeze())

return np.vectorize(fn, otypes=[blks.dtype])(imgs)
def combineblocks(blks, imgsz, stpsz=None, fn=np.median)
Combine blocks from an ndarray to reconstruct ndarray signal. Parameters ---------- blks : ndarray nd array of blocks of a signal imgsz : tuple tuple of the signal size stpsz : tuple, optional (default None, corresponds to steps of 1) tuple of step sizes between neighboring blocks fn : function, optional (default np.median) the function used to resolve multivalued cells Returns ------- imgs : ndarray reconstructed signal, unknown pixels are returned as np.nan
3.216399
3.176938
1.012421
# ITU-R BT.601 luma weights (the original erroneously used 0.144 for blue)
w = sla.atleast_nd(rgb.ndim, np.array([0.299, 0.587, 0.114],
                                      dtype=rgb.dtype, ndmin=3))
return np.sum(w * rgb, axis=2)
def rgb2gray(rgb)
Convert an RGB image (or images) to grayscale. Parameters ---------- rgb : ndarray RGB image as Nr x Nc x 3 or Nr x Nc x 3 x K array Returns ------- gry : ndarray Grayscale image as Nr x Nc or Nr x Nc x K array
4.853773
5.015204
0.967812
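A constant-colour sanity check for rgb2gray as defined above (sla is assumed to be sporco.linalg); the Rec. 601 weights sum to one, so a white image stays white.

import numpy as np

rgb = np.ones((16, 16, 3), dtype=np.float32)
gry = rgb2gray(rgb)
print(gry.shape)                # (16, 16)
print(np.allclose(gry, 1.0))    # True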
return np.random.randn(*args) + 1j*np.random.randn(*args)
def complex_randn(*args)
Return a complex array of samples drawn from a standard normal distribution. Parameters ---------- d0, d1, ..., dn : int Dimensions of the random array Returns ------- a : ndarray Random array of shape (d0, d1, ..., dn)
3.615555
5.006513
0.72217
sn = s.copy()
spm = np.random.uniform(-1.0, 1.0, s.shape)
sn[spm < frc - 1.0] = smn
sn[spm > 1.0 - frc] = smx
return sn
def spnoise(s, frc, smn=0.0, smx=1.0)
Return image with salt & pepper noise imposed on it. Parameters ---------- s : ndarray Input image frc : float Desired fraction of pixels corrupted by noise smn : float, optional (default 0.0) Lower value for noise (pepper) smx : float, optional (default 1.0) Upper value for noise (salt) Returns ------- sn : ndarray Noisy image
2.890703
2.959657
0.976702
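A brief usage sketch for spnoise as defined above, corrupting roughly 20% of the pixels:

import numpy as np

img = np.full((64, 64), 0.5)
noisy = spnoise(img, 0.2)
print(np.mean(noisy != img))   # roughly 0.2, split between salt and pepper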
msk = np.asarray(np.random.uniform(-1.0, 1.0, shp), dtype=dtype)
msk[np.abs(msk) > frc] = 1.0
msk[np.abs(msk) < frc] = 0.0
return msk
def rndmask(shp, frc, dtype=None)
r"""Return random mask image with values in :math:`\{0,1\}`. Parameters ---------- s : tuple Mask array shape frc : float Desired fraction of zero pixels dtype : data-type or None, optional (default None) Data type of mask array Returns ------- msk : ndarray Mask image
2.721659
3.136463
0.867748
if centre:
    C = np.mean(U, axis=1, keepdims=True)
    U = U - C
else:
    C = None

B, S, _ = np.linalg.svd(U, full_matrices=False, compute_uv=True)

return B, S**2, C
def pca(U, centre=False)
Compute the PCA basis for columns of input array `U`. Parameters ---------- U : array_like 2D data array with rows corresponding to different variables and columns corresponding to different observations centre : bool, optional (default False) Flag indicating whether to centre data Returns ------- B : ndarray A 2D array representing the PCA basis; each column is a PCA component. B.T is the analysis transform into the PCA representation, and B is the corresponding synthesis transform S : ndarray The eigenvalues of the PCA components C : ndarray or None None if centering is disabled, otherwise the mean of the data matrix subtracted in performing the centering
2.475321
2.701774
0.916183
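A reconstruction check for pca as defined above; rows are variables and columns observations, per the docstring.

import numpy as np

U = np.random.randn(5, 200)
B, S, C = pca(U, centre=True)
# B.T is the analysis transform, B the synthesis transform
V = B.T.dot(U - C)
print(np.allclose(B.dot(V) + C, U))   # True: B has orthonormal columns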
grv = np.array([-1.0, 1.0]).reshape([2, 1])
gcv = np.array([-1.0, 1.0]).reshape([1, 2])
Gr = sla.fftn(grv, (s.shape[0] + 2*npd, s.shape[1] + 2*npd), (0, 1))
Gc = sla.fftn(gcv, (s.shape[0] + 2*npd, s.shape[1] + 2*npd), (0, 1))
A = 1.0 + lmbda*np.conj(Gr)*Gr + lmbda*np.conj(Gc)*Gc
if s.ndim > 2:
    A = A[(slice(None),)*2 + (np.newaxis,)*(s.ndim-2)]
sp = np.pad(s, ((npd, npd),)*2 + ((0, 0),)*(s.ndim-2), 'symmetric')
slp = np.real(sla.ifftn(sla.fftn(sp, axes=(0, 1)) / A, axes=(0, 1)))
sl = slp[npd:(slp.shape[0] - npd), npd:(slp.shape[1] - npd)]
sh = s - sl
return sl.astype(s.dtype), sh.astype(s.dtype)
def tikhonov_filter(s, lmbda, npd=16)
r"""Lowpass filter based on Tikhonov regularization. Lowpass filter image(s) and return low and high frequency components, consisting of the lowpass filtered image and its difference with the input image. The lowpass filter is equivalent to Tikhonov regularization with `lmbda` as the regularization parameter and a discrete gradient as the operator in the regularization term, i.e. the lowpass component is the solution to .. math:: \mathrm{argmin}_\mathbf{x} \; (1/2) \left\|\mathbf{x} - \mathbf{s} \right\|_2^2 + (\lambda / 2) \sum_i \| G_i \mathbf{x} \|_2^2 \;\;, where :math:`\mathbf{s}` is the input image, :math:`\lambda` is the regularization parameter, and :math:`G_i` is an operator that computes the discrete gradient along image axis :math:`i`. Once the lowpass component :math:`\mathbf{x}` has been computed, the highpass component is just :math:`\mathbf{s} - \mathbf{x}`. Parameters ---------- s : array_like Input image or array of images. lmbda : float Regularization parameter controlling lowpass filtering. npd : int, optional (default=16) Number of samples to pad at image boundaries. Returns ------- sl : array_like Lowpass image or array of images. sh : array_like Highpass image or array of images.
2.41025
2.27226
1.060728
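A quick consistency check for tikhonov_filter as defined above (sla is assumed to be sporco.linalg); the high and low frequency components sum to the input by construction.

import numpy as np

s = np.random.randn(32, 32).astype(np.float32)
slo, shi = tikhonov_filter(s, lmbda=5.0)
print(np.allclose(slo + shi, s))   # True: sh is defined as s - sl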
gfn = lambda x, sd: np.exp(-(x**2) / (2.0 * sd**2)) / \
    (np.sqrt(2.0 * np.pi) * sd)
gc = 1.0
if isinstance(shape, int):
    shape = (shape,)
for k, n in enumerate(shape):
    x = np.linspace(-3.0, 3.0, n).reshape(
        (1,) * k + (n,) + (1,) * (len(shape) - k - 1))
    gc = gc * gfn(x, sd)
gc /= np.sum(gc)
return gc
def gaussian(shape, sd=1.0)
Sample a multivariate Gaussian pdf, normalised to have unit sum. Parameters ---------- shape : tuple Shape of output array. sd : float, optional (default 1.0) Standard deviation of Gaussian pdf. Returns ------- gc : ndarray Sampled Gaussian pdf.
2.780455
2.762374
1.006545
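A brief usage sketch for gaussian as defined above:

import numpy as np

g = gaussian((7, 7), sd=2.0)
print(np.isclose(g.sum(), 1.0))   # True: normalised to unit sum
print(g[3, 3] == g.max())         # True: peak at the centre sample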
# Construct region weighting filter
N = 2 * n + 1
g = gaussian((N, N), sd=1.0)
# Compute required image padding
pd = ((n, n),) * 2
if s.ndim > 2:
    g = g[..., np.newaxis]
    pd += ((0, 0),)
sp = np.pad(s, pd, mode='symmetric')
# Compute local mean and subtract from image
smn = np.roll(sla.fftconv(g, sp), (-n, -n), axis=(0, 1))
s1 = sp - smn
# Compute local norm
snrm = np.roll(np.sqrt(np.clip(sla.fftconv(g, s1**2), 0.0, np.inf)),
               (-n, -n), axis=(0, 1))
# Set c parameter if not specified
if c is None:
    c = np.mean(snrm, axis=(0, 1), keepdims=True)
# Divide mean-subtracted image by corrected local norm
snrm = np.maximum(c, snrm)
s2 = s1 / snrm
# Return contrast normalised image and normalisation components
return s2[n:-n, n:-n], smn[n:-n, n:-n], snrm[n:-n, n:-n]
def local_contrast_normalise(s, n=7, c=None)
Perform local contrast normalisation :cite:`jarret-2009-what` of an image, consisting of subtraction of the local mean and division by the local norm. The original image can be reconstructed from the contrast normalised image as (`snrm` * `scn`) + `smn`. Parameters ---------- s : array_like Input image or array of images. n : int, optional (default 7) The size of the local region used for normalisation is :math:`2n+1`. c : float, optional (default None) The smallest value that can be used in the divisive normalisation. If `None`, this value is set to the mean of the local region norms. Returns ------- scn : ndarray Contrast normalised image(s) smn : ndarray Additive normalisation correction snrm : ndarray Multiplicative normalisation correction
3.647639
3.207712
1.137147
if PY2:
    ncpu = mp.cpu_count()
else:
    ncpu = os.cpu_count()
idle = int(ncpu - np.floor(os.getloadavg()[0]))
return max(mincpu, idle)
def idle_cpu_count(mincpu=1)
Estimate number of idle CPUs, for use by multiprocessing code needing to determine how many processes can be run without excessive load. This function uses :func:`os.getloadavg` which is only available under a Unix OS. Parameters ---------- mincpu : int Minimum number of CPUs to report, independent of actual estimate Returns ------- idle : int Estimate of number of idle CPUs
4.271119
4.04751
1.055246
if fmin:
    slct = np.argmin
else:
    slct = np.argmax
fprm = itertools.product(*grd)
if platform.system() == 'Windows':
    fval = list(map(fn, fprm))
else:
    if nproc is None:
        nproc = mp.cpu_count()
    pool = mp.Pool(processes=nproc)
    fval = pool.map(fn, fprm)
    pool.close()
    pool.join()
if isinstance(fval[0], (tuple, list, np.ndarray)):
    nfnv = len(fval[0])
    fvmx = np.reshape(fval, [a.size for a in grd] + [nfnv,])
    sidx = np.unravel_index(slct(fvmx.reshape((-1, nfnv)), axis=0),
                            fvmx.shape[0:-1]) + (np.array((range(nfnv))),)
    sprm = np.array([grd[k][sidx[k]] for k in range(len(grd))])
    sfvl = tuple(fvmx[sidx])
else:
    fvmx = np.reshape(fval, [a.size for a in grd])
    sidx = np.unravel_index(slct(fvmx), fvmx.shape)
    sprm = np.array([grd[k][sidx[k]] for k in range(len(grd))])
    sfvl = fvmx[sidx]
return sprm, sfvl, fvmx, sidx
def grid_search(fn, grd, fmin=True, nproc=None)
Perform a grid search for optimal parameters of a specified function. In the simplest case the function returns a float value, and a single optimum value and corresponding parameter values are identified. If the function returns a tuple of values, each of these is taken to define a separate function on the search grid, with optimum function values and corresponding parameter values being identified for each of them. On all platforms except Windows (where ``mp.Pool`` usage has some limitations), the computation of the function at the grid points is computed in parallel. **Warning:** This function will hang if `fn` makes use of :mod:`pyfftw` with multi-threading enabled (the `bug <https://github.com/pyFFTW/pyFFTW/issues/135>`_ has been reported). When using the FFT functions in :mod:`sporco.linalg`, multi-threading can be disabled by including the following code:: import sporco.linalg sporco.linalg.pyfftw_threads = 1 Parameters ---------- fn : function Function to be evaluated. It should take a tuple of parameter values as an argument, and return a float value or a tuple of float values. grd : tuple of array_like A tuple providing an array of sample points for each axis of the grid on which the search is to be performed. fmin : bool, optional (default True) Determine whether optimal function values are selected as minima or maxima. If `fmin` is True then minima are selected. nproc : int or None, optional (default None) Number of processes to run in parallel. If None, the number of CPUs of the system is used. Returns ------- sprm : ndarray Optimal parameter values on each axis. If `fn` is multi-valued, `sprm` is a matrix with rows corresponding to parameter values and columns corresponding to function values. sfvl : float or ndarray Optimum function value or values fvmx : ndarray Function value(s) on search grid sidx : tuple of int or tuple of ndarray Indices of optimal values on parameter grid
2.30364
2.015587
1.142913
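A small sketch of grid_search on a separable quadratic, assuming the function above; note that fn must be picklable (a module-level def rather than a lambda) since evaluation may use multiprocessing.

import numpy as np

def fn(prm):
    return (prm[0] - 0.3)**2 + (prm[1] + 0.1)**2

grd = (np.linspace(-1, 1, 21), np.linspace(-1, 1, 21))
sprm, sfvl, fvmx, sidx = grid_search(fn, grd, fmin=True, nproc=1)
print(sprm)   # approximately [ 0.3 -0.1]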
pth = os.path.join(os.path.dirname(__file__), 'data', 'convdict.npz')
npz = np.load(pth)
cdd = {}
for k in list(npz.keys()):
    cdd[k] = npz[k]
return cdd
def convdicts()
Access a set of example learned convolutional dictionaries. Returns ------- cdd : dict A dict associating description strings with dictionaries represented as ndarrays Examples -------- Print the dict keys to obtain the identifiers of the available dictionaries >>> from sporco import util >>> cd = util.convdicts() >>> print(cd.keys()) ['G:12x12x72', 'G:8x8x16,12x12x32,16x16x48', ...] Select a specific example dictionary using the corresponding identifier >>> D = cd['G:8x8x96']
2.786657
2.925139
0.952658
err = ValueError('maxtry parameter should be greater than zero')
for ntry in range(maxtry):
    try:
        rspns = urlrequest.urlopen(url, timeout=timeout)
        cntnt = rspns.read()
        break
    except urlerror.URLError as e:
        err = e
        if not isinstance(e.reason, socket.timeout):
            raise
else:
    raise err

return io.BytesIO(cntnt)
def netgetdata(url, maxtry=3, timeout=10)
Get content of a file via a URL. Parameters ---------- url : string URL of the file to be downloaded maxtry : int, optional (default 3) Maximum number of download retries timeout : int, optional (default 10) Timeout in seconds for blocking operations Returns ------- str : io.BytesIO Buffered I/O stream Raises ------ urlerror.URLError (urllib2.URLError in Python 2, urllib.error.URLError in Python 3) If the file cannot be downloaded
3.603687
3.350626
1.075527
from contextlib import contextmanager

@contextmanager
def null_context_manager():
    yield

if in_notebook():
    try:
        from wurlitzer import sys_pipes
    except ImportError:
        sys_pipes = null_context_manager
else:
    sys_pipes = null_context_manager

return sys_pipes
def notebook_system_output()
Get a context manager that attempts to use `wurlitzer <https://github.com/minrk/wurlitzer>`__ to capture system-level stdout/stderr within a Jupyter Notebook shell, without affecting normal operation when run as a Python script. For example: >>> sys_pipes = sporco.util.notebook_system_output() >>> with sys_pipes(): >>> command_producing_system_level_output() Returns ------- sys_pipes : context manager Context manager that handles output redirection when run within a Jupyter Notebook shell
3.45299
3.526779
0.979077
if scaled is None:
    scaled = self.scaled
if dtype is None:
    if self.dtype is None:
        dtype = np.uint8
    else:
        dtype = self.dtype
if scaled and np.issubdtype(dtype, np.integer):
    dtype = np.float32
if zoom is None:
    zoom = self.zoom
if gray is None:
    gray = self.gray
if group is None:
    pth = os.path.join(self.bpth, fname)
else:
    pth = os.path.join(self.bpth, group, fname)

try:
    img = np.asarray(imageio.imread(pth), dtype=dtype)
except IOError:
    raise IOError('Could not access image %s in group %s' %
                  (fname, group))

if scaled:
    img /= 255.0
if idxexp is not None:
    img = img[idxexp]
if zoom is not None:
    if img.ndim == 2:
        img = sni.zoom(img, zoom)
    else:
        img = sni.zoom(img, (zoom,)*2 + (1,)*(img.ndim-2))
if gray:
    img = rgb2gray(img)

return img
def image(self, fname, group=None, scaled=None, dtype=None, idxexp=None, zoom=None, gray=None)
Get named image. Parameters ---------- fname : string Filename of image group : string or None, optional (default None) Name of image group scaled : bool or None, optional (default None) Flag indicating whether images should be on the range [0,...,255] with np.uint8 dtype (False), or on the range [0,...,1] with np.float32 dtype (True). If the value is None, scaling behaviour is determined by the `scaling` parameter passed to the object initializer, otherwise that selection is overridden. dtype : data-type or None, optional (default None) Desired data type of images. If `scaled` is True and `dtype` is an integer type, the output data type is np.float32. If the value is None, the data type is determined by the `dtype` parameter passed to the object initializer, otherwise that selection is overridden. idxexp : index expression or None, optional (default None) An index expression selecting, for example, a cropped region of the requested image. This selection is applied *before* any `zoom` rescaling so the expression does not need to be modified when the zoom factor is changed. zoom : float or None, optional (default None) Optional rescaling factor to apply to the images. If the value is None, support rescaling behaviour is determined by the `zoom` parameter passed to the object initializer, otherwise that selection is overridden. gray : bool or None, optional (default None) Flag indicating whether RGB images should be converted to grayscale. If the value is None, behaviour is determined by the `gray` parameter passed to the object initializer. Returns ------- img : ndarray Image array Raises ------ IOError If the image is not accessible
2.000536
1.957843
1.021806
# Default label is self.dfltlbl
if labels is None:
    labels = self.dfltlbl
# If label is not a list or tuple, create a singleton list
# containing it
if not isinstance(labels, (list, tuple)):
    labels = [labels,]
# Iterate over specified label(s)
t = timer()
for lbl in labels:
    # On first call to start for a label, set its accumulator to zero
    if lbl not in self.td:
        self.td[lbl] = 0.0
        self.t0[lbl] = None
    # Record the time at which start was called for this lbl if
    # it isn't already running
    if self.t0[lbl] is None:
        self.t0[lbl] = t
def start(self, labels=None)
Start specified timer(s). Parameters ---------- labels : string or list, optional (default None) Specify the label(s) of the timer(s) to be started. If it is ``None``, start the default timer with label specified by the ``dfltlbl`` parameter of :meth:`__init__`.
4.133458
3.557038
1.162051
# Get current time
t = timer()
# Default label is self.dfltlbl
if labels is None:
    labels = self.dfltlbl
# All timers are affected if label is equal to self.alllbl,
# otherwise only the timer(s) specified by label
if labels == self.alllbl:
    labels = self.t0.keys()
elif not isinstance(labels, (list, tuple)):
    labels = [labels,]
# Iterate over specified label(s)
for lbl in labels:
    if lbl not in self.t0:
        raise KeyError('Unrecognized timer key %s' % lbl)
    # If self.t0[lbl] is None, the corresponding timer is
    # already stopped, so no action is required
    if self.t0[lbl] is not None:
        # Increment time accumulator from the elapsed time
        # since most recent start call
        self.td[lbl] += t - self.t0[lbl]
        # Set start time to None to indicate timer is not running
        self.t0[lbl] = None
def stop(self, labels=None)
Stop specified timer(s). Parameters ---------- labels : string or list, optional (default None) Specify the label(s) of the timer(s) to be stopped. If it is ``None``, stop the default timer with label specified by the ``dfltlbl`` parameter of :meth:`__init__`. If it is equal to the string specified by the ``alllbl`` parameter of :meth:`__init__`, stop all timers.
3.899348
3.325393
1.172597
# Default label is self.dfltlbl
if labels is None:
    labels = self.dfltlbl
# All timers are affected if label is equal to self.alllbl,
# otherwise only the timer(s) specified by label
if labels == self.alllbl:
    labels = self.t0.keys()
elif not isinstance(labels, (list, tuple)):
    labels = [labels,]
# Iterate over specified label(s)
for lbl in labels:
    if lbl not in self.t0:
        raise KeyError('Unrecognized timer key %s' % lbl)
    # Set start time to None to indicate timer is not running
    self.t0[lbl] = None
    # Set time accumulator to zero
    self.td[lbl] = 0.0
def reset(self, labels=None)
Reset specified timer(s). Parameters ---------- labels : string or list, optional (default None) Specify the label(s) of the timer(s) to be reset. If it is ``None``, reset the default timer with label specified by the ``dfltlbl`` parameter of :meth:`__init__`. If it is equal to the string specified by the ``alllbl`` parameter of :meth:`__init__`, reset all timers.
4.111886
3.237681
1.27001
# Get current time
t = timer()
# Default label is self.dfltlbl
if label is None:
    label = self.dfltlbl
if label not in self.t0:
    # Return 0.0 if the default timer is selected but not yet
    # initialised; raise an exception for any other unrecognized label
    if label == self.dfltlbl:
        return 0.0
    else:
        raise KeyError('Unrecognized timer key %s' % label)
# If total flag is True return sum of accumulated time from
# previous start/stop calls and current start call, otherwise
# return just the time since the current start call
te = 0.0
if self.t0[label] is not None:
    te = t - self.t0[label]
if total:
    te += self.td[label]
return te
def elapsed(self, label=None, total=True)
Get elapsed time since timer start. Parameters ---------- label : string, optional (default None) Specify the label of the timer for which the elapsed time is required. If it is ``None``, the default timer with label specified by the ``dfltlbl`` parameter of :meth:`__init__` is selected. total : bool, optional (default True) If ``True`` return the total elapsed time since the first call of :meth:`start` for the selected timer, otherwise return the elapsed time since the most recent call of :meth:`start` for which there has not been a corresponding call to :meth:`stop`. Returns ------- dlt : float Elapsed time
4.91659
4.122781
1.192542
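A usage sketch for the class these start/stop/elapsed methods belong to; the class name Timer (as in sporco.util) is an assumption.

import time

t = Timer()                 # assumed constructor of the timer class above
t.start('solve')
time.sleep(0.1)
t.stop('solve')
print(t.elapsed('solve'))   # accumulated time, roughly 0.1
t.start('solve')            # restart: the accumulator keeps the prior total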
return self.timer.elapsed(self.label, total=total)
def elapsed(self, total=True)
Return the elapsed time for the timer. Parameters ---------- total : bool, optional (default True) If ``True`` return the total elapsed time since the first call of :meth:`start` for the selected timer, otherwise return the elapsed time since the most recent call of :meth:`start` for which there has not been a corresponding call to :meth:`stop`. Returns ------- dlt : float Elapsed time
13.011929
19.425373
0.669842
def press(event):
    if event.key == 'q':
        plt.close(fig)
    elif event.key == 'e':
        fig.set_size_inches(scaling * fig.get_size_inches(), forward=True)
    elif event.key == 'c':
        fig.set_size_inches(fig.get_size_inches() / scaling, forward=True)

# Avoid multiple event handlers attached to the same figure
if not hasattr(fig, '_sporco_keypress_cid'):
    cid = fig.canvas.mpl_connect('key_press_event', press)
    fig._sporco_keypress_cid = cid

return press
def attach_keypress(fig, scaling=1.1)
Attach a key press event handler that configures keys for closing a figure and changing the figure size. Keys 'e' and 'c' respectively expand and contract the figure, and key 'q' closes it. **Note:** Resizing may not function correctly with all matplotlib backends (a `bug <https://github.com/matplotlib/matplotlib/issues/10083>`__ has been reported). Parameters ---------- fig : :class:`matplotlib.figure.Figure` object Figure to which event handling is to be attached scaling : float, optional (default 1.1) Scaling factor for figure size changes Returns ------- press : function Key press event handler function
2.596926
2.350608
1.104789
# See https://stackoverflow.com/questions/11551049
def zoom(event):
    # Get the current x and y limits
    cur_xlim = ax.get_xlim()
    cur_ylim = ax.get_ylim()
    # Get event location
    xdata = event.xdata
    ydata = event.ydata
    # Return if cursor is not over valid region of plot
    if xdata is None or ydata is None:
        return
    if event.button == 'up':
        # Deal with zoom in
        scale_factor = 1.0 / scaling
    elif event.button == 'down':
        # Deal with zoom out
        scale_factor = scaling
    # Get distance from the cursor to the edge of the figure frame
    x_left = xdata - cur_xlim[0]
    x_right = cur_xlim[1] - xdata
    y_top = ydata - cur_ylim[0]
    y_bottom = cur_ylim[1] - ydata
    # Calculate new x and y limits
    new_xlim = (xdata - x_left * scale_factor,
                xdata + x_right * scale_factor)
    new_ylim = (ydata - y_top * scale_factor,
                ydata + y_bottom * scale_factor)
    # Ensure that x limit range is no larger than that of the reference
    if np.diff(new_xlim) > np.diff(zoom.xlim_ref):
        new_xlim *= np.diff(zoom.xlim_ref) / np.diff(new_xlim)
    # Ensure that lower x limit is not less than that of the reference
    if new_xlim[0] < zoom.xlim_ref[0]:
        new_xlim += np.array(zoom.xlim_ref[0] - new_xlim[0])
    # Ensure that upper x limit is not greater than that of the reference
    if new_xlim[1] > zoom.xlim_ref[1]:
        new_xlim -= np.array(new_xlim[1] - zoom.xlim_ref[1])
    # Ensure that ylim tuple has the smallest value first
    if zoom.ylim_ref[1] < zoom.ylim_ref[0]:
        ylim_ref = zoom.ylim_ref[::-1]
        new_ylim = new_ylim[::-1]
    else:
        ylim_ref = zoom.ylim_ref
    # Ensure that y limit range is no larger than that of the reference
    if np.diff(new_ylim) > np.diff(ylim_ref):
        new_ylim *= np.diff(ylim_ref) / np.diff(new_ylim)
    # Ensure that lower y limit is not less than that of the reference
    if new_ylim[0] < ylim_ref[0]:
        new_ylim += np.array(ylim_ref[0] - new_ylim[0])
    # Ensure that upper y limit is not greater than that of the reference
    if new_ylim[1] > ylim_ref[1]:
        new_ylim -= np.array(new_ylim[1] - ylim_ref[1])
    # Return the ylim tuple to its original order
    if zoom.ylim_ref[1] < zoom.ylim_ref[0]:
        new_ylim = new_ylim[::-1]
    # Set new x and y limits
    ax.set_xlim(new_xlim)
    ax.set_ylim(new_ylim)
    # Force redraw
    ax.figure.canvas.draw()

# Record reference x and y limits prior to any zooming
zoom.xlim_ref = ax.get_xlim()
zoom.ylim_ref = ax.get_ylim()
# Get figure for specified axes and attach the event handler
fig = ax.get_figure()
fig.canvas.mpl_connect('scroll_event', zoom)
return zoom
def attach_zoom(ax, scaling=2.0)
Attach an event handler that supports zooming within a plot using the mouse scroll wheel. Parameters ---------- ax : :class:`matplotlib.axes.Axes` object Axes to which event handling is to be attached scaling : float, optional (default 2.0) Scaling factor for zooming in and out Returns ------- zoom : function Mouse scroll wheel event handler function
1.843675
1.839449
1.002297
# Extract kwargs entries that are not related to line properties
fgsz = kwargs.pop('fgsz', None)
fgnm = kwargs.pop('fgnm', None)
fig = kwargs.pop('fig', None)
ax = kwargs.pop('ax', None)

figp = fig
if fig is None:
    fig = plt.figure(num=fgnm, figsize=fgsz)
    fig.clf()
    ax = fig.gca()
elif ax is None:
    ax = fig.gca()

# Set defaults for line width and marker size
if 'lw' not in kwargs and 'linewidth' not in kwargs:
    kwargs['lw'] = 1.5
if 'ms' not in kwargs and 'markersize' not in kwargs:
    kwargs['ms'] = 6.0

if ptyp not in ('plot', 'semilogx', 'semilogy', 'loglog'):
    raise ValueError("Invalid plot type '%s'" % ptyp)
pltmth = getattr(ax, ptyp)
if x is None:
    pltln = pltmth(y, **kwargs)
else:
    pltln = pltmth(x, y, **kwargs)

ax.fmt_xdata = lambda x: "{: .2f}".format(x)
ax.fmt_ydata = lambda x: "{: .2f}".format(x)

if title is not None:
    ax.set_title(title)
if xlbl is not None:
    ax.set_xlabel(xlbl)
if ylbl is not None:
    ax.set_ylabel(ylbl)
if lgnd is not None:
    ax.legend(lgnd, loc=lglc)

attach_keypress(fig)
attach_zoom(ax)

if have_mpldc:
    mpldc.datacursor(pltln)

if figp is None:
    fig.show()

return fig, ax
def plot(y, x=None, ptyp='plot', xlbl=None, ylbl=None, title=None, lgnd=None, lglc=None, **kwargs)
Plot points or lines in 2D. If a figure object is specified then the plot is drawn in that figure, and ``fig.show()`` is not called. The figure is closed on key entry 'q'. Parameters ---------- y : array_like 1d or 2d array of data to plot. If a 2d array, each column is plotted as a separate curve. x : array_like, optional (default None) Values for x-axis of the plot ptyp : string, optional (default 'plot') Plot type specification (options are 'plot', 'semilogx', 'semilogy', and 'loglog') xlbl : string, optional (default None) Label for x-axis ylbl : string, optional (default None) Label for y-axis title : string, optional (default None) Figure title lgnd : list of strings, optional (default None) List of legend string lglc : string, optional (default None) Legend location string **kwargs : :class:`matplotlib.lines.Line2D` properties or figure \ properties, optional Keyword arguments specifying :class:`matplotlib.lines.Line2D` properties, e.g. ``lw=2.0`` sets a line width of 2, or properties of the figure and axes. If not specified, the defaults for line width (``lw``) and marker size (``ms``) are 1.5 and 6.0 respectively. The valid figure and axes keyword arguments are listed below: .. |mplfg| replace:: :class:`matplotlib.figure.Figure` object .. |mplax| replace:: :class:`matplotlib.axes.Axes` object .. rst-class:: kwargs ===== ==================== ====================================== kwarg Accepts Description ===== ==================== ====================================== fgsz tuple (width,height) Specify figure dimensions in inches fgnm integer Figure number of figure fig |mplfg| Draw in specified figure instead of creating one ax |mplax| Plot in specified axes instead of current axes of figure ===== ==================== ====================================== Returns ------- fig : :class:`matplotlib.figure.Figure` object Figure object for this figure ax : :class:`matplotlib.axes.Axes` object Axes object for this plot
2.451965
2.093039
1.171485
# Check whether running within a notebook shell and have
# not already monkey patched the plot function
from sporco.util import in_notebook
module = sys.modules[__name__]
if in_notebook() and module.plot.__name__ == 'plot':

    # Set inline backend (i.e. %matplotlib inline) if in a notebook shell
    set_notebook_plot_backend()

    # Replace plot function with a wrapper function that discards
    # its return value (within a notebook with inline plotting, plots
    # are duplicated if the return value from the original function is
    # not assigned to a variable)
    plot_original = module.plot
    def plot_wrap(*args, **kwargs):
        plot_original(*args, **kwargs)
    module.plot = plot_wrap

    # Replace surf function with a wrapper function that discards
    # its return value (see comment for plot function)
    surf_original = module.surf
    def surf_wrap(*args, **kwargs):
        surf_original(*args, **kwargs)
    module.surf = surf_wrap

    # Replace contour function with a wrapper function that discards
    # its return value (see comment for plot function)
    contour_original = module.contour
    def contour_wrap(*args, **kwargs):
        contour_original(*args, **kwargs)
    module.contour = contour_wrap

    # Replace imview function with a wrapper function that discards
    # its return value (see comment for plot function)
    imview_original = module.imview
    def imview_wrap(*args, **kwargs):
        imview_original(*args, **kwargs)
    module.imview = imview_wrap

    # Disable figure show method (results in a warning if used within
    # a notebook with inline plotting)
    import matplotlib.figure
    def show_disable(self):
        pass
    matplotlib.figure.Figure.show = show_disable
def config_notebook_plotting()
Configure plotting functions for inline plotting within a Jupyter Notebook shell. This function has no effect when not within a notebook shell, and may therefore be used within a normal python script.
3.036679
2.92734
1.037351
self.YU[:] = self.Y - self.U
Zf = sl.rfftn(self.YU, None, self.cri.axisN)
ZfQ = sl.dot(self.Q.T, Zf, axis=self.cri.axisC)
b = self.DSfBQ + self.rho * ZfQ

Xh = sl.solvedbi_sm(self.gDf, self.rho, b, self.c, axis=self.cri.axisM)
self.Xf[:] = sl.dot(self.Q, Xh, axis=self.cri.axisC)
self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)

if self.opt['LinSolveCheck']:
    DDXf = np.conj(self.Df) * sl.inner(self.Df, self.Xf,
                                       axis=self.cri.axisM)
    DDXfBB = sl.dot(self.B.T.dot(self.B), DDXf, axis=self.cri.axisC)
    ax = DDXfBB + self.rho * self.Xf
    b = sl.dot(self.B.T, self.DSf, axis=self.cri.axisC) + \
        self.rho * Zf
    self.xrrs = sl.rrs(ax, b)
else:
    self.xrrs = None
def xstep(self)
r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`.
4.69206
4.57239
1.026173
DXBf = sl.dot(self.B, sl.inner(self.Df, self.obfn_fvarf(),
                               axis=self.cri.axisM), axis=self.cri.axisC)
Ef = DXBf - self.Sf
return sl.rfl2norm2(Ef, self.S.shape, axis=self.cri.axisN) / 2.0
def obfn_dfd(self)
r"""Compute data fidelity term :math:`(1/2) \| D X B - S \|_2^2`.
11.986667
10.682708
1.122063
if self.opt['HighMemSolve']:
    self.c = sl.solvedbi_sm_c(self.gDf, np.conj(self.gDf), self.rho,
                              self.cri.axisM)
def rhochange(self)
Update cached `c` array when `rho` changes.
31.662016
26.189039
1.20898
self.Y = sp.prox_l1l2(self.AX + self.U,
                      (self.lmbda / self.rho) * self.wl1,
                      (self.mu / self.rho), axis=self.cri.axisC)
cbpdn.GenericConvBPDN.ystep(self)
def ystep(self)
r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{y}`.
15.662859
13.63026
1.149124
rl1 = np.linalg.norm((self.wl1 * self.obfn_gvar()).ravel(), 1)
rl21 = np.sum(np.sqrt(np.sum(self.obfn_gvar()**2, axis=self.cri.axisC)))
return (self.lmbda*rl1 + self.mu*rl21, rl1, rl21)
def obfn_reg(self)
r"""Compute regularisation terms and contribution to objective function. Regularisation terms are :math:`\| Y \|_1` and :math:`\| Y \|_{2,1}`.
7.465122
6.273872
1.189875
# This method is overridden because we have to change the
# mechanism for combining the Y0 and Y1 blocks into a single
# array (see comment in the __init__ method).
shp = Y.shape[0:self.cri.axisC] + self.y0shp[self.cri.axisC:]
return Y[(slice(None),)*self.cri.axisC +
         (slice(0, self.y0I),)].reshape(shp)
def block_sep0(self, Y)
r"""Separate variable into component corresponding to :math:`\mathbf{y}_0` in :math:`\mathbf{y}\;\;`.
9.018533
8.907161
1.012504
# This method is overridden because we have to change the
# mechanism for combining the Y0 and Y1 blocks into a single
# array (see comment in the __init__ method).
shp = Y.shape[0:self.cri.axisC] + self.y1shp[self.cri.axisC:]
return Y[(slice(None),)*self.cri.axisC +
         (slice(self.y0I, None),)].reshape(shp)
def block_sep1(self, Y)
r"""Separate variable into component corresponding to :math:`\mathbf{y}_1` in :math:`\mathbf{y}\;\;`.
9.175878
9.18918
0.998552
# This method is overridden because we have to change the
# mechanism for combining the Y0 and Y1 blocks into a single
# array (see comment in the __init__ method).
y0shp = Y0.shape[0:self.cri.axisC] + (-1,)
y1shp = Y1.shape[0:self.cri.axisC] + (-1,)
return np.concatenate((Y0.reshape(y0shp),
                       Y1.reshape(y1shp)), axis=self.cri.axisC)
def block_cat(self, Y0, Y1)
r"""Concatenate components corresponding to :math:`\mathbf{y}_0` and :math:`\mathbf{y}_1` to form :math:`\mathbf{y}\;\;`.
4.557587
4.693458
0.971051
# This calculation involves non-negligible computational cost. It
# should be possible to disable relevant diagnostic information
# (dual residual) to avoid this cost.
Y0f = sl.rfftn(Y0, None, self.cri.axisN)
return sl.irfftn(
    sl.dot(self.B.T, np.conj(self.Df) * Y0f, axis=self.cri.axisC),
    self.cri.Nv, self.cri.axisN)
def cnst_A0T(self, Y0)
r"""Compute :math:`A_0^T \mathbf{y}_0` component of :math:`A^T \mathbf{y}` (see :meth:`.ADMMTwoBlockCnstrnt.cnst_AT`).
10.81582
10.418108
1.038175
if D is not None:
    self.D = np.asarray(D, dtype=self.dtype)
if B is not None:
    self.B = np.asarray(B, dtype=self.dtype)

if B is not None or not hasattr(self, 'Gamma'):
    self.Gamma, self.Q = np.linalg.eigh(self.B.T.dot(self.B))
    self.Gamma = np.abs(self.Gamma)
if D is not None or not hasattr(self, 'Df'):
    self.Df = sl.rfftn(self.D, self.cri.Nv, self.cri.axisN)

# Fold square root of Gamma into the dictionary array to enable
# use of the solvedbi_sm solver
shpg = [1] * len(self.cri.shpD)
shpg[self.cri.axisC] = self.Gamma.shape[0]
Gamma2 = np.sqrt(self.Gamma).reshape(shpg)
self.gDf = Gamma2 * self.Df

if self.opt['HighMemSolve']:
    self.c = sl.solvedbd_sm_c(
        self.gDf, np.conj(self.gDf),
        (self.mu / self.rho) * self.GHGf + 1.0, self.cri.axisM)
else:
    self.c = None
def setdict(self, D=None, B=None)
Set dictionary array.
4.879218
4.762639
1.024478
self.YU[:] = self.Y - self.U
self.block_sep0(self.YU)[:] += self.S
Zf = sl.rfftn(self.YU, None, self.cri.axisN)
Z0f = self.block_sep0(Zf)
Z1f = self.block_sep1(Zf)

DZ0f = np.conj(self.Df) * Z0f
DZ0fBQ = sl.dot(self.B.dot(self.Q).T, DZ0f, axis=self.cri.axisC)
Z1fQ = sl.dot(self.Q.T, Z1f, axis=self.cri.axisC)
b = DZ0fBQ + Z1fQ

Xh = sl.solvedbd_sm(self.gDf, (self.mu / self.rho) * self.GHGf + 1.0,
                    b, self.c, axis=self.cri.axisM)
self.Xf[:] = sl.dot(self.Q, Xh, axis=self.cri.axisC)
self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)

if self.opt['LinSolveCheck']:
    DDXf = np.conj(self.Df) * sl.inner(self.Df, self.Xf,
                                       axis=self.cri.axisM)
    DDXfBB = sl.dot(self.B.T.dot(self.B), DDXf, axis=self.cri.axisC)
    ax = self.rho * (DDXfBB + self.Xf) + \
        self.mu * self.GHGf * self.Xf
    b = self.rho * (sl.dot(self.B.T, DZ0f, axis=self.cri.axisC) + Z1f)
    self.xrrs = sl.rrs(ax, b)
else:
    self.xrrs = None
def xstep(self)
r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`.
4.23496
4.089402
1.035594
return self.rho * self.cnst_AT(self.U)
def rsdl_s(self, Yprev, Y)
Compute dual residual vector.
36.47485
32.174427
1.13366
AXU = self.AX + self.U
Y0 = sp.prox_l1(self.block_sep0(AXU) - self.S,
                (1.0 / self.rho) * self.W)
Y1 = sp.prox_l1l2(self.block_sep1(AXU), 0.0,
                  (self.lmbda / self.rho) * self.wl21,
                  axis=self.cri.axisC)
self.Y = self.block_cat(Y0, Y1)
cbpdn.ConvTwoBlockCnstrnt.ystep(self)
def ystep(self)
r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{y}`.
9.368464
8.559682
1.094487
# Define fvf, which the flattened original referenced without defining
fvf = self.obfn_fvarf()
rl21 = np.sum(self.wl21 * np.sqrt(np.sum(self.obfn_gvar()**2,
                                         axis=self.cri.axisC)))
rgr = sl.rfl2norm2(np.sqrt(self.GHGf * np.conj(fvf) * fvf),
                   self.cri.Nv, self.cri.axisN) / 2.0
return (self.lmbda*rl21 + self.mu*rgr, rl21, rgr)
def obfn_reg(self)
r"""Compute regularisation terms and contribution to objective function. Regularisation terms are :math:`\| Y \|_1` and :math:`\| Y \|_{2,1}`.
11.696532
10.680131
1.095167
# Use dimK specified in __init__ as default
if dimK is None and self.dimK is not None:
    dimK = self.dimK

# Start solve timer
self.timer.start(['solve', 'solve_wo_eval'])

# Solve CSC problem on S and do dictionary step
self.init_vars(S, dimK)
self.xstep(S, self.lmbda, dimK)
self.dstep()

# Stop solve timer
self.timer.stop('solve_wo_eval')

# Extract and record iteration stats
self.manage_itstat()

# Increment iteration count
self.j += 1

# Stop solve timer
self.timer.stop('solve')

# Return current dictionary
return self.getdict()
def solve(self, S, dimK=None)
Compute sparse coding and dictionary update for training data `S`.
5.795698
5.596069
1.035673
Nv = S.shape[0:self.dimN]
if self.cri is None or Nv != self.cri.Nv:
    self.cri = cr.CDU_ConvRepIndexing(self.dsz, S, dimK, self.dimN)
    if self.opt['CUDA_CBPDN']:
        if self.cri.Cd > 1 or self.cri.Cx > 1:
            raise ValueError('CUDA CBPDN solver can only be used for '
                             'single channel problems')
        if self.cri.K > 1:
            raise ValueError('CUDA CBPDN solver can not be used with '
                             'mini-batches')
    self.Df = sl.pyfftw_byte_aligned(sl.rfftn(self.D, self.cri.Nv,
                                              self.cri.axisN))
    self.Gf = sl.pyfftw_empty_aligned(self.Df.shape, self.Df.dtype)
    self.Z = sl.pyfftw_empty_aligned(self.cri.shpX, self.dtype)
else:
    self.Df[:] = sl.rfftn(self.D, self.cri.Nv, self.cri.axisN)
def init_vars(self, S, dimK)
Initialise variables required for sparse coding and dictionary update for training data `S`.
5.069573
5.072639
0.999395
# Extract and record iteration stats
itst = self.iteration_stats()
self.itstat.append(itst)
self.display_status(self.fmtstr, itst)
def manage_itstat(self)
Compute, record, and display iteration statistics.
9.229807
7.60161
1.214191
hdrmap = {'Itn': 'Iter', 'X r': 'PrimalRsdl', 'X s': 'DualRsdl',
          u('X ρ'): 'Rho', 'D cnstr': 'Cnstr', 'D dlt': 'DeltaD',
          u('D η'): 'Eta'}
return hdrmap
def hdrval(cls)
Construct dictionary mapping display column title to IterationStats entries.
14.550349
11.770818
1.236138
tk = self.timer.elapsed(self.opt['IterTimer'])
if self.xstep_itstat is None:
    objfn = (0.0,) * 3
    rsdl = (0.0,) * 2
    rho = (0.0,)
else:
    objfn = (self.xstep_itstat.ObjFun, self.xstep_itstat.DFid,
             self.xstep_itstat.RegL1)
    rsdl = (self.xstep_itstat.PrimalRsdl,
            self.xstep_itstat.DualRsdl)
    rho = (self.xstep_itstat.Rho,)

cnstr = np.linalg.norm(cr.zpad(self.D, self.cri.Nv) - self.G)
dltd = np.linalg.norm(self.D - self.Dprv)

tpl = (self.j,) + objfn + rsdl + rho + (cnstr, dltd, self.eta) + \
    self.itstat_extra() + (tk,)
return type(self).IterationStats(*tpl)
def iteration_stats(self)
Construct iteration stats record tuple.
5.403981
4.992195
1.082486
if self.opt['Verbose']:
    hdrtxt = type(self).hdrtxt()
    # Call utility function to construct status display formatting
    self.hdrstr, self.fmtstr, self.nsep = common.solve_status_str(
        hdrtxt, fwdth0=type(self).fwiter, fprec=type(self).fpothr)
else:
    self.hdrstr, self.fmtstr, self.nsep = '', '', 0
def display_config(self)
Set up status display if option selected. NB: this method assumes that the first entry is the iteration count and the last is the rho value.
14.331241
11.249727
1.273919
if self.opt['Verbose'] and self.opt['StatusHeader']:
    print(self.hdrstr)
    print("-" * self.nsep)
def display_start(self)
Start status display if option selected.
17.464136
11.215001
1.557212
# Use dimK specified in __init__ as default
if dimK is None and self.dimK is not None:
    dimK = self.dimK

# Start solve timer
self.timer.start(['solve', 'solve_wo_eval'])

# Solve CSC problem on S and do dictionary step
self.init_vars(S, dimK)
if W is None:
    W = np.array([1.0], dtype=self.dtype)
W = np.asarray(W.reshape(cr.mskWshape(W, self.cri)), dtype=self.dtype)
self.xstep(S, W, self.lmbda, dimK)
self.dstep(W)

# Stop solve timer
self.timer.stop('solve_wo_eval')

# Extract and record iteration stats
self.manage_itstat()

# Increment iteration count
self.j += 1

# Stop solve timer
self.timer.stop('solve')

# Return current dictionary
return self.getdict()
def solve(self, S, W=None, dimK=None)
Compute sparse coding and dictionary update for training data `S`.
5.689454
5.546233
1.025823
if self.opt['CUDA_CBPDN']:
    Z = cuda.cbpdnmsk(self.D.squeeze(), S[..., 0], W.squeeze(), lmbda,
                      self.opt['CBPDN'])
    Z = Z.reshape(self.cri.Nv + (1, 1, self.cri.M,))
    self.Z[:] = np.asarray(Z, dtype=self.dtype)
    self.Zf = sl.rfftn(self.Z, self.cri.Nv, self.cri.axisN)
    self.Sf = sl.rfftn(S.reshape(self.cri.shpS), self.cri.Nv,
                       self.cri.axisN)
    self.xstep_itstat = None
else:
    # Create X update object (external representation is expected!)
    xstep = cbpdn.ConvBPDNMaskDcpl(self.D.squeeze(), S, lmbda, W,
                                   self.opt['CBPDN'], dimK=dimK,
                                   dimN=self.cri.dimN)
    xstep.solve()
    self.Sf = sl.rfftn(S.reshape(self.cri.shpS), self.cri.Nv,
                       self.cri.axisN)
    self.setcoef(xstep.getcoef())
    self.xstep_itstat = xstep.itstat[-1] if xstep.itstat else None
def xstep(self, S, W, lmbda, dimK)
Solve CSC problem for training data `S`.
4.503478
4.432865
1.01593
# Compute residual X D - S in frequency domain
Ryf = sl.inner(self.Zf, self.Df, axis=self.cri.axisM) - self.Sf
# Transform to spatial domain, apply mask, and transform back to
# frequency domain
Ryf[:] = sl.rfftn(W * sl.irfftn(Ryf, self.cri.Nv, self.cri.axisN),
                  None, self.cri.axisN)
# Compute gradient
gradf = sl.inner(np.conj(self.Zf), Ryf, axis=self.cri.axisK)

# If multiple channel signal, single channel dictionary
if self.cri.C > 1 and self.cri.Cd == 1:
    gradf = np.sum(gradf, axis=self.cri.axisC, keepdims=True)

# Update gradient step
self.eta = self.eta_a / (self.j + self.eta_b)

# Compute gradient descent
self.Gf[:] = self.Df - self.eta * gradf
self.G = sl.irfftn(self.Gf, self.cri.Nv, self.cri.axisN)

# Eval proximal operator
self.Dprv[:] = self.D
self.D[:] = self.Pcn(self.G)
def dstep(self, W)
Compute dictionary update for training data of preceding :meth:`xstep`.
5.284033
5.142134
1.027595
akey = list(a.keys())
# Iterate over all keys in b
for key in list(b.keys()):
    # If a key is encountered that is not in a, raise an
    # UnknownKeyError exception.
    if key not in akey:
        raise UnknownKeyError(pth + (key,))
    else:
        # If corresponding values in a and b for the same key
        # are both dicts, recursively call this method for
        # those values. If the value in a is a dict and the
        # value in b is not, raise an InvalidValueError
        # exception.
        if isinstance(a[key], dict):
            if isinstance(b[key], dict):
                keycmp(a[key], b[key], pth + (key,))
            else:
                raise InvalidValueError(pth + (key,))
def keycmp(a, b, pth=())
Recurse down the tree of nested dicts `b`, at each level checking that it does not have any keys that are not also at the same level in `a`. The key path is recorded in `pth`. If an unknown key is encountered in `b`, an `UnknownKeyError` exception is raised. If a non-dict value is encountered in `b` for which the corresponding value in `a` is a dict, an `InvalidValueError` exception is raised.
2.920736
2.286039
1.277641
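A minimal sketch of keycmp on plain dicts; UnknownKeyError is assumed to be the exception class from the same module (e.g. sporco.cdict), and the 'Bogus' key is deliberately invalid.

defaults = {'Verbose': False, 'CBPDN': {'MaxMainIter': 100}}
keycmp(defaults, {'CBPDN': {'MaxMainIter': 50}})   # passes silently
try:
    keycmp(defaults, {'Bogus': 1})
except UnknownKeyError as e:
    print('unknown key path:', e)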
# Call __setitem__ for all keys in d
for key in list(d.keys()):
    self.__setitem__(key, d[key])
def update(self, d)
Update the dict with the dict tree in parameter d. Parameters ---------- d : dict New dict content
4.110065
4.413294
0.931292
# This test necessary to avoid unpickling errors in Python 3
if hasattr(self, 'dflt'):
    # Get corresponding node to self, as determined by pth
    # attribute, of the defaults dict tree
    a = self.__class__.getnode(self.dflt, self.pth)
    # Raise UnknownKeyError exception if key not in corresponding
    # node of defaults tree
    if key not in a:
        raise UnknownKeyError(self.pth + (key,))
    # Raise InvalidValueError if the key value in the defaults
    # tree is a dict and the value parameter is not a dict
    elif isinstance(a[key], dict) and not isinstance(value, dict):
        raise InvalidValueError(self.pth + (key,))
def check(self, key, value)
Check whether key,value pair is allowed. The key is allowed if there is a corresponding key in the defaults class attribute dict. The value is not allowed if the corresponding entry in the defaults dict is a dict but `value` is not. Parameters ---------- key : str or tuple of str Dict key value : any Dict value corresponding to key
7.883231
6.926761
1.138083
c = d
for key in pth[:-1]:
    if not isinstance(c, dict):
        raise InvalidValueError(c)
    elif key not in c:
        raise UnknownKeyError(pth)
    else:
        c = c.__getitem__(key)
return c
def getparent(d, pth)
Get the parent node of a subdict as specified by the key path in `pth`. Parameters ---------- d : dict Dict tree in which access is required pth : str or tuple of str Dict key
4.569057
5.528967
0.826385
global mp_X
global mp_DX
YU0f = sl.rfftn(mp_Y0[[i]] - mp_U0[[i]], mp_Nv, mp_axisN)
YU1f = sl.rfftn(mp_Y1[mp_grp[i]:mp_grp[i+1]] -
                1/mp_alpha*mp_U1[mp_grp[i]:mp_grp[i+1]], mp_Nv, mp_axisN)
if mp_Cd == 1:
    b = np.conj(mp_Df[mp_grp[i]:mp_grp[i+1]]) * YU0f + mp_alpha**2*YU1f
    Xf = sl.solvedbi_sm(mp_Df[mp_grp[i]:mp_grp[i+1]], mp_alpha**2, b,
                        mp_cache[i], axis=mp_axisM)
else:
    b = sl.inner(np.conj(mp_Df[mp_grp[i]:mp_grp[i+1]]), YU0f,
                 axis=mp_C) + mp_alpha**2*YU1f
    Xf = sl.solvemdbi_ism(mp_Df[mp_grp[i]:mp_grp[i+1]], mp_alpha**2, b,
                          mp_axisM, mp_axisC)
mp_X[mp_grp[i]:mp_grp[i+1]] = sl.irfftn(Xf, mp_Nv, mp_axisN)
mp_DX[i] = sl.irfftn(sl.inner(mp_Df[mp_grp[i]:mp_grp[i+1]], Xf,
                              mp_axisM), mp_Nv, mp_axisN)
def par_xstep(i)
r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}_{G_i}`, one of the disjoint problems of optimizing :math:`\mathbf{x}`. Parameters ---------- i : int Index of grouping to update
3.040827
3.09729
0.98177
global mp_X
global mp_Xnr
global mp_DX
global mp_DXnr
mp_Xnr[mp_grp[i]:mp_grp[i+1]] = mp_X[mp_grp[i]:mp_grp[i+1]]
mp_DXnr[i] = mp_DX[i]
if mp_rlx != 1.0:
    grpind = slice(mp_grp[i], mp_grp[i+1])
    mp_X[grpind] = mp_rlx * mp_X[grpind] + (1 - mp_rlx) * mp_Y1[grpind]
    mp_DX[i] = mp_rlx * mp_DX[i] + (1 - mp_rlx) * mp_Y0[i]
def par_relax_AX(i)
Parallel implementation of relaxation if option ``RelaxParam`` != 1.0.
2.93699
2.84496
1.032349
global mp_b
mp_b[:] = mp_inv_off_diag * np.sum((mp_S + mp_rho*(mp_DX+mp_U0)),
                                   axis=mp_axisM, keepdims=True)
def y0astep()
r"""The serial component of the step to minimise the augmented Lagrangian with respect to :math:`\mathbf{y}_0`.
24.704931
24.603031
1.004142
global mp_Y0
mp_Y0[i] = 1/mp_rho*mp_S + mp_DX[i] + mp_U0[i] + mp_b
def par_y0bstep(i)
r"""The parallel component of the step to minimise the augmented Lagrangian with respect to :math:`\mathbf{y}_0`. Parameters ---------- i : int Index of grouping to update
14.070125
12.865015
1.093673
global mp_Y1
grpind = slice(mp_grp[i], mp_grp[i+1])
XU1 = mp_X[grpind] + 1/mp_alpha*mp_U1[grpind]
if mp_wl1.shape[mp_axisM] == 1:
    gamma = mp_lmbda/(mp_alpha**2*mp_rho)*mp_wl1
else:
    gamma = mp_lmbda/(mp_alpha**2*mp_rho)*mp_wl1[grpind]
Y1 = sp.prox_l1(XU1, gamma)
if mp_NonNegCoef:
    Y1[Y1 < 0.0] = 0.0
if mp_NoBndryCross:
    for n in range(len(mp_Nv)):
        Y1[(slice(None),) + (slice(None),)*n +
           (slice(1-mp_Dshp[n], None),)] = 0.0
mp_Y1[mp_grp[i]:mp_grp[i+1]] = Y1
def par_y1step(i)
r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{y}_{1,G_i}`, one of the disjoint problems of optimizing :math:`\mathbf{y}_1`. Parameters ---------- i : int Index of grouping to update
4.943869
4.758606
1.038932
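The `sp.prox_l1` call above is the soft-thresholding operator, :math:`\mathrm{prox}_{\gamma \| \cdot \|_1}(\mathbf{v}) = \mathrm{sign}(\mathbf{v}) \odot \max(0, |\mathbf{v}| - \gamma)`. A one-function numpy sketch of the same operator (the library version also accepts array-valued thresholds, as used here):

import numpy as np

def soft_threshold(v, gamma):
    # Elementwise soft thresholding: sign(v) * max(0, |v| - gamma)
    return np.sign(v) * np.maximum(0, np.abs(v) - gamma)

print(soft_threshold(np.array([-2.0, -0.3, 0.5, 3.0]), 1.0))
# [-1. -0.  0.  2.]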
global mp_U1
grpind = slice(mp_grp[i], mp_grp[i+1])
mp_U1[grpind] += mp_alpha*(mp_X[grpind] - mp_Y1[grpind])
def par_u1step(i)
r"""Dual variable update for :math:`\mathbf{u}_{1,G_i}`, one of the disjoint problems for updating :math:`\mathbf{u}_1`. Parameters ---------- i : int Index of grouping to update
9.242702
8.800632
1.050232
par_y0bstep(i)
par_y1step(i)
par_u0step(i)
par_u1step(i)
def par_final_stepgrp(i)
The parallel step grouping of the final iteration in solve. The
steps are cyclically permuted so that only one merge is required
per iteration, which requires distinct initial and final step
groups.

Parameters
----------
i : int
  Index of grouping to update
4.539726
5.681902
0.79898
# Compute the residuals in parallel; it remains to be checked
# whether the residuals depend on alpha
global mp_ry0
global mp_ry1
global mp_sy0
global mp_sy1
global mp_nrmAx
global mp_nrmBy
global mp_nrmu
mp_ry0[i] = np.sum((mp_DXnr[i] - mp_Y0[i])**2)
mp_ry1[i] = mp_alpha**2*np.sum((mp_Xnr[mp_grp[i]:mp_grp[i+1]] -
                                mp_Y1[mp_grp[i]:mp_grp[i+1]])**2)
mp_sy0[i] = np.sum((mp_Y0old[i] - mp_Y0[i])**2)
mp_sy1[i] = mp_alpha**2*np.sum((mp_Y1old[mp_grp[i]:mp_grp[i+1]] -
                                mp_Y1[mp_grp[i]:mp_grp[i+1]])**2)
mp_nrmAx[i] = np.sum(mp_DXnr[i]**2) + mp_alpha**2 * np.sum(
    mp_Xnr[mp_grp[i]:mp_grp[i+1]]**2)
mp_nrmBy[i] = np.sum(mp_Y0[i]**2) + mp_alpha**2 * np.sum(
    mp_Y1[mp_grp[i]:mp_grp[i+1]]**2)
mp_nrmu[i] = np.sum(mp_U0[i]**2) + np.sum(
    mp_U1[mp_grp[i]:mp_grp[i+1]]**2)
def par_compute_residuals(i)
Compute components of the residual and stopping thresholds that
can be done in parallel.

Parameters
----------
i : int
  Index of group to compute
2.063195
2.085437
0.989334
# Initialize the pool if needed
if self.pool is None:
    if self.nproc > 1:
        self.pool = mp.Pool(processes=self.nproc)
    else:
        self.pool = None
else:
    print('pool already initialized?')
def init_pool(self)
Initialize multiprocessing pool if necessary.
3.187479
2.633891
1.210179
if self.pool is None:
    return [f(i) for i in range(n)]
else:
    return self.pool.map(f, range(n))
def distribute(self, f, n)
Distribute the computations amongst the multiprocessing pool
workers.

Parameters
----------
f : function
  Function to be distributed to the processors
n : int
  The values in range(0, n) will be passed as arguments to the
  function f
2.866587
2.955399
0.969949
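A hypothetical call, assuming an attribute `self.Ngrp` holding the number of coefficient groupings: the same code path works whether or not a pool was created, falling back to a serial list comprehension when `self.pool` is `None`.

# Hypothetical usage sketch: apply par_xstep to every group index,
# in parallel when a pool exists and serially otherwise
self.distribute(par_xstep, self.Ngrp)  # self.Ngrp: assumed attribute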
if self.pool is not None:
    self.pool.terminate()
    self.pool.join()
    del self.pool
    self.pool = None
def terminate_pool(self)
Terminate and close the multiprocessing pool if necessary.
2.887529
2.234834
1.292055
l1 = np.sum(mp_wl1*np.abs(self.obfn_gvar()))
return (self.lmbda*l1, l1)
def obfn_reg(self)
r"""Compute regularisation term, :math:`\| x \|_1`, and contribution to objective function.
19.229475
15.828309
1.214879
XF = sl.rfftn(self.obfn_fvar(), mp_Nv, mp_axisN)
DX = np.moveaxis(sl.irfftn(sl.inner(mp_Df, XF, mp_axisM), mp_Nv,
                           mp_axisN), mp_axisM, self.cri.axisM)
return np.sum((self.W*(DX-self.S))**2)/2.0
def obfn_dfd(self)
r"""Compute data fidelity term :math:`(1/2) \| W \left( \sum_m \mathbf{d}_m * \mathbf{x}_m - \mathbf{s} \right) \|_2^2`.
10.755816
10.609946
1.013748
if self.opt['NonNegCoef']:
    self.Y[self.Y < 0.0] = 0.0
if self.opt['NoBndryCross']:
    for n in range(0, self.cri.dimN):
        self.Y[(slice(None),) * n +
               (slice(1 - self.D.shape[n], None),)] = 0.0
def ystep(self)
r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{y}`. If this method is not overridden, the problem is solved without any regularisation other than the option enforcement of non-negativity of the solution and filter boundary crossing supression. When it is overridden, it should be explicitly called at the end of the overriding method.
7.203117
5.715561
1.260264
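The ``NoBndryCross`` loop builds, for each spatial axis `n`, an index tuple selecting the trailing `D.shape[n] - 1` samples along that axis and zeroes the corresponding coefficients, so that no filter wraps across the circular signal boundary. A small sketch of the indexing idiom, with hypothetical sizes:

import numpy as np

# Hypothetical sizes: 2D problem, 8x8 signal, 3x3 filter support
Y = np.ones((8, 8))
Dshape = (3, 3)
for n in range(2):
    # Zero the trailing (filter size - 1) samples along axis n
    Y[(slice(None),)*n + (slice(1 - Dshape[n], None),)] = 0.0
print(Y[:3, :3])  # interior untouched; last two rows/cols are zero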
Ef = sl.inner(self.Df, self.obfn_fvarf(), axis=self.cri.axisM) - \
    self.Sf
return sl.rfl2norm2(Ef, self.S.shape, axis=self.cri.axisN) / 2.0
def obfn_dfd(self)
r"""Compute data fidelity term :math:`(1/2) \| \sum_m \mathbf{d}_m * \mathbf{x}_m - \mathbf{s} \|_2^2`.
14.04699
13.945537
1.007275
self.Y = sp.prox_l1(self.AX + self.U,
                    (self.lmbda / self.rho) * self.wl1)
super(ConvBPDN, self).ystep()
def ystep(self)
r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{y}`.
15.984235
11.475671
1.39288
rl1 = np.linalg.norm((self.wl1 * self.obfn_gvar()).ravel(), 1)
return (self.lmbda*rl1, rl1)
def obfn_reg(self)
Compute regularisation term and contribution to objective function.
13.564993
9.950743
1.363214
self.YU[:] = self.Y - self.U
b = self.DSf + self.rho*sl.rfftn(self.YU, None, self.cri.axisN)
if self.cri.Cd == 1:
    self.Xf[:] = sl.solvedbi_sm(self.Df, self.mu + self.rho, b,
                                self.c, self.cri.axisM)
else:
    self.Xf[:] = sl.solvemdbi_ism(self.Df, self.mu + self.rho, b,
                                  self.cri.axisM, self.cri.axisC)
self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)
if self.opt['LinSolveCheck']:
    Dop = lambda x: sl.inner(self.Df, x, axis=self.cri.axisM)
    if self.cri.Cd == 1:
        DHop = lambda x: np.conj(self.Df) * x
    else:
        DHop = lambda x: sl.inner(np.conj(self.Df), x,
                                  axis=self.cri.axisC)
    ax = DHop(Dop(self.Xf)) + (self.mu + self.rho)*self.Xf
    self.xrrs = sl.rrs(ax, b)
else:
    self.xrrs = None
def xstep(self)
r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`.
4.287759
4.025165
1.065238
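When ``LinSolveCheck`` is enabled, `sl.rrs` records the relative residual of the computed solution of the linear system. A sketch of the quantity presumably being computed, under the usual definition of relative residual (the exact convention used by `sl.rrs` may differ):

import numpy as np

def rel_res(ax, b):
    # Relative residual ||ax - b|| / ||b|| of a putative solution
    # (sketch of the usual definition; not the library routine)
    nb = np.linalg.norm(b.ravel())
    return np.linalg.norm((ax - b).ravel()) / nb if nb > 0 else 0.0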
rl1 = np.linalg.norm((self.wl1 * self.obfn_gvar()).ravel(), 1)
rl2 = 0.5*np.linalg.norm(self.obfn_gvar())**2
return (self.lmbda*rl1 + self.mu*rl2, rl1, rl2)
def obfn_reg(self)
Compute regularisation term and contribution to objective function.
5.644331
4.696279
1.201873
if D is not None:
    self.D = np.asarray(D, dtype=self.dtype)
self.Df = sl.rfftn(self.D, self.cri.Nv, self.cri.axisN)
# Compute D^H S
self.DSf = np.conj(self.Df) * self.Sf
if self.cri.Cd > 1:
    self.DSf = np.sum(self.DSf, axis=self.cri.axisC, keepdims=True)
if self.opt['HighMemSolve'] and self.cri.Cd == 1:
    self.c = sl.solvedbd_sm_c(
        self.Df, np.conj(self.Df), self.mu*self.GHGf + self.rho,
        self.cri.axisM)
else:
    self.c = None
def setdict(self, D=None)
Set dictionary array.
4.862937
4.777526
1.017878
fvf = self.obfn_fvarf()
rl1 = np.linalg.norm((self.wl1 * self.obfn_gvar()).ravel(), 1)
rgr = sl.rfl2norm2(np.sqrt(self.GHGf*np.conj(fvf)*fvf), self.cri.Nv,
                   self.cri.axisN)/2.0
return (self.lmbda*rl1 + self.mu*rgr, rl1, rgr)
def obfn_reg(self)
Compute regularisation term and contribution to objective function.
12.31163
11.251211
1.094249
if self.opt['Y0'] is None:
    return np.zeros(ushape, dtype=self.dtype)
else:
    # If initial Y is non-zero, initial U is chosen so that
    # the relevant dual optimality criterion (see (3.10) in
    # boyd-2010-distributed) is satisfied.
    # NB: still needs to be worked out.
    return np.zeros(ushape, dtype=self.dtype)
def uinit(self, ushape)
Return initialiser for working variable U.
7.868196
7.191595
1.094082
self.Y = sp.proj_l1(self.AX + self.U, self.gamma,
                    axis=self.cri.axisN + (self.cri.axisC,
                                           self.cri.axisM))
super(ConvBPDNProjL1, self).ystep()
def ystep(self)
r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{y}`.
15.793715
12.820222
1.231938
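Projection onto an :math:`\ell_1` ball of radius :math:`\gamma` can be computed by sorting, following the well-known :math:`O(n \log n)` algorithm of Duchi et al.; a minimal single-vector sketch (the library version of `sp.proj_l1` operates along specified axes and in batched form):

import numpy as np

def proj_l1_ball(v, gamma):
    # Project vector v onto the l1 ball of radius gamma
    if np.abs(v).sum() <= gamma:
        return v
    u = np.sort(np.abs(v))[::-1]                 # descending
    css = np.cumsum(u)
    j = np.arange(1, v.size + 1)
    rho = np.max(j[u - (css - gamma) / j > 0])   # active set size
    theta = (css[rho - 1] - gamma) / rho         # threshold
    return np.sign(v) * np.maximum(np.abs(v) - theta, 0)

v = np.array([3.0, -1.0, 0.5])
w = proj_l1_ball(v, 2.0)
print(np.abs(w).sum())  # 2.0 (on the ball boundary)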
dfd = self.obfn_dfd()
prj = sp.proj_l1(self.obfn_gvar(), self.gamma,
                 axis=self.cri.axisN + (self.cri.axisC,
                                        self.cri.axisM))
cns = np.linalg.norm(prj - self.obfn_gvar())
return (dfd, cns)
def eval_objfn(self)
Compute components of regularisation function as well as total objective function.
9.978266
9.006322
1.107918
if D is not None:
    self.D = np.asarray(D, dtype=self.dtype)
self.Df = sl.rfftn(self.D, self.cri.Nv, self.cri.axisN)
if self.opt['HighMemSolve'] and self.cri.Cd == 1:
    self.c = sl.solvedbi_sm_c(self.Df, np.conj(self.Df), 1.0,
                              self.cri.axisM)
else:
    self.c = None
def setdict(self, D=None)
Set dictionary array.
5.231212
5.102196
1.025286
if self.opt['NonNegCoef'] or self.opt['NoBndryCross']:
    Y1 = self.block_sep1(self.Y)
    if self.opt['NonNegCoef']:
        Y1[Y1 < 0.0] = 0.0
    if self.opt['NoBndryCross']:
        for n in range(0, self.cri.dimN):
            Y1[(slice(None),)*n +
               (slice(1-self.D.shape[n], None),)] = 0.0
    self.block_sep1(self.Y)[:] = Y1
def ystep(self)
r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{y}`.
6.01333
5.322934
1.129702
self.AXnr = self.cnst_A(self.X, self.Xf)
if self.rlx == 1.0:
    self.AX = self.AXnr
else:
    if not hasattr(self, 'c0'):
        self.c0 = self.cnst_c0()
    if not hasattr(self, 'c1'):
        self.c1 = self.cnst_c1()
    alpha = self.rlx
    self.AX = alpha*self.AXnr + (1-alpha)*self.block_cat(
        self.var_y0() + self.c0, self.var_y1() + self.c1)
def relax_AX(self)
Implement relaxation if option ``RelaxParam`` != 1.0.
3.848055
3.629265
1.060285
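Written out, with the two-block constraint expressed as :math:`A\mathbf{x} - \mathbf{y} = \mathbf{c}` and relaxation parameter :math:`\alpha` (option ``RelaxParam``, typically in :math:`[1.0, 1.8]`), the quantity computed above is the standard ADMM over-relaxation (see Sec. 3.4.3 of boyd-2010-distributed):

\overline{A\mathbf{x}}^{(k+1)} = \alpha A\mathbf{x}^{(k+1)} +
(1 - \alpha) \left( \mathbf{y}^{(k)} + \mathbf{c} \right)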
if self.y0swapaxes:
    return np.swapaxes(Y[(slice(None),)*self.blkaxis +
                         (slice(0, self.blkidx),)],
                       self.cri.axisC, self.cri.axisM)
else:
    return super(ConvTwoBlockCnstrnt, self).block_sep0(Y)
def block_sep0(self, Y)
r"""Separate variable into component corresponding to :math:`\mathbf{y}_0` in :math:`\mathbf{y}\;\;`. The method from parent class :class:`.ADMMTwoBlockCnstrnt` is overridden here to allow swapping of C (channel) and M (filter) axes in block 0 so that it can be concatenated on axis M with block 1. This is necessary because block 0 has the dimensions of S (N x C x K x 1) while block 1 has the dimensions of X (N x 1 x K x M).
10.869649
5.993885
1.813456
if self.y0swapaxes:
    return np.concatenate(
        (np.swapaxes(Y0, self.cri.axisC, self.cri.axisM), Y1),
        axis=self.blkaxis)
else:
    return super(ConvTwoBlockCnstrnt, self).block_cat(Y0, Y1)
def block_cat(self, Y0, Y1)
r"""Concatenate components corresponding to :math:`\mathbf{y}_0` and :math:`\mathbf{y}_1` to form :math:`\mathbf{y}\;\;`. The method from parent class :class:`.ADMMTwoBlockCnstrnt` is overridden here to allow swapping of C (channel) and M (filter) axes in block 0 so that it can be concatenated on axis M with block 1. This is necessary because block 0 has the dimensions of S (N x C x K x 1) while block 1 has the dimensions of X (N x 1 x K x M).
9.800756
6.027897
1.6259
return self.var_y0() if self.opt['AuxVarObj'] else \
    self.cnst_A0(None, self.Xf) - self.cnst_c0()
def obfn_g0var(self)
Variable to be evaluated in computing :meth:`.ADMMTwoBlockCnstrnt.obfn_g0`, depending on the ``AuxVarObj`` option value.
23.559818
11.471666
2.05374
# This calculation involves non-negligible computational cost
# when Xf is None (i.e. the function is not being applied to
# self.X).
if Xf is None:
    Xf = sl.rfftn(X, None, self.cri.axisN)
return sl.irfftn(sl.inner(self.Df, Xf, axis=self.cri.axisM),
                 self.cri.Nv, self.cri.axisN)
def cnst_A0(self, X, Xf=None)
r"""Compute :math:`A_0 \mathbf{x}` component of ADMM problem constraint.
5.936916
5.967006
0.994957
# This calculation involves non-negligible computational cost. It
# should be possible to disable relevant diagnostic information
# (dual residual) to avoid this cost.
Y0f = sl.rfftn(Y0, None, self.cri.axisN)
if self.cri.Cd == 1:
    return sl.irfftn(np.conj(self.Df) * Y0f, self.cri.Nv,
                     self.cri.axisN)
else:
    return sl.irfftn(sl.inner(np.conj(self.Df), Y0f,
                              axis=self.cri.axisC),
                     self.cri.Nv, self.cri.axisN)
def cnst_A0T(self, Y0)
r"""Compute :math:`A_0^T \mathbf{y}_0` component of :math:`A^T \mathbf{y}` (see :meth:`.ADMMTwoBlockCnstrnt.cnst_AT`).
5.997095
5.87887
1.02011
AXU = self.AX + self.U
Y0 = sp.proj_l2(self.block_sep0(AXU) - self.S, self.epsilon,
                axis=self.cri.axisN)
Y1 = sp.prox_l1(self.block_sep1(AXU), self.wl1 / self.rho)
self.Y = self.block_cat(Y0, Y1)
super(ConvMinL1InL2Ball, self).ystep()
def ystep(self)
r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{y}`.
12.861923
11.964765
1.074983
return np.linalg.norm(sp.proj_l2(Y0, self.epsilon,
                                 axis=self.cri.axisN) - Y0)
def obfn_g0(self, Y0)
r"""Compute :math:`g_0(\mathbf{y}_0)` component of ADMM objective function.
20.380398
18.355492
1.110316
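Projection onto an :math:`\ell_2` ball of radius :math:`\epsilon` has the closed form :math:`\mathbf{x} \min(1, \epsilon / \|\mathbf{x}\|_2)`, so `obfn_g0` is zero exactly when the constraint is satisfied. A single-vector numpy sketch (the library version of `sp.proj_l2` operates along specified axes):

import numpy as np

def proj_l2_ball(x, eps):
    # Scale x back onto the l2 ball of radius eps if it lies outside
    n = np.linalg.norm(x)
    return x if n <= eps else x * (eps / n)

x = np.array([3.0, 4.0])      # norm 5
print(proj_l2_ball(x, 1.0))   # [0.6 0.8]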
return np.linalg.norm((self.wl1 * Y1).ravel(), 1)
def obfn_g1(self, Y1)
r"""Compute :math:`g_1(\mathbf{y_1})` component of ADMM objective function.
19.647284
16.947716
1.159288
g0v = self.obfn_g0(self.obfn_g0var())
g1v = self.obfn_g1(self.obfn_g1var())
return (g1v, g0v)
def eval_objfn(self)
Compute components of regularisation function as well as total contribution to objective function.
4.633886
4.176621
1.109482
AXU = self.AX + self.U
Y0 = (self.rho*(self.block_sep0(AXU) - self.S)) / \
    (self.W**2 + self.rho)
Y1 = sp.prox_l1(self.block_sep1(AXU),
                (self.lmbda / self.rho) * self.wl1)
self.Y = self.block_cat(Y0, Y1)
super(ConvBPDNMaskDcpl, self).ystep()
def ystep(self)
r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{y}`.
10.891172
9.901244
1.09998
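The `Y0` update above is the closed-form minimiser of the block-0 subproblem :math:`\min_{\mathbf{y}} (1/2) \| W \mathbf{y} \|_2^2 + (\rho/2) \| \mathbf{y} - \mathbf{r} \|_2^2`, with :math:`\mathbf{r}` the relevant residual: setting the elementwise gradient :math:`W^2 \mathbf{y} + \rho (\mathbf{y} - \mathbf{r})` to zero gives :math:`\mathbf{y} = \rho \mathbf{r} / (W^2 + \rho)`. A quick numerical check of this stationarity condition:

import numpy as np

# Numerical sketch: y = rho*r / (W**2 + rho) minimises
# (1/2)||W*y||^2 + (rho/2)||y - r||^2 elementwise
rng = np.random.default_rng(0)
W, r, rho = rng.random(5), rng.standard_normal(5), 2.0
y = rho * r / (W**2 + rho)
grad = W**2 * y + rho * (y - r)   # stationarity condition
print(np.allclose(grad, 0))       # True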