code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64) |
---|---|---|---|---|---|
# Extract AMS part of ystep argument so that it is not
# affected by the main part of the ystep
amidx = self.index_addmsk()
Yi = self.cbpdn.AX[amidx] + self.cbpdn.U[amidx]
# Perform main part of ystep from inner cbpdn object
self.inner_ystep()
# Apply mask to AMS component and insert into Y from inner
# cbpdn object
Yi[np.where(self.W.astype(bool))] = 0.0
self.cbpdn.Y[amidx] = Yi | def ystep(self) | This method is inserted into the inner cbpdn object,
replacing its own ystep method, thereby providing a hook for
applying the additional steps necessary for the AMS method. | 12.227668 | 7.712631 | 1.585408 |
# Get inner cbpdn object gvar
gv = self.inner_obfn_gvar().copy()
# Set slice corresponding to the coefficient map of the final
# filter (the impulse inserted for the AMS method) to zero so
# that it does not affect the results (e.g. l1 norm) computed
# from this variable by the inner cbpdn object
gv[..., -self.cri.Cd:] = 0
return gv | def obfn_gvar(self) | This method is inserted into the inner cbpdn object,
replacing its own obfn_gvar method, thereby providing a hook for
applying the additional steps necessary for the AMS method. | 21.560003 | 12.817202 | 1.682115 |
Di = np.concatenate((D, sl.atleast_nd(D.ndim, self.imp)),
axis=D.ndim-1)
self.cbpdn.setdict(Di) | def setdict(self, D=None) | Set dictionary array. | 14.124273 | 13.999721 | 1.008897 |
if D is not None:
self.D = np.asarray(D, dtype=self.dtype)
self.Df = sl.rfftn(self.D, self.cri.Nv, self.cri.axisN)
if self.opt['HighMemSolve'] and self.cri.Cd == 1:
self.c = sl.solvedbd_sm_c(
self.Df, np.conj(self.Df),
(self.mu / self.rho) * self.GHGf + 1.0, self.cri.axisM)
else:
self.c = None | def setdict(self, D=None) | Set dictionary array. | 6.535255 | 6.388721 | 1.022936 |
g0v = self.obfn_g0(self.obfn_g0var())
g1v = self.obfn_g1(self.obfn_g1var())
rgr = sl.rfl2norm2(np.sqrt(self.GHGf * np.conj(self.Xf) * self.Xf),
self.cri.Nv, self.cri.axisN)/2.0
obj = g0v + self.lmbda*g1v + self.mu*rgr
return (obj, g0v, g1v, rgr) | def eval_objfn(self) | Compute components of regularisation function as well as total
contribution to objective function. | 6.949391 | 6.396599 | 1.08642 |
return np.sum(np.abs(self.W * self.obfn_g0var())) | def obfn_g0(self, Y0) | r"""Compute :math:`g_0(\mathbf{y}_0)` component of ADMM objective
function. | 21.162226 | 17.850582 | 1.18552 |
if self.opt['HighMemSolve'] and self.cri.Cd == 1:
self.c = sl.solvedbd_sm_c(
self.Df, np.conj(self.Df),
(self.mu / self.rho) * self.GHGf + 1.0, self.cri.axisM) | def rhochange(self) | Update cached c array when rho changes. | 24.35239 | 21.617069 | 1.126535 |
# Call solve method of inner cbpdn object
Xi = self.cbpdn.solve()
# Copy attributes from inner cbpdn object
self.timer = self.cbpdn.timer
self.itstat = self.cbpdn.itstat
# Return result of inner cbpdn object
return Xi | def solve(self) | Call the solve method of the inner cbpdn object and return the
result. | 6.022942 | 3.424233 | 1.758917 |
if X is None:
X = self.getcoef()
Xf = sl.rfftn(X, None, self.cbpdn.cri.axisN)
slc = (slice(None),)*self.dimN + \
(slice(self.chncs[b], self.chncs[b+1]),)
Sf = np.sum(self.cbpdn.Df[slc] * Xf, axis=self.cbpdn.cri.axisM)
return sl.irfftn(Sf, self.cbpdn.cri.Nv, self.cbpdn.cri.axisN) | def reconstruct(self, b, X=None) | Reconstruct representation of signal b in signal set. | 4.84065 | 4.610575 | 1.049901 |
# Check that nstnm is an attribute of cls
if nstnm in cls.__dict__:
# Get the attribute of cls by its name
nst = cls.__dict__[nstnm]
# Check that the attribute is a class
if isinstance(nst, type):
# Get the module in which the outer class is defined
mdl = sys.modules[cls.__module__]
# Construct an extended name by concatenating inner and outer
# names
extnm = cls.__name__ + nst.__name__
# Allow lookup of the nested class within the module via
# its extended name
setattr(mdl, extnm, nst)
# Change the nested class name to the extended name
nst.__name__ = extnm
return cls | def _fix_nested_class_lookup(cls, nstnm) | Fix name lookup problem that prevents pickling of classes with
nested class definitions. The approach is loosely based on that
implemented at https://git.io/viGqU, simplified and modified to
work in both Python 2.7 and Python 3.x.
Parameters
----------
cls : class
Outer class to which fix is to be applied
nstnm : string
Name of nested (inner) class to be renamed | 3.728541 | 3.550979 | 1.050004 |
# Extended name for the class that will be added to the module namespace
extnm = '_' + cls.__name__ + '_' + pstfx
# Get the module in which the dynamic class is defined
mdl = sys.modules[cls.__module__]
# Allow lookup of the dynamically generated class within the module via
# its extended name
setattr(mdl, extnm, cls)
# Change the dynamically generated class name to the extended name
if hasattr(cls, '__qualname__'):
cls.__qualname__ = extnm
else:
cls.__name__ = extnm | def _fix_dynamic_class_lookup(cls, pstfx) | Fix name lookup problem that prevents pickling of dynamically
defined classes.
Parameters
----------
cls : class
Dynamically generated class to which fix is to be applied
pstfx : string
Postfix that can be used to identify dynamically generated classes
that are equivalent by construction | 4.364373 | 4.472595 | 0.975803 |
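The lookup problem both helpers address can be reproduced in a minimal sketch (standard library only; `make_cls` and `Dynamic` are hypothetical names, not part of the code above):

```python
import pickle
import sys

def make_cls(base, pstfx):
    class Dynamic(base):
        pass
    # Without a module-level alias matching the class's name, pickle
    # cannot locate Dynamic by qualified name and raises PicklingError
    extnm = '_Dynamic_' + pstfx
    Dynamic.__qualname__ = Dynamic.__name__ = extnm
    setattr(sys.modules[Dynamic.__module__], extnm, Dynamic)
    return Dynamic

C = make_cls(dict, 'dict')
assert isinstance(pickle.loads(pickle.dumps(C())), C)
```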
if fmtmap is None:
fmtmap = {}
fwdthn = fprec + fwdthdlt
# Construct a list specifying the format string for each field.
# Use format string from fmtmap if specified, otherwise use
# a %d specifier with field width fwdth0 for the first field,
# or a %e specifier with field width fwdthn and precision
# fprec
fldfmt = [fmtmap[lbl] if lbl in fmtmap else
(('%%%dd' % (fwdth0)) if idx == 0 else
(('%%%d.%de' % (fwdthn, fprec))))
for idx, lbl in enumerate(hdrlbl)]
fmtstr = (' ').join(fldfmt)
# Construct a list of field widths for each field by extracting
# field widths from field format strings
cre = re.compile(r'%-?(\d+)')
fldwid = []
for fmt in fldfmt:
mtch = cre.match(fmt)
if mtch is None:
raise ValueError("Format string '%s' does not contain field "
"width" % fmt)
else:
fldwid.append(int(mtch.group(1)))
# Construct list of field header strings formatted to the
# appropriate field width, and join to construct a combined field
# header string
hdrlst = [('%-*s' % (w, t)) for t, w in zip(hdrlbl, fldwid)]
hdrstr = (' ').join(hdrlst)
return hdrstr, fmtstr, len(hdrstr) | def solve_status_str(hdrlbl, fmtmap=None, fwdth0=4, fwdthdlt=6,
fprec=2) | Construct header and format details for status display of an
iterative solver.
Parameters
----------
hdrlbl : tuple of strings
Tuple of field header strings
fmtmap : dict or None, optional (default None)
A dict providing a mapping from field header strings to print
format strings, providing a mechanism for fields with print
formats that depart from the standard format
fwdth0 : int, optional (default 4)
Number of characters in first field formatted for integers
fwdthdlt : int, optional (default 6)
The width of fields formatted for floats is the sum of the value
of this parameter and the field precision
fprec : int, optional (default 2)
Precision of fields formatted for floats
Returns
-------
hdrstr : string
Complete header string
fmtstr : string
Complete print formatting string for numeric values
nsep : integer
Number of characters in separator string | 3.659066 | 3.41891 | 1.070244 |
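A usage sketch of `solve_status_str` with hypothetical field labels; the expected output follows from the default format parameters:

```python
hdrstr, fmtstr, nsep = solve_status_str(('Itn', 'Fnc', 'Rsdl'))
print(hdrstr)            # left-justified column titles
print('-' * nsep)
# With the defaults, fmtstr is '%4d  %8.2e  %8.2e'
print(fmtstr % (0, 1.234567e1, 5.678901e-3))
```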
# Take no action if self.dtype exists and is not None
if not hasattr(self, 'dtype') or self.dtype is None:
# DataType option overrides explicitly specified data type
if opt['DataType'] is None:
self.dtype = dtype
else:
self.dtype = np.dtype(opt['DataType']) | def set_dtype(self, opt, dtype) | Set the `dtype` attribute. If opt['DataType'] has a value
other than None, it overrides the `dtype` parameter of this
method. No changes are made if the `dtype` attribute already
exists and has a value other than None.
Parameters
----------
opt : :class:`cdict.ConstrainedDict` object
Algorithm options
dtype : data-type
Data type for working variables (overridden by 'DataType' option) | 6.041076 | 4.819912 | 1.253358 |
# If `val` is None and `dval` is not None, replace it with dval
if dval is not None and val is None:
val = dval
# If dtype is not None, assume val is numeric and convert it to
# type dtype
if dtype is not None and val is not None:
if isinstance(dtype, type):
val = dtype(val)
else:
val = dtype.type(val)
# Set attribute value depending on reset flag and whether the
# attribute exists and is None
if reset or not hasattr(self, name) or \
(hasattr(self, name) and getattr(self, name) is None):
setattr(self, name, val) | def set_attr(self, name, val, dval=None, dtype=None, reset=False) | Set an object attribute by its name. The attribute value
can be specified as a primary value `val`, and as a default
value `dval` that will be used if the primary value is None.
This arrangement allows an attribute to be set from an entry
in an options object, passed as `val`, while specifying a
default value to use, passed as `dval` in the event that the
options entry is None. Unless `reset` is True, the attribute
is only set if it doesn't exist, or if it exists with value
None. This arrangement allows for attributes to be set in
both base and derived class initialisers, with the derived
class value taking preference.
Parameters
----------
name : string
Attribute name
val : any
Primary attribute value
dval : any
Default attribute value in case `val` is None
dtype : data-type, optional (default None)
If the `dtype` parameter is not None, the attribute `name` is
set to `val` (which is assumed to be of numeric type) after
conversion to the specified type.
reset : bool, optional (default False)
Flag indicating whether the attribute should be assigned
unconditionally. If False, an existing attribute value other
than None will not be overwritten. | 2.847087 | 2.656147 | 1.071886 |
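A hedged illustration of the precedence rules, assuming `set_attr` is available as a plain module-level function that can be mixed in as a method:

```python
import numpy as np

class Demo:
    set_attr = set_attr   # mix in as a method

d = Demo()
d.set_attr('rho', None, dval=1.0, dtype=np.float32)
print(d.rho)   # 1.0 as float32: default dval used since val is None
d.set_attr('rho', 5.0)
print(d.rho)   # still 1.0: existing non-None value is not overwritten
d.set_attr('rho', 5.0, reset=True)
print(d.rho)   # 5.0: reset forces unconditional assignment
```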
rank = comm.Get_rank() # Id of this process
size = comm.Get_size() # Total number of processes in communicator
end = 0
# The scan should be done with ints, not floats
ranklen = int(arrlen / size)
if rank < arrlen % size:
ranklen += 1
# Compute upper limit based on the sizes covered by the processes
# with less rank
end = comm.scan(sendobj=ranklen, op=MPI.SUM)
begin = end - ranklen
return (begin, end) | def _get_rank_limits(comm, arrlen) | Determine the chunk of the grid that has to be computed per
process. The grid has been 'flattened' and has arrlen length. The
chunk assigned to each process depends on its rank in the MPI
communicator.
Parameters
----------
comm : MPI communicator object
Describes topology of network: number of processes, rank
arrlen : int
Number of points in grid search.
Returns
-------
begin : int
Index, with respect to 'flattened' grid, where the chunk
for this process starts.
end : int
Index, with respect to 'flattened' grid, where the chunk
for this process ends. | 6.298768 | 7.143213 | 0.881784 |
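The partitioning can be checked without MPI; a serial sketch (`rank_limits` is a hypothetical stand-in that mirrors the scan):

```python
def rank_limits(rank, size, arrlen):
    # Each rank gets arrlen // size points; the first arrlen % size
    # ranks get one extra point
    ranklen = arrlen // size + (1 if rank < arrlen % size else 0)
    # Exclusive prefix sum, playing the role of comm.scan above
    begin = sum(arrlen // size + (1 if r < arrlen % size else 0)
                for r in range(rank))
    return begin, begin + ranklen

# arrlen=10 over size=4 ranks: [(0, 3), (3, 6), (6, 8), (8, 10)]
print([rank_limits(r, 4, 10) for r in range(4)])
```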
# Open status display
fmtstr, nsep = self.display_start()
# Start solve timer
self.timer.start(['solve', 'solve_wo_func', 'solve_wo_rsdl',
'solve_wo_btrack'])
# Main optimisation iterations
for self.k in range(self.k, self.k + self.opt['MaxMainIter']):
# Update record of X from previous iteration
self.store_prev()
# Backtracking line search if enabled
if self.opt['BackTrack', 'Enabled'] and self.k >= 0:
self.timer.stop('solve_wo_btrack')
# Compute backtracking
self.backtracking()
self.timer.start('solve_wo_btrack')
else:
# Compute just proximal step
self.proximal_step()
# Update by combining previous iterates
self.combination_step()
# Compute residuals and stopping thresholds
self.timer.stop(['solve_wo_rsdl', 'solve_wo_btrack'])
if not self.opt['FastSolve']:
frcxd, adapt_tol = self.compute_residuals()
self.timer.start('solve_wo_rsdl')
# Compute and record other iteration statistics and
# display iteration stats if Verbose option enabled
self.timer.stop(['solve_wo_func', 'solve_wo_rsdl',
'solve_wo_btrack'])
if not self.opt['FastSolve']:
itst = self.iteration_stats(self.k, frcxd)
self.itstat.append(itst)
self.display_status(fmtstr, itst)
self.timer.start(['solve_wo_func', 'solve_wo_rsdl',
'solve_wo_btrack'])
# Call callback function if defined
if self.opt['Callback'] is not None:
if self.opt['Callback'](self):
break
# Stop if residual-based stopping tolerances reached
if not self.opt['FastSolve']:
if frcxd < adapt_tol:
break
# Increment iteration count
self.k += 1
# Record solve time
self.timer.stop(['solve', 'solve_wo_func', 'solve_wo_rsdl',
'solve_wo_btrack'])
# Print final separator string if Verbose option enabled
self.display_end(nsep)
return self.getmin() | def solve(self) | Start (or re-start) optimisation. This method implements the
framework for the iterations of a FISTA algorithm. There is
sufficient flexibility in overriding the component methods that
it calls that it is usually not necessary to override this method
in derived classes.
If option ``Verbose`` is ``True``, the progress of the
optimisation is displayed at every iteration. At termination
of this method, attribute :attr:`itstat` is a list of tuples
representing statistics of each iteration, unless option
``FastSolve`` is ``True`` and option ``Verbose`` is ``False``.
Attribute :attr:`timer` is an instance of :class:`.util.Timer`
that provides the following labelled timers:
``init``: Time taken for object initialisation by
:meth:`__init__`
``solve``: Total time taken by call(s) to :meth:`solve`
``solve_wo_func``: Total time taken by call(s) to
:meth:`solve`, excluding time taken to compute functional
value and related iteration statistics
``solve_wo_rsdl`` : Total time taken by call(s) to
:meth:`solve`, excluding time taken to compute functional
value and related iteration statistics as well as time taken
to compute residuals
``solve_wo_btrack`` : Total time taken by call(s) to
:meth:`solve`, excluding time taken to compute functional
value and related iteration statistics as well as time taken
to compute residuals and to implement the ``BackTrack``
mechanism | 4.200945 | 3.408813 | 1.232378 |
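A hedged sketch of reading the labelled timers after a solve, where `b` is a hypothetical solver instance derived from this class:

```python
b.solve()
for lbl in ('init', 'solve', 'solve_wo_func', 'solve_wo_rsdl',
            'solve_wo_btrack'):
    print('%-16s %.3f s' % (lbl, b.timer.elapsed(lbl)))
```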
if grad is None:
grad = self.eval_grad()
V = self.Y - (1. / self.L) * grad
self.X = self.eval_proxop(V)
return grad | def proximal_step(self, grad=None) | Compute proximal update (gradient descent + regularization). | 6.553319 | 6.237304 | 1.050665 |
# Update t step
tprv = self.t
self.t = 0.5 * float(1. + np.sqrt(1. + 4. * tprv**2))
# Update Y
if not self.opt['FastSolve']:
self.Yprv = self.Y.copy()
self.Y = self.X + ((tprv - 1.) / self.t) * (self.X - self.Xprv) | def combination_step(self) | Build next update by a smart combination of previous updates.
(standard FISTA :cite:`beck-2009-fast`). | 5.54596 | 5.047621 | 1.098727 |
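For reference, the update implemented above is the standard FISTA momentum rule from :cite:`beck-2009-fast`:

```latex
t_{k+1} = \frac{1 + \sqrt{1 + 4 t_k^2}}{2}, \qquad
\mathbf{y}_{k+1} = \mathbf{x}_{k+1} +
\frac{t_k - 1}{t_{k+1}} \left( \mathbf{x}_{k+1} - \mathbf{x}_k \right)
```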
gradY = self.eval_grad()  # Given Y(f), this computes gradY(f)
maxiter = self.L_maxiter
iterBTrack = 0
linesearch = 1
while linesearch and iterBTrack < maxiter:
self.proximal_step(gradY) # Given gradY(f), L, this updates X(f)
f = self.obfn_f(self.var_x())
Dxy = self.eval_Dxy()
Q = self.obfn_f(self.var_y()) + \
self.eval_linear_approx(Dxy, gradY) + \
(self.L / 2.) * np.linalg.norm(Dxy.flatten(), 2)**2
if f <= Q:
linesearch = 0
else:
self.L *= self.L_gamma_u
iterBTrack += 1
self.F = f
self.Q = Q
self.iterBTrack = iterBTrack
# Update auxiliary sequence
self.combination_step() | def standard_backtrack(self) | Estimate step size L by computing a linesearch that
guarantees that F <= Q according to the standard FISTA
backtracking strategy in :cite:`beck-2009-fast`.
This also updates variable Y. | 6.459764 | 5.784864 | 1.116666 |
self.L *= self.L_gamma_d
maxiter = self.L_maxiter
iterBTrack = 0
linesearch = 1
self.store_Yprev()
while linesearch and iterBTrack < maxiter:
t = float(1. + np.sqrt(1. + 4. * self.L * self.Tk)) / (2. * self.L)
T = self.Tk + t
y = (self.Tk * self.var_xprv() + t * self.ZZ) / T
self.update_var_y(y)
gradY = self.proximal_step() # Given Y(f), L, this updates X(f)
f = self.obfn_f(self.var_x())
Dxy = self.eval_Dxy()
Q = self.obfn_f(self.var_y()) + \
self.eval_linear_approx(Dxy, gradY) + \
(self.L / 2.) * np.linalg.norm(Dxy.flatten(), 2)**2
if f <= Q:
linesearch = 0
else:
self.L *= self.L_gamma_u
iterBTrack += 1
self.Tk = T
self.ZZ += (t * self.L * (self.var_x() - self.var_y()))
self.F = f
self.Q = Q
self.iterBTrack = iterBTrack | def robust_backtrack(self) | Estimate step size L by computing a linesearch that
guarantees that F <= Q according to the robust FISTA
backtracking strategy in :cite:`florea-2017-robust`.
This also updates all the supporting variables. | 5.558023 | 5.233961 | 1.061915 |
r = self.rsdl()
adapt_tol = self.opt['RelStopTol']
if self.opt['AutoStop', 'Enabled']:
adapt_tol = self.tau0 / (1. + self.k)
return r, adapt_tol | def compute_residuals(self) | Compute residuals and stopping thresholds. | 11.950301 | 9.632377 | 1.240639 |
hdrmap = {'Itn': 'Iter'}
hdrmap.update(cls.hdrval_objfun)
hdrmap.update({'Rsdl': 'Rsdl', 'F': 'F_Btrack', 'Q': 'Q_Btrack',
               'It_Bt': 'IterBTrack', 'L': 'L'})
return hdrmap | def hdrval(cls) | Construct dictionary mapping display column title to
IterationStats entries. | 11.914172 | 11.775415 | 1.011784 |
tk = self.timer.elapsed(self.opt['IterTimer'])
tpl = (k,) + self.eval_objfn() + \
(frcxd, self.F, self.Q, self.iterBTrack, self.L) + \
self.itstat_extra() + (tk,)
return type(self).IterationStats(*tpl) | def iteration_stats(self, k, frcxd) | Construct iteration stats record tuple. | 12.796123 | 10.833296 | 1.181185 |
fval = self.obfn_f(self.X)
gval = self.obfn_g(self.X)
obj = fval + gval
return (obj, fval, gval) | def eval_objfn(self) | Compute components of objective function as well as total
contribution to objective function. | 3.906413 | 3.385605 | 1.15383 |
if gradf is None:
gradf = self.eval_grad()
self.Vf[:] = self.Yf - (1. / self.L) * gradf
V = sl.irfftn(self.Vf, self.cri.Nv, self.cri.axisN)
self.X[:] = self.eval_proxop(V)
self.Xf = sl.rfftn(self.X, None, self.cri.axisN)
return gradf | def proximal_step(self, gradf=None) | Compute proximal update (gradient descent + constraint).
Variables are mapped back and forth between input and
frequency domains. | 5.157978 | 4.792406 | 1.076282 |
# Update t step
tprv = self.t
self.t = 0.5 * float(1. + np.sqrt(1. + 4. * tprv**2))
# Update Y
if not self.opt['FastSolve']:
self.Yfprv = self.Yf.copy()
self.Yf = self.Xf + ((tprv - 1.) / self.t) * (self.Xf - self.Xfprv) | def combination_step(self) | Update auxiliary state by a smart combination of previous
updates in the frequency domain (standard FISTA
:cite:`beck-2009-fast`). | 6.152603 | 5.671999 | 1.084733 |
r
return np.sum(np.real(np.conj(Dxy) * gradY)) | def eval_linear_approx(self, Dxy, gradY) | r"""Compute term :math:`\langle \nabla f(\mathbf{y}),
\mathbf{x} - \mathbf{y} \rangle` (in frequency domain) that is
part of the quadratic function :math:`Q_L` used for
backtracking. Since this class computes the backtracking in
the DFT, it is important to preserve the DFT scaling. | 9.453895 | 7.436784 | 1.271234 |
r = np.asarray(vref, dtype=np.float64).ravel()
c = np.asarray(vcmp, dtype=np.float64).ravel()
return np.mean(np.abs(r - c)**2) | def mse(vref, vcmp) | Compute Mean Squared Error (MSE) between two images.
Parameters
----------
vref : array_like
Reference image
vcmp : array_like
Comparison image
Returns
-------
x : float
MSE between `vref` and `vcmp` | 2.358032 | 2.491932 | 0.946267 |
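A quick numeric check of `mse` on toy arrays:

```python
import numpy as np

ref = np.array([0., 1., 2., 3.])
cmp = np.array([0., 1., 2., 4.])
print(mse(ref, cmp))   # mean(|ref - cmp|**2) = 1/4 = 0.25
```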
dv = np.var(vref)
with np.errstate(divide='ignore'):
rt = dv / mse(vref, vcmp)
return 10.0 * np.log10(rt) | def snr(vref, vcmp) | Compute Signal to Noise Ratio (SNR) of two images.
Parameters
----------
vref : array_like
Reference image
vcmp : array_like
Comparison image
Returns
-------
x : float
SNR of `vcmp` with respect to `vref` | 4.513562 | 5.886715 | 0.766737 |
if rng is None:
rng = vref.max() - vref.min()
dv = (rng + 0.0)**2
with np.errstate(divide='ignore'):
rt = dv / mse(vref, vcmp)
return 10.0 * np.log10(rt) | def psnr(vref, vcmp, rng=None) | Compute Peak Signal to Noise Ratio (PSNR) of two images. The PSNR
calculation defaults to using the less common definition in terms
of the actual range (i.e. max minus min) of the reference signal
instead of the maximum possible range for the data type
(i.e. :math:`2^b-1` for a :math:`b` bit representation).
Parameters
----------
vref : array_like
Reference image
vcmp : array_like
Comparison image
rng : None or int, optional (default None)
Signal range, either the value to use (e.g. 255 for 8 bit samples) or
None, in which case the actual range of the reference signal is used
Returns
-------
x : float
PSNR of `vcmp` with respect to `vref` | 3.906975 | 4.410536 | 0.885828 |
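A usage sketch relating `psnr` to `mse`, with toy 8 bit values:

```python
import numpy as np

ref = np.array([[0, 128], [255, 64]], dtype=np.uint8)
cmp = ref.copy()
cmp[0, 0] += 2          # single-pixel error, so mse(ref, cmp) = 1.0
# With rng=255 this is 10*log10(255**2 / 1.0)
print(psnr(ref, cmp, rng=255))   # approximately 48.13
```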
msedeg = mse(vref, vdeg)
mserst = mse(vref, vrst)
with np.errstate(divide='ignore'):
rt = msedeg / mserst
return 10.0 * np.log10(rt) | def isnr(vref, vdeg, vrst) | Compute Improvement Signal to Noise Ratio (ISNR) for reference,
degraded, and restored images.
Parameters
----------
vref : array_like
Reference image
vdeg : array_like
Degraded image
vrst : array_like
Restored image
Returns
-------
x : float
ISNR of `vrst` with respect to `vref` and `vdeg` | 3.357614 | 3.987182 | 0.842102 |
blrvar = np.var(vblr)
nsevar = np.var(vnsy - vblr)
with np.errstate(divide='ignore'):
rt = blrvar / nsevar
return 10.0 * np.log10(rt) | def bsnr(vblr, vnsy) | Compute Blurred Signal to Noise Ratio (BSNR) for a blurred and noisy
image.
Parameters
----------
vblr : array_like
Blurred noise free image
vnsy : array_like
Blurred image with additive noise
Returns
-------
x : float
BSNR of `vnsy` with respect to `vblr` | 4.302083 | 4.584685 | 0.93836 |
# Calculate difference, promoting to float if vref and vcmp have integer
# dtype
emap = np.asarray(vref, dtype=np.float64) - \
np.asarray(vcmp, dtype=np.float64)
# Input images in reference code on which this implementation is
# based are assumed to be on range [0,...,255].
if rescale:
emap *= (255.0 / vref.max())
sigma = 0.8
herr = ndimage.gaussian_filter(emap, sigma)
score = np.mean(herr**2)
return score | def pamse(vref, vcmp, rescale=True) | Compute Perceptual-fidelity Aware Mean Squared Error (PAMSE) IQA metric
:cite:`xue-2013-perceptual`. This implementation is a translation of the
reference Matlab implementation provided by the authors of
:cite:`xue-2013-perceptual`.
Parameters
----------
vref : array_like
Reference image
vcmp : array_like
Comparison image
rescale : bool, optional (default True)
Rescale inputs so that `vref` has a maximum value of 255, as assumed
by reference implementation
Returns
-------
score : float
PAMSE IQA metric | 5.663913 | 5.489201 | 1.031828 |
# Input images in reference code on which this implementation is
# based are assumed to be on range [0,...,255].
if rescale:
scl = (255.0 / vref.max())
else:
scl = np.float32(1.0)
T = 170.0
dwn = 2
dx = np.array([[1, 0, -1], [1, 0, -1], [1, 0, -1]]) / 3.0
dy = dx.T
ukrn = np.ones((2, 2)) / 4.0
aveY1 = signal.convolve2d(scl * vref, ukrn, mode='same', boundary='symm')
aveY2 = signal.convolve2d(scl * vcmp, ukrn, mode='same', boundary='symm')
Y1 = aveY1[0::dwn, 0::dwn]
Y2 = aveY2[0::dwn, 0::dwn]
IxY1 = signal.convolve2d(Y1, dx, mode='same', boundary='symm')
IyY1 = signal.convolve2d(Y1, dy, mode='same', boundary='symm')
grdMap1 = np.sqrt(IxY1**2 + IyY1**2)
IxY2 = signal.convolve2d(Y2, dx, mode='same', boundary='symm')
IyY2 = signal.convolve2d(Y2, dy, mode='same', boundary='symm')
grdMap2 = np.sqrt(IxY2**2 + IyY2**2)
quality_map = (2*grdMap1*grdMap2 + T) / (grdMap1**2 + grdMap2**2 + T)
score = np.std(quality_map)
if returnMap:
return (score, quality_map)
else:
return score | def gmsd(vref, vcmp, rescale=True, returnMap=False) | Compute Gradient Magnitude Similarity Deviation (GMSD) IQA metric
:cite:`xue-2014-gradient`. This implementation is a translation of the
reference Matlab implementation provided by the authors of
:cite:`xue-2014-gradient`.
Parameters
----------
vref : array_like
Reference image
vcmp : array_like
Comparison image
rescale : bool, optional (default True)
Rescale inputs so that `vref` has a maximum value of 255, as assumed
by reference implementation
returnMap : bool, optional (default False)
Flag indicating whether quality map should be returned in addition to
scalar score
Returns
-------
score : float
GMSD IQA metric
quality_map : ndarray
Quality map | 2.369661 | 2.285211 | 1.036955 |
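A hedged usage sketch of the two IQA metrics on random test data (inputs are assumed to be on the range [0, 255], per the reference implementations):

```python
import numpy as np

ref = np.random.rand(64, 64) * 255
dst = ref + np.random.randn(64, 64)
print(pamse(ref, dst))
score, qmap = gmsd(ref, dst, returnMap=True)
print(score, qmap.shape)   # quality map is downsampled by 2: (32, 32)
```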
# Extract method selection argument or set default
if 'method' in kwargs:
method = kwargs['method']
del kwargs['method']
else:
method = 'cns'
# Assign base class depending on method selection argument
if method == 'ism':
base = ConvCnstrMODMaskDcpl_IterSM
elif method == 'cg':
base = ConvCnstrMODMaskDcpl_CG
elif method == 'cns':
base = ConvCnstrMODMaskDcpl_Consensus
else:
raise ValueError('Unknown ConvCnstrMODMaskDcpl solver method %s'
% method)
# Nested class with dynamically determined inheritance
class ConvCnstrMODMaskDcpl(base):
def __init__(self, *args, **kwargs):
super(ConvCnstrMODMaskDcpl, self).__init__(*args, **kwargs)
# Allow pickling of objects of type ConvCnstrMODMaskDcpl
_fix_dynamic_class_lookup(ConvCnstrMODMaskDcpl, method)
# Return object of the nested class type
return ConvCnstrMODMaskDcpl(*args, **kwargs) | def ConvCnstrMODMaskDcpl(*args, **kwargs) | A wrapper function that dynamically defines a class derived from
one of the implementations of the Convolutional Constrained MOD
with Mask Decoupling problems, and returns an object instantiated
with the provided parameters. The wrapper is designed to allow the
appropriate object to be created by calling this function using the
same syntax as would be used if it were a class. The specific
implementation is selected by use of an additional keyword
argument 'method'. Valid values are:
- ``'ism'`` :
Use the implementation defined in :class:`.ConvCnstrMODMaskDcpl_IterSM`.
This method works well for a small number of training images, but is
very slow for larger training sets.
- ``'cg'`` :
Use the implementation defined in :class:`.ConvCnstrMODMaskDcpl_CG`.
This method is slower than ``'ism'`` for small training sets, but has
better run time scaling as the training set grows.
- ``'cns'`` :
Use the implementation defined in
:class:`.ConvCnstrMODMaskDcpl_Consensus`. This method is the best choice
for large training sets.
The default value is ``'cns'``. | 3.524389 | 2.565576 | 1.373722 |
# Assign base class depending on method selection argument
if method == 'ism':
base = ConvCnstrMODMaskDcpl_IterSM.Options
elif method == 'cg':
base = ConvCnstrMODMaskDcpl_CG.Options
elif method == 'cns':
base = ConvCnstrMODMaskDcpl_Consensus.Options
else:
raise ValueError('Unknown ConvCnstrMODMaskDcpl solver method %s'
% method)
# Nested class with dynamically determined inheritance
class ConvCnstrMODMaskDcplOptions(base):
def __init__(self, opt):
super(ConvCnstrMODMaskDcplOptions, self).__init__(opt)
# Allow pickling of objects of type ConvCnstrMODMaskDcplOptions
_fix_dynamic_class_lookup(ConvCnstrMODMaskDcplOptions, method)
# Return object of the nested class type
return ConvCnstrMODMaskDcplOptions(opt) | def ConvCnstrMODMaskDcplOptions(opt=None, method='cns') | A wrapper function that dynamically defines a class derived from
the Options class associated with one of the implementations of
the Convolutional Constrained MOD with Mask Decoupling problem,
and returns an object instantiated with the provided parameters.
The wrapper is designed to allow the appropriate object to be
created by calling this function using the same syntax as would be
used if it were a class. The specific implementation is selected
by use of an additional keyword argument 'method'. Valid values are
as specified in the documentation for :func:`ConvCnstrMODMaskDcpl`. | 3.765089 | 3.562943 | 1.056736 |
if self.opt['Y0'] is None:
return np.zeros(ushape, dtype=self.dtype)
else:
# If initial Y is non-zero, initial U is chosen so that
# the relevant dual optimality criterion (see (3.10) in
# boyd-2010-distributed) is satisfied.
Ub0 = (self.W**2) * self.block_sep0(self.Y) / self.rho
Ub1 = self.block_sep1(self.Y)
return self.block_cat(Ub0, Ub1) | def uinit(self, ushape) | Return initialiser for working variable U | 7.614602 | 7.113163 | 1.070494 |
if self.opt['LinSolveCheck']:
Zop = lambda x: sl.inner(self.Zf, x, axis=self.cri.axisM)
ZHop = lambda x: sl.inner(np.conj(self.Zf), x,
axis=self.cri.axisK)
ax = ZHop(Zop(self.Xf)) + self.Xf
self.xrrs = sl.rrs(ax, b)
else:
self.xrrs = None | def xstep_check(self, b) | r"""Check the minimisation of the Augmented Lagrangian with
respect to :math:`\mathbf{x}` by method `xstep` defined in
derived classes. This method should be called at the end of any
`xstep` method. | 8.155298 | 8.05528 | 1.012417 |
AXU = self.AX + self.U
Y0 = (self.rho*(self.block_sep0(AXU) - self.S)) / (self.W**2 +
self.rho)
Y1 = self.Pcn(self.block_sep1(AXU))
self.Y = self.block_cat(Y0, Y1) | def ystep(self) | r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{y}`. | 11.955232 | 10.629848 | 1.124685 |
self.AXnr = self.cnst_A(self.X, self.Xf)
if self.rlx == 1.0:
self.AX = self.AXnr
else:
alpha = self.rlx
self.AX = alpha*self.AXnr + (1-alpha)*self.block_cat(
self.var_y0() + self.S, self.var_y1()) | def relax_AX(self) | Implement relaxation if option ``RelaxParam`` != 1.0. | 7.152822 | 6.593637 | 1.084807 |
return np.swapaxes(
Y[(slice(None),)*self.blkaxis + (slice(0, self.blkidx),)],
self.cri.axisK, self.cri.axisM) | def block_sep0(self, Y) | r"""Separate variable into component corresponding to
:math:`\mathbf{y}_0` in :math:`\mathbf{y}\;\;`. The method from
parent class :class:`.ADMMTwoBlockCnstrnt` is overridden here to
allow swapping of K (multi-image) and M (filter) axes in block 0
so that it can be concatenated on axis M with block 1. This is
necessary because block 0 has the dimensions of S while block 1
has the dimensions of D. Handling of multi-channel signals
substantially complicates this issue. There are two multi-channel
cases: multi-channel dictionary and signal (Cd = C > 1), and
single-channel dictionary with multi-channel signal (Cd = 1, C >
1). In the former case, S and D shapes are (N x C x K x 1) and
(N x C x 1 x M) respectively. In the latter case,
:meth:`.__init__` has already taken care of combining C
(multi-channel) and K (multi-image) axes in S, so the S and D
shapes are (N x 1 x C K x 1) and (N x 1 x 1 x M) respectively. | 12.225887 | 11.334359 | 1.078657 |
return np.concatenate((np.swapaxes(Y0, self.cri.axisK,
self.cri.axisM), Y1),
axis=self.blkaxis) | def block_cat(self, Y0, Y1) | r"""Concatenate components corresponding to :math:`\mathbf{y}_0`
and :math:`\mathbf{y}_1` to form :math:`\mathbf{y}\;\;`. The
method from parent class :class:`.ADMMTwoBlockCnstrnt` is
overridden here to allow swapping of K (multi-image) and M
(filter) axes in block 0 so that it can be concatenated on axis
M with block 1. This is necessary because block 0 has the
dimensions of S while block 1 has the dimensions of D. Handling
of multi-channel signals substantially complicates this
issue. There are two multi-channel cases: multi-channel
dictionary and signal (Cd = C > 1), and single-channel
dictionary with multi-channel signal (Cd = 1, C > 1). In the
former case, S and D shapes are (N x C x K x 1) and (N x C x 1 x
M) respectively. In the latter case, :meth:`.__init__` has
already taken care of combining C (multi-channel) and K
(multi-image) axes in S, so the S and D shapes are (N x 1 x C K
x 1) and (N x 1 x 1 x M) respectively. | 12.573731 | 9.642084 | 1.304047 |
# This calculation involves non-negligible computational cost. It
# should be possible to disable relevant diagnostic information
# (dual residual) to avoid this cost.
Y0f = sl.rfftn(Y0, None, self.cri.axisN)
return sl.irfftn(sl.inner(np.conj(self.Zf), Y0f,
axis=self.cri.axisK), self.cri.Nv,
self.cri.axisN) | def cnst_A0T(self, Y0) | r"""Compute :math:`A_0^T \mathbf{y}_0` component of
:math:`A^T \mathbf{y}` (see :meth:`.ADMMTwoBlockCnstrnt.cnst_AT`). | 9.822392 | 9.259719 | 1.060766 |
dfd = self.obfn_g0(self.obfn_g0var())
cns = self.obfn_g1(self.obfn_g1var())
return (dfd, cns) | def eval_objfn(self) | Compute components of regularisation function as well as total
contribution to objective function. | 7.549992 | 6.788647 | 1.11215 |
return np.linalg.norm((self.Pcn(Y1) - Y1)) | def obfn_g1(self, Y1) | r"""Compute :math:`g_1(\mathbf{y_1})` component of ADMM objective
function. | 20.126724 | 18.154442 | 1.108639 |
return self.rho*np.linalg.norm(self.cnst_AT(self.U)) | def rsdl_s(self, Yprev, Y) | Compute dual residual vector. | 21.36132 | 18.776247 | 1.137678 |
self.YU[:] = self.Y - self.U
self.block_sep0(self.YU)[:] += self.S
YUf = sl.rfftn(self.YU, None, self.cri.axisN)
b = sl.inner(np.conj(self.Zf), self.block_sep0(YUf),
axis=self.cri.axisK) + self.block_sep1(YUf)
self.Xf[:] = sl.solvemdbi_ism(self.Zf, 1.0, b, self.cri.axisM,
self.cri.axisK)
self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)
self.xstep_check(b) | def xstep(self) | r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{x}`. | 6.254999 | 5.856908 | 1.067969 |
super(ConvCnstrMODMaskDcpl_Consensus, self).relax_AX()
self.AX1nr = sl.irfftn(sl.inner(self.Zf, self.swapaxes(self.Xf),
axis=self.cri.axisM),
self.cri.Nv, self.cri.axisN)
if self.rlx == 1.0:
self.AX1 = self.AX1nr
else:
alpha = self.rlx
self.AX1 = alpha*self.AX1nr + (1-alpha)*(self.Y1 + self.S) | def relax_AX(self) | The parent class method that this method overrides only
implements the relaxation step for the variables of the baseline
consensus algorithm. This method calls the overridden method and
then implements the relaxation step for the additional variables
required for the mask decoupling modification to the baseline
algorithm. | 8.452846 | 7.280375 | 1.161045 |
self.YU1[:] = self.Y1 - self.U1
self.ZSf = np.conj(self.Zf) * (self.Sf + sl.rfftn(
self.YU1, None, self.cri.axisN))
rho = self.rho
self.rho = 1.0
super(ConvCnstrMODMaskDcpl_Consensus, self).xstep()
self.rho = rho | def xstep(self) | The xstep of the baseline consensus class from which this
class is derived is re-used to implement the xstep of the
modified algorithm by replacing ``self.ZSf``, which is constant
in the baseline algorithm, with a quantity derived from the
additional variables ``self.Y1`` and ``self.U1``. It is also
necessary to set the penalty parameter to unity for the duration
of the x step. | 14.0013 | 7.814492 | 1.791709 |
super(ConvCnstrMODMaskDcpl_Consensus, self).ystep()
AXU1 = self.AX1 + self.U1
self.Y1 = self.rho*(AXU1 - self.S) / (self.W**2 + self.rho) | def ystep(self) | The parent class ystep method is overridden to also perform
the ystep for the additional variables introduced in the
modification to the baseline algorithm. | 19.719498 | 16.462637 | 1.197833 |
super(ConvCnstrMODMaskDcpl_Consensus, self).ustep()
self.U1 += self.AX1 - self.Y1 - self.S | def ustep(self) | The parent class ustep method is overridden to also perform
the ustep for the additional variables introduced in the
modification to the baseline algorithm. | 35.940769 | 34.290634 | 1.048122 |
Ef = sl.inner(self.Zf, self.obfn_fvarf(), axis=self.cri.axisM) \
- self.Sf
return (np.linalg.norm(self.W * sl.irfftn(Ef, self.cri.Nv,
self.cri.axisN))**2) / 2.0 | def obfn_dfd(self) | r"""Compute data fidelity term :math:`(1/2) \| W \left( \sum_m
\mathbf{d}_m * \mathbf{x}_m - \mathbf{s} \right) \|_2^2`. | 12.523978 | 12.950996 | 0.967028 |
# The full primary residual is straightforward to compute from
# the primary residuals for the baseline algorithm and for the
# additional variables
r0 = self.rsdl_r(self.AXnr, self.Y)
r1 = self.AX1nr - self.Y1 - self.S
r = np.sqrt(np.sum(r0**2) + np.sum(r1**2))
# The full dual residual is more complicated to compute than the
# full primary residual
ATU = self.swapaxes(self.U) + sl.irfftn(
np.conj(self.Zf) * sl.rfftn(self.U1, self.cri.Nv, self.cri.axisN),
self.cri.Nv, self.cri.axisN)
s = self.rho * np.linalg.norm(ATU)
# The normalisation factor for the full primal residual is also not
# straightforward
nAX = np.sqrt(np.linalg.norm(self.AXnr)**2 +
np.linalg.norm(self.AX1nr)**2)
nY = np.sqrt(np.linalg.norm(self.Y)**2 +
np.linalg.norm(self.Y1)**2)
rn = max(nAX, nY, np.linalg.norm(self.S))
# The normalisation factor for the full dual residual is
# straightforward to compute
sn = self.rho * np.sqrt(np.linalg.norm(self.U)**2 +
np.linalg.norm(self.U1)**2)
# Final residual values and stopping tolerances depend on
# whether standard or normalised residuals are specified via the
# options object
if self.opt['AutoRho', 'StdResiduals']:
epri = np.sqrt(self.Nc)*self.opt['AbsStopTol'] + \
rn*self.opt['RelStopTol']
edua = np.sqrt(self.Nx)*self.opt['AbsStopTol'] + \
sn*self.opt['RelStopTol']
else:
if rn == 0.0:
rn = 1.0
if sn == 0.0:
sn = 1.0
r /= rn
s /= sn
epri = np.sqrt(self.Nc)*self.opt['AbsStopTol']/rn + \
self.opt['RelStopTol']
edua = np.sqrt(self.Nx)*self.opt['AbsStopTol']/sn + \
self.opt['RelStopTol']
return r, s, epri, edua | def compute_residuals(self) | Compute residuals and stopping thresholds. The parent class
method is overridden to ensure that the residual calculations
include the additional variables introduced in the modification
to the baseline algorithm. | 3.894473 | 3.692116 | 1.054808 |
super(RobustPCA, self).solve()
return self.X, self.Y | def solve(self) | Start (or re-start) optimisation. | 15.100923 | 12.232013 | 1.234541 |
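A hedged usage sketch, assuming a constructor of the form `RobustPCA(S, lmbda=None, opt=None)` (the argument order is an assumption, not confirmed by the code above):

```python
import numpy as np

S = np.dot(np.random.randn(64, 4), np.random.randn(4, 64))  # low-rank part
S[np.random.rand(64, 64) > 0.95] += 10.0                    # sparse outliers
b = RobustPCA(S, lmbda=0.1)
X, Y = b.solve()   # X: low-rank component, Y: sparse component
```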
self.X, self.ss = sp.prox_nuclear(self.S - self.Y - self.U,
1/self.rho) | def xstep(self) | r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{x}`. | 26.283745 | 18.246349 | 1.440493 |
self.Y = np.asarray(sp.prox_l1(self.S - self.AX - self.U,
self.lmbda/self.rho), dtype=self.dtype) | def ystep(self) | r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{y}`. | 16.272482 | 11.890792 | 1.368494 |
if self.opt['fEvalX']:
return self.X
else:
return self.cnst_c() - self.cnst_B(self.Y) | def obfn_fvar(self) | Variable to be evaluated in computing regularisation term,
depending on 'fEvalX' option value. | 14.122071 | 7.300998 | 1.934266 |
if self.opt['fEvalX']:
rnn = np.sum(self.ss)
else:
rnn = sp.norm_nuclear(self.obfn_fvar())
rl1 = np.sum(np.abs(self.obfn_gvar()))
cns = np.linalg.norm(self.X + self.Y - self.S)
obj = rnn + self.lmbda*rl1
return (obj, rnn, rl1, cns) | def eval_objfn(self) | Compute components of objective function as well as total
contribution to objective function. | 8.285515 | 7.579284 | 1.093179 |
vn = np.sqrt(np.sum(v**2, 0))
vn[vn == 0] = 1.0
return np.asarray(v / vn, dtype=v.dtype) | def normalise(v) | Normalise columns of matrix.
Parameters
----------
v : array_like
Array with columns to be normalised
Returns
-------
vnrm : ndarray
Normalised array | 3.470863 | 4.154955 | 0.835355 |
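For example, a unit-norm column scaling that leaves an all-zero column unchanged:

```python
import numpy as np

v = np.array([[3., 0.],
              [4., 0.]])
print(normalise(v))   # [[0.6, 0.], [0.8, 0.]]; zero column untouched
```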
self.Z = np.asarray(Z, dtype=self.dtype)
self.SZT = self.S.dot(Z.T)
# Factorise dictionary for efficient solves
self.lu, self.piv = sl.lu_factor(Z, self.rho)
self.lu = np.asarray(self.lu, dtype=self.dtype) | def setcoef(self, Z) | Set coefficient array. | 6.670438 | 6.222125 | 1.072051 |
self.X = np.asarray(sl.lu_solve_AATI(
    self.Z, self.rho, self.SZT + self.rho*(self.Y - self.U),
    self.lu, self.piv), dtype=self.dtype) | def xstep(self) | r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{x}`. | 20.295996 | 15.869741 | 1.278912 |
self.lu, self.piv = sl.lu_factor(self.Z, self.rho)
self.lu = np.asarray(self.lu, dtype=self.dtype) | def rhochange(self) | Re-factorise matrix when rho changes | 6.706736 | 4.853309 | 1.381889 |
@functools.wraps(func)
def wrapped(*args, **kwargs):
args = list(args)
for n, a in enumerate(args):
if isinstance(a, np.ndarray):
args[n] = cp.asarray(a)
for k, v in kwargs.items():
if isinstance(v, np.ndarray):
kwargs[k] = cp.asarray(v)
rtn = func(*args, **kwargs)
if isinstance(rtn, (list, tuple)):
    # Rebuild the sequence since tuples do not support item assignment
    rtn = type(rtn)(cp.asnumpy(a) if isinstance(a, cp.ndarray)
                    else a for a in rtn)
elif isinstance(rtn, cp.ndarray):
    rtn = cp.asnumpy(rtn)
return rtn
return wrapped | def cupy_wrapper(func) | A wrapper function that converts numpy ndarray arguments to cupy
arrays, and convert any cupy arrays returned by the wrapped
function into numpy ndarrays. | 1.500666 | 1.450309 | 1.034722 |
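A hedged usage sketch, assuming `cupy` is installed and imported as `cp`; the decorated function is hypothetical:

```python
import numpy as np

@cupy_wrapper
def scaled_sum(x, y, alpha=1.0):
    # x and y arrive as cupy arrays, so this expression runs on the GPU
    return alpha * (x + y)

out = scaled_sum(np.ones(4), np.ones(4), alpha=2.0)
print(type(out))   # <class 'numpy.ndarray'>: converted back from cupy
```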
# Convert int axis into a tuple
if isinstance(axis, int):
axis = (axis,)
# Handle negative axis indices
axis = tuple([k if k >= 0 else x.ndim + k for k in axis])
# Complement of axis set on full set of axes of input x
caxis = tuple(set(range(x.ndim)) - set(axis))
# Permute axes of x (generalised transpose) so that axes over
# which operation is to be applied are all at the end
prm = caxis + axis
xt = np.transpose(x, axes=prm)
xts = xt.shape
# Reshape into a 2D array, with the remaining (complement) axes
# flattened into the row index and the axes specified by the axis
# parameter flattened into the column index
xtr = xt.reshape((np.prod(xts[0:len(caxis)]), -1))
# Return reshaped array and a tuple containing the information
# necessary to undo the entire operation
return xtr, (xts, prm) | def ndto2d(x, axis=-1) | Convert a multi-dimensional array into a 2d array, with the axes
specified by the `axis` parameter flattened into the column
index, and the remaining axes flattened into the row index.
This operation cannot be properly achieved by a simple
reshape operation since a reshape would shuffle element order if
the axes to be grouped together were not consecutive: this is
avoided by first permuting the axes so that the grouped axes are
consecutive.
Parameters
----------
x : array_like
Multi-dimensional input array
axis : int or tuple of ints, optional (default -1)
Axes of `x` to be grouped together to form the columns of the
output 2d array.
Returns
-------
xtr : ndarray
2D output array
rsi : tuple
A tuple containing the details of transformation applied in the
conversion to 2D | 6.075932 | 4.691793 | 1.295013 |
# Extract components of conversion information tuple
xts = rsi[0]
prm = rsi[1]
# Reshape x to the shape obtained after permuting axes in ndto2d
xt = xtr.reshape(xts)
# Undo axis permutation performed in ndto2d
x = np.transpose(xt, np.argsort(prm))
# Return array with shape corresponding to that of the input to ndto2d
return x | def ndfrom2d(xtr, rsi) | Undo the array shape conversion applied by :func:`ndto2d`,
returning the input 2D array to its original shape.
Parameters
----------
xtr : array_like
Two-dimensional input array
rsi : tuple
A tuple containing the shape of the axis-permuted array and the
permutation order applied in :func:`ndto2d`.
Returns
-------
x : ndarray
Multi-dimensional output array | 8.286704 | 6.52033 | 1.270903 |
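A round-trip sketch of the pair on a small array; the shapes follow directly from the code above:

```python
import numpy as np

x = np.arange(24).reshape(2, 3, 4)
xtr, rsi = ndto2d(x, axis=(0, 2))
print(xtr.shape)   # (3, 8): axes 0 and 2 grouped into columns, axis 1 rows
print(np.array_equal(ndfrom2d(xtr, rsi), x))   # True
```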
AXU = self.AX + self.U
self.Y[..., 0:-1] = sp.prox_l2(AXU[..., 0:-1], self.mu/self.rho)
self.Y[..., -1] = sp.prox_l1(AXU[..., -1],
(self.lmbda/self.rho) * self.Wl1) | def ystep(self) | r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{y}`. | 7.71509 | 6.738295 | 1.144962 |
rl1 = np.linalg.norm((self.Wl1 * self.obfn_g1var()).ravel(), 1)
rtv = np.sum(np.sqrt(np.sum(self.obfn_g0var()**2, axis=-1)))
return (self.lmbda*rl1 + self.mu*rtv, rl1, rtv) | def obfn_reg(self) | Compute regularisation term and contribution to objective
function. | 6.693721 | 5.717762 | 1.170689 |
if Xf is None:
Xf = sl.rfftn(X, axes=self.cri.axisN)
return self.Wtv[..., np.newaxis] * sl.irfftn(
self.Gf * Xf[..., np.newaxis], self.cri.Nv, axes=self.cri.axisN) | def cnst_A0(self, X, Xf=None) | r"""Compute :math:`A_0 \mathbf{x}` component of ADMM problem
constraint. In this case :math:`A_0 \mathbf{x} = (\Gamma_0^T \;\;
\Gamma_1^T \;\; \ldots )^T \mathbf{x}`. | 6.154924 | 5.979457 | 1.029345 |
return np.concatenate((self.cnst_A0(X, Xf),
self.cnst_A1(X)), axis=-1) | def cnst_A(self, X, Xf=None) | r"""Compute :math:`A \mathbf{x}` component of ADMM problem
constraint. In this case :math:`A \mathbf{x} = (\Gamma_0^T \;\;
\Gamma_1^T \;\; \ldots \;\; I)^T \mathbf{x}`. | 6.685651 | 7.274501 | 0.919053 |
return np.sum(self.cnst_A0T(X), axis=-1) + self.cnst_A1T(X) | def cnst_AT(self, X) | r"""Compute :math:`A^T \mathbf{x}` where :math:`A \mathbf{x}` is
a component of ADMM problem constraint. In this case
:math:`A^T \mathbf{x} = (\Gamma_0^T \;\; \Gamma_1^T \;\; \ldots
\;\; I) \mathbf{x}`. | 7.844149 | 7.456191 | 1.052032 |
# We need to keep the non-relaxed version of AX since it is
# required for computation of primal residual r
self.AXnr = self.cnst_A(self.X, self.Xf)
if self.rlx == 1.0:
# If RelaxParam option is 1.0 there is no relaxation
self.AX = self.AXnr
else:
# Avoid calling cnst_c() more than once in case it is expensive
# (e.g. due to allocation of a large block of memory)
if not hasattr(self, '_cnst_c'):
self._cnst_c = self.cnst_c()
# Compute relaxed version of AX
alpha = self.rlx
self.AX = alpha*self.AXnr - (1-alpha)*(self.cnst_B(self.Y) -
self._cnst_c) | def relax_AX(self) | Implement relaxation if option ``RelaxParam`` != 1.0. | 5.636659 | 5.013283 | 1.124345 |
if D is not None:
self.D = np.asarray(D, dtype=self.dtype)
self.Df = sl.rfftn(self.D, self.cri.Nv, self.cri.axisN)
self.GDf = self.Gf * (self.Wtv * self.Df)[..., np.newaxis]
# Compute D^H S
self.DSf = np.conj(self.Df) * self.Sf
if self.cri.Cd > 1:
self.DSf = np.sum(self.DSf, axis=self.cri.axisC, keepdims=True) | def setdict(self, D=None) | Set dictionary array. | 4.428289 | 4.414048 | 1.003226 |
Y1 = Y[..., self.cri.M:]
# If cri.Cd > 1 (multi-channel dictionary), we need to undo the
# reshape performed in block_cat
if self.cri.Cd > 1:
shp = list(Y1.shape)
shp[self.cri.axisM] = self.cri.dimN
shp[self.cri.axisC] = self.cri.Cd
Y1 = Y1.reshape(shp)
# Axes are swapped here for similar reasons to those
# motivating swapping in cbpdn.ConvTwoBlockCnstrnt.block_sep0
Y1 = np.swapaxes(Y1[..., np.newaxis], self.cri.axisM, -1)
return Y1 | def block_sep1(self, Y) | Separate variable into component corresponding to Y1 in Y. | 7.185776 | 6.920424 | 1.038343 |
# Axes are swapped here for similar reasons to those
# motivating swapping in cbpdn.ConvTwoBlockCnstrnt.block_cat
Y1sa = np.swapaxes(Y1, self.cri.axisM, -1)[..., 0]
# If cri.Cd > 1 (multi-channel dictionary) Y0 has a singleton
# channel axis but Y1 has a non-singleton channel axis. To make
# it possible to concatenate Y0 and Y1, we reshape Y1 by a
# partial ravel of axisM and axisC onto axisM.
if self.cri.Cd > 1:
shp = list(Y1sa.shape)
shp[self.cri.axisM] *= shp[self.cri.axisC]
shp[self.cri.axisC] = 1
Y1sa = Y1sa.reshape(shp)
return np.concatenate((Y0, Y1sa), axis=self.cri.axisM) | def block_cat(self, Y0, Y1) | Concatenate components corresponding to Y0 and Y1 blocks
into Y. | 6.200484 | 6.289706 | 0.985815 |
AXU = self.AX + self.U
self.block_sep0(self.Y)[:] = sp.prox_l1(
self.block_sep0(AXU), (self.lmbda/self.rho) * self.Wl1)
self.block_sep1(self.Y)[:] = sp.prox_l2(
self.block_sep1(AXU), self.mu/self.rho, axis=(self.cri.axisC, -1)) | def ystep(self) | r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{y}`. | 8.49893 | 7.674831 | 1.107377 |
# Use of self.block_sep0(self.AXnr) instead of self.cnst_A0(self.X)
# reduces number of calls to self.cnst_A0
return self.var_y0() if self.opt['gEvalY'] else \
self.block_sep0(self.AXnr) | def obfn_g0var(self) | Variable to be evaluated in computing the TV regularisation
term, depending on the ``gEvalY`` option value. | 15.614508 | 11.347817 | 1.375992 |
# Use of self.block_sep1(self.AXnr) instead of self.cnst_A1(self.X)
# reduces number of calls to self.cnst_A1
return self.var_y1() if self.opt['gEvalY'] else \
self.block_sep1(self.AXnr) | def obfn_g1var(self) | r"""Variable to be evaluated in computing the :math:`\ell_1`
regularisation term, depending on the ``gEvalY`` option value. | 19.168615 | 14.12709 | 1.356869 |
if Xf is None:
Xf = sl.rfftn(X, axes=self.cri.axisN)
return sl.irfftn(sl.inner(
self.GDf, Xf[..., np.newaxis], axis=self.cri.axisM), self.cri.Nv,
self.cri.axisN) | def cnst_A1(self, X, Xf=None) | r"""Compute :math:`A_1 \mathbf{x}` component of ADMM problem
constraint. In this case :math:`A_1 \mathbf{x} = (\Gamma_0^T \;\;
\Gamma_1^T \;\; \ldots )^T \mathbf{x}`. | 6.114775 | 5.646993 | 1.082837 |
Y1f = sl.rfftn(Y1, None, axes=self.cri.axisN)
return sl.irfftn(np.conj(self.GDf) * Y1f, self.cri.Nv,
self.cri.axisN) | def cnst_A1T(self, Y1) | r"""Compute :math:`A_1^T \mathbf{y}_1` component of
:math:`A^T \mathbf{y}`. In this case :math:`A_1^T \mathbf{y}_1 =
(\Gamma_0^T \;\; \Gamma_1^T \;\; \ldots) \mathbf{y}_1`. | 7.094579 | 7.301353 | 0.97168 |
return self.cnst_A0T(self.block_sep0(Y)) + \
np.sum(self.cnst_A1T(self.block_sep1(Y)), axis=-1) | def cnst_AT(self, Y) | r"""Compute :math:`A^T \mathbf{y}`. In this case
:math:`A^T \mathbf{y} = (I \;\; \Gamma_0^T \;\; \Gamma_1^T \;\;
\ldots) \mathbf{y}`. | 7.278774 | 6.485399 | 1.122333 |
self.D = np.asarray(D, dtype=self.dtype)
self.DTS = self.D.T.dot(self.S)
# Factorise dictionary for efficient solves
self.lu, self.piv = sl.cho_factor(self.D, self.rho)
self.lu = np.asarray(self.lu, dtype=self.dtype) | def setdict(self, D) | Set dictionary array. | 5.44842 | 5.121305 | 1.063873 |
self.X = np.asarray(sl.cho_solve_ATAI(
self.D, self.rho, self.DTS + self.rho * (self.Y - self.U),
self.lu, self.piv), dtype=self.dtype)
if self.opt['LinSolveCheck']:
b = self.DTS + self.rho * (self.Y - self.U)
ax = self.D.T.dot(self.D.dot(self.X)) + self.rho*self.X
self.xrrs = sl.rrs(ax, b)
else:
self.xrrs = None | def xstep(self) | r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{x}`. | 6.5609 | 5.928817 | 1.106612 |
return 0.5*np.linalg.norm((self.D.dot(self.obfn_fvar()) - self.S))**2 | def obfn_dfd(self) | r"""Compute data fidelity term :math:`(1/2) \| D \mathbf{x} -
\mathbf{s} \|_2^2`. | 14.818292 | 9.643889 | 1.536547 |
self.lu, self.piv = sl.cho_factor(self.D, self.rho)
self.lu = np.asarray(self.lu, dtype=self.dtype) | def rhochange(self) | Re-factorise matrix when rho changes. | 7.126117 | 5.195018 | 1.371721 |
self.Y = np.asarray(sp.prox_l1(self.AX + self.U,
(self.lmbda / self.rho) * self.wl1),
dtype=self.dtype)
super(BPDN, self).ystep() | def ystep(self) | r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{y}`. | 12.416013 | 9.578526 | 1.296234 |
self.Y = np.asarray(sp.prox_l1l2(
self.AX + self.U, (self.lmbda / self.rho) * self.wl1,
self.mu / self.rho, axis=-1), dtype=self.dtype)
GenericBPDN.ystep(self) | def ystep(self) | r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{y}`. | 12.015044 | 10.28943 | 1.167708 |
self.Y = np.asarray(sp.proj_l1(self.AX + self.U, self.gamma, axis=0),
dtype=self.dtype)
super(BPDNProjL1, self).ystep() | def ystep(self) | r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{y}`. | 15.875358 | 12.187541 | 1.302589 |
dfd = self.obfn_dfd()
prj = sp.proj_l1(self.obfn_gvar(), self.gamma, axis=0)
cns = np.linalg.norm(prj - self.obfn_gvar())
return (dfd, cns) | def eval_objfn(self) | Compute components of regularisation function as well as total
contribution to objective function. | 10.054409 | 8.559482 | 1.174652 |
if self.opt['Y0'] is None:
return np.zeros(ushape, dtype=self.dtype)
else:
# If initial Y is non-zero, initial U is chosen so that
# the relevant dual optimality criterion (see (3.10) in
# boyd-2010-distributed) is satisfied.
U0 = np.sign(self.block_sep0(self.Y)) / self.rho
U1 = self.block_sep1(self.Y) - self.S
return self.block_cat(U0, U1) | def uinit(self, ushape) | Return initialiser for working variable U. | 7.910183 | 7.284658 | 1.085869 |
self.D = np.asarray(D, dtype=self.dtype)
# Factorise dictionary for efficient solves
self.lu, self.piv = sl.cho_factor(self.D, 1.0)
self.lu = np.asarray(self.lu, dtype=self.dtype) | def setdict(self, D) | Set dictionary array. | 5.623528 | 5.161973 | 1.089415 |
YU = self.Y - self.U
self.X = np.asarray(sl.cho_solve_ATAI(
self.D, 1.0, self.block_sep0(YU) +
self.D.T.dot(self.block_sep1(YU)), self.lu, self.piv),
dtype=self.dtype) | def xstep(self) | r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{x}`. | 11.759018 | 10.354983 | 1.13559 |
AXU = self.AX + self.U
Y0 = np.asarray(sp.prox_l1(self.block_sep0(AXU), self.wl1 / self.rho),
dtype=self.dtype)
if self.opt['NonNegCoef']:
Y0[Y0 < 0.0] = 0.0
Y1 = sl.proj_l2ball(self.block_sep1(AXU), self.S, self.epsilon, axes=0)
self.Y = self.block_cat(Y0, Y1) | def ystep(self) | r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{y}`. | 8.203942 | 7.548796 | 1.086788 |
obj = np.linalg.norm((self.wl1 * self.obfn_g0var()).ravel(), 1)
cns = np.linalg.norm(sl.proj_l2ball(
self.obfn_g1var(), self.S, self.epsilon, axes=0) -
self.obfn_g1var())
return (obj, cns) | def eval_objfn(self) | r"""Compute components of objective function as well as total
contribution to objective function. The objective function is
:math:`\| \mathbf{x} \|_1` and the constraint violation
measure is :math:`P(\mathbf{x}) - \mathbf{x}` where
:math:`P(\mathbf{x})` is the projection into the constraint
set. | 11.313956 | 9.695802 | 1.166892 |
if self.opt['Y0'] is None:
return np.zeros(ushape, dtype=self.dtype)
else:
# If initial Y is non-zero, initial U is chosen so that
# the relevant dual optimality criterion (see (3.10) in
# boyd-2010-distributed) is satisfied.
return (self.Wdf/self.rho)*np.sign(self.Y) | def uinit(self, ushape) | Return initialiser for working variable U. | 9.645727 | 8.659322 | 1.113913 |
self.X = sl.idctii(self.Gamma*sl.dctii(self.Y + self.S - self.U,
axes=self.axes), axes=self.axes)
if self.opt['LinSolveCheck']:
self.xrrs = sl.rrs(
self.X + (self.lmbda/self.rho) *
sl.idctii((self.Alpha**2) *
sl.dctii(self.X, axes=self.axes),
axes=self.axes), self.Y + self.S - self.U)
else:
self.xrrs = None | def xstep(self) | r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{x}`. | 6.709798 | 6.105496 | 1.098977 |
self.Y = sp.prox_l1(self.AX - self.S + self.U, self.Wdf / self.rho) | def ystep(self) | r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{y}`. | 33.684063 | 22.966116 | 1.466685 |
self.Gamma = 1.0 / (1.0 + (self.lmbda/self.rho)*(self.Alpha**2)) | def rhochange(self) | Action to be taken when rho parameter is changed. | 9.106956 | 6.954474 | 1.30951 |
if self.opt['gEvalY']:
return self.Y
else:
return self.cnst_A(self.X) - self.cnst_c() | def obfn_gvar(self) | Variable to be evaluated in computing regularisation term,
depending on 'gEvalY' option value. | 15.600191 | 7.136277 | 2.186041 |
gvr = self.obfn_gvar()
dfd = np.sum(np.abs(self.Wdf * gvr))
reg = 0.5*np.linalg.norm(
sl.idctii(self.Alpha*sl.dctii(self.X, axes=self.axes),
axes=self.axes))**2
obj = dfd + self.lmbda*reg
return (obj, dfd, reg) | def eval_objfn(self) | r"""Compute components of objective function as well as total
contribution to objective function. Data fidelity term is
:math:`(1/2) \| \mathbf{x} - \mathbf{s} \|_2^2` and
regularisation term is :math:`\| D \mathbf{x} \|_2^2`. | 9.883881 | 8.432636 | 1.172099 |
GPUInfo = namedtuple('GPUInfo', ['name', 'driver', 'totalmem', 'freemem'])
gpus = GPUtil.getGPUs()
info = []
for g in gpus:
info.append(GPUInfo(g.name, g.driver, g.memoryTotal, g.memoryFree))
return info | def gpu_info() | Return a list of namedtuples representing attributes of each GPU
device. | 2.457144 | 2.165399 | 1.134731 |
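A hedged usage sketch, assuming the GPUtil package is installed and at least one GPU is visible:

```python
for g in gpu_info():
    print('%s: driver %s, %s of %s MB free' %
          (g.name, g.driver, g.freemem, g.totalmem))
```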