platform | repo_name | name | ext | path | size | source_encoding | md5 | text |
---|---|---|---|---|---|---|---|---|
github | skovnats/madmm-master | sympositivedefinitefactory.m | .m | madmm-master/functional_maps_L21norm/help_functions/manopt/manopt/manifolds/symfixedrank/sympositivedefinitefactory.m | 5,506 | utf_8 | 352c21fe40d0e4f75e7c0fa89ea4ab04 |
function M = sympositivedefinitefactory(n)
% Manifold of n-by-n symmetric positive definite matrices with
% the bi-invariant geometry.
%
% function M = sympositivedefinitefactory(n)
%
% A point X on the manifold is represented as a symmetric positive definite
% matrix X (nxn).
%
% The following material is referenced from Chapter 6 of the book:
% Rajendra Bhatia, "Positive definite matrices",
% Princeton University Press, 2007.
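%
% Example (editor's illustrative sketch, not part of the original file):
% assuming Manopt is on the path, one could recover a given SPD matrix A
% (chosen arbitrarily below) by minimizing the Frobenius distance to it:
%
%   n = 5;
%   A = diag(1:n);                          % an arbitrary SPD target
%   problem.M = sympositivedefinitefactory(n);
%   problem.cost  = @(X) norm(X - A, 'fro')^2;
%   problem.egrad = @(X) 2*(X - A);         % Euclidean gradient
%   X = trustregions(problem);              % any Manopt solver works here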
% This file is part of Manopt: www.manopt.org.
% Original author: Bamdev Mishra, August 29, 2013.
% Contributors: Nicolas Boumal
% Change log:
%
% March 5, 2014 (NB)
% There were a number of mistakes in the code owing to the tacit
% assumption that if X and eta are symmetric, then X\eta is
% symmetric too, which is not the case. See discussion on the Manopt
% forum started on Jan. 19, 2014. Functions norm, dist, exp and log
% were modified accordingly. Furthermore, they only require matrix
% inversion (as well as matrix log or matrix exp), not matrix square
% roots or their inverse.
%
% July 28, 2014 (NB)
% The dim() function returned n*(n-1)/2 instead of n*(n+1)/2.
% Implemented proper parallel transport from Sra and Hosseini (not
% used by default).
% Also added symmetrization in exp and log (to be sure).
symm = @(X) .5*(X+X');
M.name = @() sprintf('Symmetric positive definite geometry of %dx%d matrices', n, n);
M.dim = @() n*(n+1)/2;
% The choice of metric is motivated by the symmetry present in the
% space. The metric on the positive definite cone is its natural
% bi-invariant metric.
M.inner = @(X, eta, zeta) trace( (X\eta) * (X\zeta) );
% Notice that X\eta is *not* symmetric in general.
M.norm = @(X, eta) sqrt(trace((X\eta)^2));
% Same here: X\Y is not symmetric in general. There should be no need
% to take the real part, but rounding errors may cause a small
% imaginary part to appear, so we discard it.
M.dist = @(X, Y) sqrt(real(trace((logm(X\Y))^2)));
M.typicaldist = @() sqrt(n*(n+1)/2);
M.egrad2rgrad = @egrad2rgrad;
function eta = egrad2rgrad(X, eta)
eta = X*symm(eta)*X;
end
M.ehess2rhess = @ehess2rhess;
function Hess = ehess2rhess(X, egrad, ehess, eta)
% Directional derivatives of the Riemannian gradient
Hess = X*symm(ehess)*X + 2*symm(eta*symm(egrad)*X);
% Correction factor for the non-constant metric
Hess = Hess - symm(eta*symm(egrad)*X);
end
M.proj = @(X, eta) symm(eta);
M.tangent = M.proj;
M.tangent2ambient = @(X, eta) eta;
M.retr = @exponential;
M.exp = @exponential;
function Y = exponential(X, eta, t)
if nargin < 3
t = 1.0;
end
% The symm() and real() calls are mathematically not necessary but
% are numerically necessary.
Y = symm(X*real(expm(X\(t*eta))));
end
M.log = @logarithm;
function H = logarithm(X, Y)
% Same remark regarding the calls to symm() and real().
H = symm(X*real(logm(X\Y)));
end
M.hash = @(X) ['z' hashmd5(X(:))];
% Generate a random symmetric positive definite matrix following a
% certain distribution. The particular choice of a distribution is of
% course arbitrary, and specific applications might require different
% ones.
M.rand = @random;
function X = random()
D = diag(1+rand(n, 1));
[Q, R] = qr(randn(n)); %#ok<NASGU>
X = Q*D*Q';
end
% Generate a uniformly random unit-norm tangent vector at X.
M.randvec = @randomvec;
function eta = randomvec(X)
eta = symm(randn(n));
nrm = M.norm(X, eta);
eta = eta / nrm;
end
M.lincomb = @lincomb;
M.zerovec = @(X) zeros(n);
% Poor man's vector transport: exploit the fact that all tangent spaces
% are the set of symmetric matrices, so that the identity is a sort of
% vector transport. It may perform poorly if the origin and target (X1
% and X2) are far apart though. This should not be the case for typical
% optimization algorithms, which perform small steps.
M.transp = @(X1, X2, eta) eta;
% For reference, a proper vector transport is given here, following
% work by Sra and Hosseini (2014), "Conic geometric optimisation on the
% manifold of positive definite matrices",
% http://arxiv.org/abs/1312.1039
% This will not be used by default. To force the use of this transport,
% call "M.transp = M.paralleltransp;" on your M returned by the present
% factory.
M.paralleltransp = @parallel_transport;
function zeta = parallel_transport(X, Y, eta)
E = sqrtm((Y/X));
zeta = E*eta*E';
end
% vec and mat are not isometries, because of the unusual inner metric.
M.vec = @(X, U) U(:);
M.mat = @(X, u) reshape(u, n, n);
M.vecmatareisometries = @() false;
end
% Linear combination of tangent vectors
function d = lincomb(X, a1, d1, a2, d2) %#ok<INUSL>
if nargin == 3
d = a1*d1;
elseif nargin == 5
d = a1*d1 + a2*d2;
else
error('Bad use of sympositivedefinitefactory.lincomb.');
end
end
|
github | skovnats/madmm-master | symfixedrankYYfactory.m | .m | madmm-master/functional_maps_L21norm/help_functions/manopt/manopt/manifolds/symfixedrank/symfixedrankYYfactory.m | 3,628 | utf_8 | ed10332d6c3f8af67578d34eb7817b8c |
function M = symfixedrankYYfactory(n, k)
% Manifold of n-by-n symmetric positive semidefinite matrices of rank k.
%
% function M = symfixedrankYYfactory(n, k)
%
% The geometry is based on the paper,
% M. Journee, P.-A. Absil, F. Bach and R. Sepulchre,
% "Low-Rank Optimization on the Cone of Positive Semidefinite Matrices",
% SIAM Journal on Optimization, 2010.
%
% Paper link: http://www.di.ens.fr/~fbach/journee2010_sdp.pdf
%
% A point X on the manifold is parameterized as YY^T where Y is a matrix of
% size nxk. The matrix Y (nxk) is a full column-rank matrix. Hence, we deal
% directly with Y.
%
% Notice that this manifold is not complete: if optimization leads Y to be
% rank-deficient, the geometry will break down. Hence, this geometry should
% only be used if it is expected that the points of interest will have rank
% exactly k. Reduce k if that is not the case.
%
% An alternative, complete, geometry for positive semidefinite matrices of
% rank k is described in Bonnabel and Sepulchre 2009, "Riemannian Metric
% and Geometric Mean for Positive Semidefinite Matrices of Fixed Rank",
% SIAM Journal on Matrix Analysis and Applications.
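%
% Example (editor's illustrative sketch, not part of the original file):
% assuming Manopt is on the path, a rank-k PSD approximation of a given
% symmetric matrix A can be computed by optimizing over Y (with X = YY'):
%
%   n = 100; k = 5;
%   A = randn(n); A = (A + A')/2;           % an arbitrary symmetric matrix
%   problem.M = symfixedrankYYfactory(n, k);
%   problem.cost  = @(Y) norm(Y*Y' - A, 'fro')^2;
%   problem.egrad = @(Y) 4*(Y*Y' - A)*Y;    % Euclidean gradient w.r.t. Y
%   Y = trustregions(problem);              % X = Y*Y' is the PSD estimate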
% This file is part of Manopt: www.manopt.org.
% Original author: Bamdev Mishra, Dec. 30, 2012.
% Contributors:
% Change log:
% July 10, 2013 (NB)
% Added vec, mat, tangent, tangent2ambient ;
% Correction for the dimension of the manifold.
M.name = @() sprintf('YY'' quotient manifold of %dx%d PSD matrices of rank %d', n, k);
M.dim = @() k*n - k*(k-1)/2;
% Euclidean metric on the total space
M.inner = @(Y, eta, zeta) trace(eta'*zeta);
M.norm = @(Y, eta) sqrt(M.inner(Y, eta, eta));
M.dist = @(Y, Z) error('symfixedrankYYfactory.dist not implemented yet.');
M.typicaldist = @() 10*k;
M.proj = @projection;
function etaproj = projection(Y, eta)
% Projection onto the horizontal space
YtY = Y'*Y;
SS = YtY;
AS = Y'*eta - eta'*Y;
Omega = lyap(SS, -AS);
etaproj = eta - Y*Omega;
end
M.tangent = M.proj;
M.tangent2ambient = @(Y, eta) eta;
M.retr = @retraction;
function Ynew = retraction(Y, eta, t)
if nargin < 3
t = 1.0;
end
Ynew = Y + t*eta;
end
M.egrad2rgrad = @(Y, eta) eta;
M.ehess2rhess = @(Y, egrad, ehess, U) M.proj(Y, ehess);
M.exp = @exponential;
function Ynew = exponential(Y, eta, t)
if nargin < 3
t = 1.0;
end
Ynew = retraction(Y, eta, t);
warning('manopt:symfixedrankYYfactory:exp', ...
['Exponential for symmetric, fixed-rank ' ...
'manifold not implemented yet. Used retraction instead.']);
end
% Notice that the hash of two equivalent points will be different...
M.hash = @(Y) ['z' hashmd5(Y(:))];
M.rand = @random;
function Y = random()
Y = randn(n, k);
end
M.randvec = @randomvec;
function eta = randomvec(Y)
eta = randn(n, k);
eta = projection(Y, eta);
nrm = M.norm(Y, eta);
eta = eta / nrm;
end
M.lincomb = @lincomb;
M.zerovec = @(Y) zeros(n, k);
M.transp = @(Y1, Y2, d) projection(Y2, d);
M.vec = @(Y, u_mat) u_mat(:);
M.mat = @(Y, u_vec) reshape(u_vec, [n, k]);
M.vecmatareisometries = @() true;
end
% Linear combination of tangent vectors
function d = lincomb(Y, a1, d1, a2, d2) %#ok<INUSL>
if nargin == 3
d = a1*d1;
elseif nargin == 5
d = a1*d1 + a2*d2;
else
error('Bad use of symfixedrankYYfactory.lincomb.');
end
end
|
github | skovnats/madmm-master | complexcirclefactory.m | .m | madmm-master/functional_maps_L21norm/help_functions/manopt/manopt/manifolds/complexcircle/complexcirclefactory.m | 3,696 | utf_8 | f317f1fdbb76c8fb6cb2c39cee5c0db0 |
function M = complexcirclefactory(n)
% Returns a manifold struct to optimize over unit-modulus complex numbers.
%
% function M = complexcirclefactory()
% function M = complexcirclefactory(n)
%
% Description of vectors z in C^n (complex) such that each component z(i)
% has unit modulus. The manifold structure is the Riemannian submanifold
% structure from the embedding space R^2 x ... x R^2, i.e., the complex
% circle is identified with the unit circle in the real plane.
%
% By default, n = 1.
%
% See also spherecomplexfactory
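%
% Example (editor's illustrative sketch, not part of the original file):
% assuming Manopt is on the path, one can project a complex vector w
% (arbitrary below) onto the manifold by minimizing the squared distance:
%
%   n = 10;
%   w = randn(n, 1) + 1i*randn(n, 1);       % an arbitrary complex vector
%   problem.M = complexcirclefactory(n);
%   problem.cost  = @(z) norm(z - w)^2;
%   problem.egrad = @(z) 2*(z - w);         % Euclidean gradient (real inner product)
%   z = trustregions(problem);              % expected solution: w ./ abs(w)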
% This file is part of Manopt: www.manopt.org.
% Original author: Nicolas Boumal, Dec. 30, 2012.
% Contributors:
% Change log:
%
% July 7, 2014 (NB): Added ehess2rhess function.
%
if ~exist('n', 'var')
n = 1;
end
M.name = @() sprintf('Complex circle (S^1)^%d', n);
M.dim = @() n;
M.inner = @(z, v, w) real(v'*w);
M.norm = @(x, v) norm(v);
M.dist = @(x, y) norm(acos(conj(x) .* y));
M.typicaldist = @() pi*sqrt(n);
M.proj = @(z, u) u - real( conj(u) .* z ) .* z;
M.tangent = M.proj;
% For Riemannian submanifolds, converting a Euclidean gradient into a
% Riemannian gradient amounts to an orthogonal projection.
M.egrad2rgrad = M.proj;
M.ehess2rhess = @ehess2rhess;
function rhess = ehess2rhess(z, egrad, ehess, zdot)
rhess = M.proj(z, ehess - real(z.*conj(egrad)).*zdot);
end
M.exp = @exponential;
function y = exponential(z, v, t)
if nargin <= 2
t = 1.0;
end
y = zeros(n, 1);
tv = t*v;
nrm_tv = abs(tv);
% We need to distinguish between very small steps and the others.
% For very small steps, we use a limit version of the exponential
% (which actually coincides with the retraction), so as to not
% divide by very small numbers.
mask = nrm_tv > 1e-6;
y(mask) = z(mask).*cos(nrm_tv(mask)) + ...
tv(mask).*(sin(nrm_tv(mask))./nrm_tv(mask));
y(~mask) = z(~mask) + tv(~mask);
y(~mask) = y(~mask) ./ abs(y(~mask));
end
M.retr = @retraction;
function y = retraction(z, v, t)
if nargin <= 2
t = 1.0;
end
y = z+t*v;
y = y ./ abs(y);
end
M.log = @logarithm;
function v = logarithm(x1, x2)
v = M.proj(x1, x2 - x1);
di = M.dist(x1, x2);
nv = norm(v);
v = v * (di / nv);
end
M.hash = @(z) ['z' hashmd5( [real(z(:)) ; imag(z(:))] ) ];
M.rand = @random;
function z = random()
z = randn(n, 1) + 1i*randn(n, 1);
z = z ./ abs(z);
end
M.randvec = @randomvec;
function v = randomvec(z)
% i*z(k) is a basis vector of the tangent vector to the k-th circle
v = randn(n, 1) .* (1i*z);
v = v / norm(v);
end
M.lincomb = @lincomb;
M.zerovec = @(x) zeros(n, 1);
M.transp = @(x1, x2, d) M.proj(x2, d);
M.pairmean = @pairmean;
function z = pairmean(z1, z2)
z = z1+z2;
z = z ./ abs(z);
end
M.vec = @(x, u_mat) [real(u_mat) ; imag(u_mat)];
M.mat = @(x, u_vec) u_vec(1:n) + 1i*u_vec((n+1):end);
M.vecmatareisometries = @() true;
end
% Linear combination of tangent vectors
function d = lincomb(x, a1, d1, a2, d2) %#ok<INUSL>
if nargin == 3
d = a1*d1;
elseif nargin == 5
d = a1*d1 + a2*d2;
else
error('Bad use of complexcirclefactory.lincomb.');
end
end
|
github | skovnats/madmm-master | fixedrankfactory_3factors_preconditioned.m | .m | madmm-master/functional_maps_L21norm/help_functions/manopt/manopt/manifolds/fixedrank/fixedrankfactory_3factors_preconditioned.m | 11,730 | utf_8 | 25828327278d65ab2cb851ea6574833c |
function M = fixedrankfactory_3factors_preconditioned(m, n, k)
% Manifold of m-by-n matrices of rank k with polar quotient geometry.
%
% function M = fixedrankfactory_3factors_preconditioned(m, n, k)
%
% A point X on the manifold is represented as a structure with three
% fields: L, S and R. The matrices L (mxk) and R (nxk) are orthonormal,
% while the matrix S (kxk) is a full-rank matrix.
%
% Tangent vectors are represented as a structure with three fields: L, S
% and R.
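%
% Example (editor's illustrative sketch, not part of the original file):
% assuming Manopt is on the path, a rank-k approximation of an arbitrary
% matrix A (with X = L*S*R') could be computed as follows:
%
%   m = 100; n = 80; k = 5;
%   A = randn(m, n);                        % an arbitrary data matrix
%   problem.M = fixedrankfactory_3factors_preconditioned(m, n, k);
%   problem.cost  = @(X) .5*norm(X.L*X.S*X.R' - A, 'fro')^2;
%   problem.egrad = @(X) struct('L', (X.L*X.S*X.R' - A)*X.R*X.S', ...
%                               'S', X.L'*(X.L*X.S*X.R' - A)*X.R, ...
%                               'R', (X.L*X.S*X.R' - A)'*X.L*X.S);
%   X = trustregions(problem);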
% This file is part of Manopt: www.manopt.org.
% Original author: Bamdev Mishra, Dec. 30, 2012.
% Contributors:
% Change log:
M.name = @() sprintf('LSR'' quotient manifold of %dx%d matrices of rank %d', m, n, k);
M.dim = @() (m+n-k)*k;
% Some precomputations at the point X to be used in the inner product (and
% pretty much everywhere else).
function X = prepare(X)
if ~all(isfield(X,{'StS','SSt','invStS','invSSt'}) == 1)
X.SSt = X.S*X.S';
X.StS = X.S'*X.S;
X.invSSt = eye(size(X.S, 2))/X.SSt;
X.invStS = eye(size(X.S, 2))/X.StS;
end
end
% The choice of metric is tuned to least-squares costs such as the
% low-rank matrix completion cost function.
M.inner = @iproduct;
function ip = iproduct(X, eta, zeta)
X = prepare(X);
ip = trace(X.SSt*(eta.L'*zeta.L)) + trace(X.StS*(eta.R'*zeta.R)) ...
+ trace(eta.S'*zeta.S);
end
M.norm = @(X, eta) sqrt(M.inner(X, eta, eta));
M.dist = @(x, y) error('fixedrankLSRquotientfactory.dist not implemented yet.');
M.typicaldist = @() 10*k;
skew = @(X) .5*(X-X');
symm = @(X) .5*(X+X');
M.egrad2rgrad = @egrad2rgrad;
function rgrad = egrad2rgrad(X, egrad)
X = prepare(X);
SSL = X.SSt;
ASL = 2*symm(SSL*(egrad.S*X.S'));
SSR = X.StS;
ASR = 2*symm(SSR*(egrad.S'*X.S));
% BL1 = lyap(SSL, -ASL);
% BR1 = lyap(SSR, -ASR);
[BL, BR] = tangent_space_lyap(X.S, ASL, ASR);
rgrad.L = (egrad.L - X.L*BL)*X.invSSt;
rgrad.R = (egrad.R - X.R*BR)*X.invStS;
rgrad.S = egrad.S;
% norm(skew(X.SSt*(rgrad.L'*X.L) + rgrad.S*X.S'), 'fro')
% norm(skew(X.StS*(rgrad.R'*X.R) - X.S'*rgrad.S), 'fro')
end
M.ehess2rhess = @ehess2rhess;
function Hess = ehess2rhess(X, egrad, ehess, eta)
X = prepare(X);
% Riemannian gradient
SSL = X.SSt;
ASL = 2*symm(SSL*(egrad.S*X.S'));
SSR = X.StS;
ASR = 2*symm(SSR*(egrad.S'*X.S));
[BL, BR] = tangent_space_lyap(X.S, ASL, ASR);
rgrad.L = (egrad.L - X.L*BL)*X.invSSt;
rgrad.R = (egrad.R - X.R*BR)*X.invStS;
rgrad.S = egrad.S;
% Directional derivative of the Riemannian gradient
ASLdot = 2*symm((2*symm(X.S*eta.S')*(egrad.S*X.S')) + X.SSt*(ehess.S*X.S' + egrad.S*eta.S')) - 4*symm(symm(eta.S*X.S')*BL);
ASRdot = 2*symm((2*symm(X.S'*eta.S)*(egrad.S'*X.S)) + X.StS*(ehess.S'*X.S + egrad.S'*eta.S)) - 4*symm(symm(eta.S'*X.S)*BR);
% SSLdot = X.SSt;
% SSRdot = X.StS;
% BLdot = lyap(SSLdot, -ASLdot);
% BRdot = lyap(SSRdot, -ASRdot);
[BLdot, BRdot] = tangent_space_lyap(X.S, ASLdot, ASRdot);
Hess.L = (ehess.L - eta.L*BL - X.L*BLdot - 2*rgrad.L*symm(eta.S*X.S'))*X.invSSt;
Hess.R = (ehess.R - eta.R*BR - X.R*BRdot - 2*rgrad.R*symm(eta.S'*X.S))*X.invStS;
Hess.S = ehess.S;
% BM comments: up to this point, everything seems correct.
% We still need a correction factor for the non-constant metric
% The correction factor owes itself to the Koszul formula...
% This is the Riemannian connection in the Euclidean space with the
% scaled metric.
Hess.L = Hess.L + (eta.L*symm(rgrad.S*X.S') + rgrad.L*symm(eta.S*X.S'))*X.invSSt;
Hess.R = Hess.R + (eta.R*symm(rgrad.S'*X.S) + rgrad.R*symm(eta.S'*X.S))*X.invStS;
Hess.S = Hess.S - symm(rgrad.L'*eta.L)*X.S - X.S*symm(rgrad.R'*eta.R);
% The Riemannian connection on the quotient space is the
% projection on the tangent space of the total space and then onto the horizontal
% space. This is accomplished by the following operation.
Hess = M.proj(X, Hess);
% norm(skew(X.SSt*(Hess.L'*X.L) + Hess.S*X.S'))
% norm(skew(X.StS*(Hess.R'*X.R) - X.S'*Hess.S))
end
M.proj = @projection;
function etaproj = projection(X, eta)
X = prepare(X);
% First, projection onto the tangent space of the total space
SSL = X.SSt;
ASL = 2*symm(X.SSt*(X.L'*eta.L)*X.SSt);
BL = lyap(SSL, -ASL);
eta.L = eta.L - X.L*BL*X.invSSt;
SSR = X.StS;
ASR = 2*symm(X.StS*(X.R'*eta.R)*X.StS);
BR = lyap(SSR, -ASR);
eta.R = eta.R - X.R*BR*X.invStS;
% Project onto the horizontal space
PU = skew((X.L'*eta.L)*X.SSt) + skew(X.S*eta.S');
PV = skew((X.R'*eta.R)*X.StS) + skew(X.S'*eta.S);
[Omega1, Omega2] = coupled_lyap(X.S, PU, PV);
% norm(2*skew(Omega1*X.SSt) - PU -(X.S*Omega2*X.S'),'fro' )
% norm(2*skew(Omega2*X.StS) - PV -(X.S'*Omega1*X.S),'fro' )
%
etaproj.L = eta.L - (X.L*Omega1);
etaproj.S = eta.S - (X.S*Omega2 - Omega1*X.S) ;
etaproj.R = eta.R - (X.R*Omega2);
% norm(skew(X.SSt*(etaproj.L'*X.L) + etaproj.S*X.S'))
% norm(skew(X.StS*(etaproj.R'*X.R) - X.S'*etaproj.S))
%
% norm(skew(X.SSt*(etaproj.L'*X.L) - X.S*etaproj.S'))
% norm(skew(X.StS*(etaproj.R'*X.R) + etaproj.S'*X.S))
end
M.tangent = M.proj;
M.tangent2ambient = @(X, eta) eta;
M.retr = @retraction;
function Y = retraction(X, eta, t)
if nargin < 3
t = 1.0;
end
Y.S = (X.S + t*eta.S);
Y.L = uf((X.L + t*eta.L));
Y.R = uf((X.R + t*eta.R));
Y = prepare(Y);
end
M.exp = @exponential;
function Y = exponential(X, eta, t)
if nargin < 3
t = 1.0;
end
Y = retraction(X, eta, t);
warning('manopt:fixedrankLSRquotientfactory:exp', ...
['Exponential for fixed rank ' ...
'manifold not implemented yet. Used retraction instead.']);
end
M.hash = @(X) ['z' hashmd5([X.L(:) ; X.S(:) ; X.R(:)])];
M.rand = @random;
% Factors L and R live on Stiefel manifolds, hence we will reuse
% their random generator.
stiefelm = stiefelfactory(m, k);
stiefeln = stiefelfactory(n, k);
function X = random()
X.L = stiefelm.rand();
X.R = stiefeln.rand();
X.S = diag(1+rand(k, 1));
X = prepare(X);
end
M.randvec = @randomvec;
function eta = randomvec(X)
% A random vector on the horizontal space
eta.L = randn(m, k);
eta.R = randn(n, k);
eta.S = randn(k, k);
eta = projection(X, eta);
nrm = M.norm(X, eta);
eta.L = eta.L / nrm;
eta.R = eta.R / nrm;
eta.S = eta.S / nrm;
end
M.lincomb = @lincomb;
M.zerovec = @(X) struct('L', zeros(m, k), 'S', zeros(k, k), ...
'R', zeros(n, k));
M.transp = @(x1, x2, d) projection(x2, d);
% vec and mat are not isometries, because of the unusual inner metric.
M.vec = @(X, U) [U.L(:) ; U.S(:); U.R(:)];
M.mat = @(X, u) struct('L', reshape(u(1:(m*k)), m, k), ...
'S', reshape(u((m*k+1): m*k + k*k), k, k), ...
'R', reshape(u((m*k+ k*k + 1):end), n, k));
M.vecmatareisometries = @() false;
end
% Linear combination of tangent vectors
function d = lincomb(x, a1, d1, a2, d2) %#ok<INUSL>
if nargin == 3
d.L = a1*d1.L;
d.R = a1*d1.R;
d.S = a1*d1.S;
elseif nargin == 5
d.L = a1*d1.L + a2*d2.L;
d.R = a1*d1.R + a2*d2.R;
d.S = a1*d1.S + a2*d2.S;
else
error('Bad use of fixedrankLSRquotientfactory.lincomb.');
end
end
function A = uf(A)
[L, unused, R] = svd(A, 0); %#ok
A = L*R';
end
function [BU, BV] = tangent_space_lyap(R, E, F)
% We intend to solve RR^T BU + BU RR^T = E
% R^T R BV + BV R^T R = F
%
% This can be solved using two calls to the Matlab lyap function.
% However, a more efficient implementation is given below.
[U, Sigma, V] = svd(R);
E_mod = U'*E*U;
F_mod = V'*F*V;
b1 = E_mod(:);
b2 = F_mod(:);
r = size(Sigma, 1);
sig = diag(Sigma); % all the singular values in a vector
sig1 = sig*ones(1, r); % columns repeat
sig1t = sig1'; % rows repeat
s1 = sig1(:);
s2 = sig1t(:);
% The block elements
a = s1.^2 + s2.^2; % a column vector
% solve the linear system of equations
cu = b1./a; %a.\b1;
cv = b2./a; %a.\b2;
% devectorize
CU = reshape(cu, r, r);
CV = reshape(cv, r, r);
% Do the similarity transforms
BU = U*CU*U';
BV = V*CV*V';
% %% debug
%
% norm(R*R'*BU + BU*R*R' - E, 'fro');
% norm((Sigma.^2)*CU + CU*(Sigma.^2) - E_mod, 'fro');
% norm(a.*cu - b1, 'fro');
%
% norm(R'*R*BV + BV*R'*R - F, 'fro');
%
% BU1 = lyap(R*R', - E);
% norm(R*R'*BU1 + BU1*R*R' - E, 'fro');
%
% BV1 = lyap(R'*R, - F);
% norm(R'*R*BV1 + BV1*R'*R - F, 'fro');
%
% % as accurate as the lyap
% norm(BU - BU1, 'fro')
% norm(BV - BV1, 'fro')
end
function [Omega1, Omega2] = coupled_lyap(R, E, F)
% We intend to solve the coupled system of Lyapunov equations
%
% RR^T Omega1 + Omega1 RR^T - R Omega2 R^T = E
% R^T R Omega2 + Omega2 R^T R - R^T Omega1 R = F
%
% Below is an efficient implementation.
[U, Sigma, V] = svd(R);
E_mod = U'*E*U;
F_mod = V'*F*V;
b1 = E_mod(:);
b2 = F_mod(:);
r = size(Sigma, 1);
sig = diag(Sigma); % all the singular values in a vector
sig1 = sig*ones(1, r); % columns repeat
sig1t = sig1'; % rows repeat
s1 = sig1(:);
s2 = sig1t(:);
% The block elements
a = s1.^2 + s2.^2; % a column vector
c = s1.*s2;
% Solve directly using the formula
% A = diag(a);
% C = diag(c);
% Y1_sol = (A*(C\A) - C) \ (b2 + A*(C\b1));
% Y2_sol = A\(b2 + C*Y1_sol);
Y1_sol = (b2 + (a./c).*b1) ./ ((a.^2)./c - c);
Y2_sol = (b2 + c.*Y1_sol)./a;
% devectorize
Omega1 = reshape(Y1_sol, r, r);
Omega2 = reshape(Y2_sol, r, r);
% Do the similarity transforms
Omega1 = U*Omega1*U';
Omega2 = V*Omega2*V';
% %% debug whether we have the right solution
% norm(R*R'*Omega1 + Omega1*R*R' - R*Omega2*R' - E, 'fro')
% norm(R'*R*Omega2 + Omega2*R'*R - R'*Omega1*R - F, 'fro')
end
|
github | skovnats/madmm-master | fixedrankfactory_2factors_subspace_projection.m | .m | madmm-master/functional_maps_L21norm/help_functions/manopt/manopt/manifolds/fixedrank/fixedrankfactory_2factors_subspace_projection.m | 6,255 | utf_8 | 4232d28fbaabbc139761a8fbcca4ea4c |
function M = fixedrankfactory_2factors_subspace_projection(m, n, k)
% Manifold of m-by-n matrices of rank k with quotient geometry.
%
% function M = fixedrankfactory_2factors_subspace_projection(m, n, k)
%
% This follows the quotient geometry described in the following paper:
% B. Mishra, G. Meyer, S. Bonnabel and R. Sepulchre
% "Fixed-rank matrix factorizations and Riemannian low-rank optimization",
% arXiv, 2012.
%
% Paper link: http://arxiv.org/abs/1209.0430
%
% A point X on the manifold is represented as a structure with two
% fields: L and R. The matrix L (mxk) is orthonormal, while the matrix
% R (nxk) is a full column-rank matrix.
%
% Tangent vectors are represented as a structure with two fields: L, R.
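%
% Example (editor's illustrative sketch, not part of the original file):
% assuming Manopt is on the path, a rank-k approximation of an arbitrary
% matrix A (with X = L*R') could be computed as follows:
%
%   m = 100; n = 80; k = 5;
%   A = randn(m, n);                        % an arbitrary data matrix
%   problem.M = fixedrankfactory_2factors_subspace_projection(m, n, k);
%   problem.cost  = @(X) .5*norm(X.L*X.R' - A, 'fro')^2;
%   problem.egrad = @(X) struct('L', (X.L*X.R' - A)*X.R, ...
%                               'R', (X.L*X.R' - A)'*X.L);
%   X = trustregions(problem);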
% This file is part of Manopt: www.manopt.org.
% Original author: Bamdev Mishra, Dec. 30, 2012.
% Contributors:
% Change log:
M.name = @() sprintf('LR'' quotient manifold of %dx%d matrices of rank %d', m, n, k);
M.dim = @() (m+n-k)*k;
% Some precomputations at the point X to be used in the inner product (and
% pretty much everywhere else).
function X = prepare(X)
if ~all(isfield(X,{'RtR','invRtR'}) == 1)
X.RtR = X.R'*X.R;
X.invRtR = eye(size(X.R,2))/ X.RtR;
end
end
% The choice of the metric is motivated by symmetry and scale
% invariance in the total space
M.inner = @iproduct;
function ip = iproduct(X, eta, zeta)
X = prepare(X);
ip = eta.L(:).'*zeta.L(:) + trace(X.invRtR*(eta.R'*zeta.R) );
end
M.norm = @(X, eta) sqrt(M.inner(X, eta, eta));
M.dist = @(x, y) error('fixedrankfactory_2factors_subspace_projection.dist not implemented yet.');
M.typicaldist = @() 10*k;
skew = @(X) .5*(X-X');
symm = @(X) .5*(X+X');
stiefel_proj = @(L, H) H - L*symm(L'*H);
M.egrad2rgrad = @egrad2rgrad;
function eta = egrad2rgrad(X, eta)
X = prepare(X);
eta.L = stiefel_proj(X.L, eta.L);
eta.R = eta.R*X.RtR;
end
M.ehess2rhess = @ehess2rhess;
function Hess = ehess2rhess(X, egrad, ehess, eta)
X = prepare(X);
% Riemannian gradient
rgrad = egrad2rgrad(X, egrad);
% Directional derivative of the Riemannian gradient
Hess.L = ehess.L - eta.L*symm(X.L'*egrad.L);
Hess.L = stiefel_proj(X.L, Hess.L);
Hess.R = ehess.R*X.RtR + 2*egrad.R*symm(eta.R'*X.R);
% Correction factor for the non-constant metric on the factor R
Hess.R = Hess.R - rgrad.R*((X.invRtR)*symm(X.R'*eta.R)) - eta.R*(X.invRtR*symm(X.R'*rgrad.R)) + X.R*(X.invRtR*symm(eta.R'*rgrad.R));
% Projection onto the horizontal space
Hess = M.proj(X, Hess);
end
M.proj = @projection;
function etaproj = projection(X, eta)
X = prepare(X);
eta.L = stiefel_proj(X.L, eta.L); % On the tangent space
SS = X.RtR;
AS1 = 2*X.RtR*skew(X.L'*eta.L)*X.RtR;
AS2 = 2*skew(X.RtR*(X.R'*eta.R));
AS = skew(AS1 + AS2);
Omega = nested_sylvester(SS,AS);
etaproj.L = eta.L - X.L*Omega;
etaproj.R = eta.R - X.R*Omega;
end
M.tangent = M.proj;
M.tangent2ambient = @(X, eta) eta;
M.retr = @retraction;
function Y = retraction(X, eta, t)
if nargin < 3
t = 1.0;
end
Y.L = uf(X.L + t*eta.L);
Y.R = X.R + t*eta.R;
% These are reused in the computation of the gradient and Hessian
Y = prepare(Y);
end
M.exp = @exponential;
function R = exponential(X, eta, t)
if nargin < 3
t = 1.0;
end
R = retraction(X, eta, t);
warning('manopt:fixedrankfactory_2factors_subspace_projection:exp', ...
['Exponential for fixed rank ' ...
'manifold not implemented yet. Used retraction instead.']);
end
M.hash = @(X) ['z' hashmd5([X.L(:) ; X.R(:)])];
M.rand = @random;
% Factor L lives on a Stiefel manifold, hence we reuse
% its random generator.
stiefelm = stiefelfactory(m, k);
function X = random()
X.L = stiefelm.rand();
X.R = randn(n, k);
end
M.randvec = @randomvec;
function eta = randomvec(X)
eta.L = randn(m, k);
eta.R = randn(n, k);
eta = projection(X, eta);
nrm = M.norm(X, eta);
eta.L = eta.L / nrm;
eta.R = eta.R / nrm;
end
M.lincomb = @lincomb;
M.zerovec = @(X) struct('L', zeros(m, k),...
'R', zeros(n, k));
M.transp = @(x1, x2, d) projection(x2, d);
% vec and mat are not isometries, because of the unusual inner metric.
M.vec = @(X, U) [U.L(:) ; U.R(:)];
M.mat = @(X, u) struct('L', reshape(u(1:(m*k)), m, k), ...
'R', reshape(u((m*k+1):end), n, k));
M.vecmatareisometries = @() false;
end
% Linear combination of tangent vectors
function d = lincomb(x, a1, d1, a2, d2) %#ok<INUSL>
if nargin == 3
d.L = a1*d1.L;
d.R = a1*d1.R;
elseif nargin == 5
d.L = a1*d1.L + a2*d2.L;
d.R = a1*d1.R + a2*d2.R;
else
error('Bad use of fixedrankfactory_2factors_subspace_projection.lincomb.');
end
end
function A = uf(A)
[L, unused, R] = svd(A, 0); %#ok
A = L*R';
end
function omega = nested_sylvester(sym_mat, asym_mat)
% omega=nested_sylvester(sym_mat,asym_mat)
% This function solves the system of nested Sylvester equations:
%
% X*sym_mat + sym_mat*X = asym_mat
% Omega*sym_mat+sym_mat*Omega = X
% Mishra, Meyer, Bonnabel and Sepulchre, 'Fixed-rank matrix factorizations and Riemannian low-rank optimization'
% Uses the built-in lyap function, but does not exploit the fact that it's
% twice the same sym_mat matrix that comes into play.
X = lyap(sym_mat, -asym_mat);
omega = lyap(sym_mat, -X);
end
|
github | skovnats/madmm-master | fixedrankfactory_2factors_preconditioned.m | .m | madmm-master/functional_maps_L21norm/help_functions/manopt/manopt/manifolds/fixedrank/fixedrankfactory_2factors_preconditioned.m | 5,832 | utf_8 | de03349c31333faef49955c31b7478b1 |
function M = fixedrankfactory_2factors_preconditioned(m, n, k)
% Manifold of m-by-n matrices of rank k with new balanced quotient geometry
%
% function M = fixedrankfactory_2factors_preconditioned(m, n, k)
%
% This follows the quotient geometry described in the following paper:
% B. Mishra, K. Adithya Apuroop and R. Sepulchre,
% "A Riemannian geometry for low-rank matrix completion",
% arXiv, 2012.
%
% Paper link: http://arxiv.org/abs/1211.1550
%
% This geometry is tuned to least-squares problems such as low-rank matrix
% completion.
%
% A point X on the manifold is represented as a structure with two
% fields: L and R. The matrices L (mxk) and R (nxk) are full column-rank
% matrices.
%
% Tangent vectors are represented as a structure with two fields: L, R
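%
% Example (editor's illustrative sketch, not part of the original file):
% assuming Manopt is on the path, a rank-k least-squares approximation of
% an arbitrary matrix A (with X = L*R') could be computed as follows:
%
%   m = 100; n = 80; k = 5;
%   A = randn(m, n);                        % an arbitrary data matrix
%   problem.M = fixedrankfactory_2factors_preconditioned(m, n, k);
%   problem.cost  = @(X) .5*norm(X.L*X.R' - A, 'fro')^2;
%   problem.egrad = @(X) struct('L', (X.L*X.R' - A)*X.R, ...
%                               'R', (X.L*X.R' - A)'*X.L);
%   X = trustregions(problem);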
% This file is part of Manopt: www.manopt.org.
% Original author: Bamdev Mishra, Dec. 30, 2012.
% Contributors:
% Change log:
M.name = @() sprintf('LR''(tuned for least square problems) quotient manifold of %dx%d matrices of rank %d', m, n, k);
M.dim = @() (m+n-k)*k;
% Some precomputations at the point X to be used in the inner product (and
% pretty much everywhere else).
function X = prepare(X)
if ~all(isfield(X,{'LtL','RtR','invRtR','invLtL'}))
L = X.L;
R = X.R;
X.LtL = L'*L;
X.RtR = R'*R;
X.invLtL = inv(X.LtL);
X.invRtR = inv(X.RtR);
end
end
% The choice of metric is motivated by symmetry and tuned to the
% least-squares objective function.
M.inner = @iproduct;
function ip = iproduct(X, eta, zeta)
X = prepare(X);
ip = trace(X.RtR*(eta.L'*zeta.L)) + trace(X.LtL*(eta.R'*zeta.R));
end
M.norm = @(X, eta) sqrt(M.inner(X, eta, eta));
M.dist = @(x, y) error('fixedrankfactory_2factors_preconditioned.dist not implemented yet.');
M.typicaldist = @() 10*k;
symm = @(M) .5*(M+M');
M.egrad2rgrad = @egrad2rgrad;
function eta = egrad2rgrad(X, eta)
X = prepare(X);
eta.L = eta.L*X.invRtR;
eta.R = eta.R*X.invLtL;
end
M.ehess2rhess = @ehess2rhess;
function Hess = ehess2rhess(X, egrad, ehess, eta)
X = prepare(X);
% Riemannian gradient
rgrad = egrad2rgrad(X, egrad);
% Directional derivative of the Riemannian gradient
Hess.L = ehess.L*X.invRtR - 2*egrad.L*(X.invRtR * symm(eta.R'*X.R) * X.invRtR);
Hess.R = ehess.R*X.invLtL - 2*egrad.R*(X.invLtL * symm(eta.L'*X.L) * X.invLtL);
% We still need a correction factor for the non-constant metric
Hess.L = Hess.L + rgrad.L*(symm(eta.R'*X.R)*X.invRtR) + eta.L*(symm(rgrad.R'*X.R)*X.invRtR) - X.L*(symm(eta.R'*rgrad.R)*X.invRtR);
Hess.R = Hess.R + rgrad.R*(symm(eta.L'*X.L)*X.invLtL) + eta.R*(symm(rgrad.L'*X.L)*X.invLtL) - X.R*(symm(eta.L'*rgrad.L)*X.invLtL);
% Project on the horizontal space
Hess = M.proj(X, Hess);
end
M.proj = @projection;
function etaproj = projection(X, eta)
X = prepare(X);
Lambda = (eta.R'*X.R)*X.invRtR - X.invLtL*(X.L'*eta.L);
Lambda = Lambda/2;
etaproj.L = eta.L + X.L*Lambda;
etaproj.R = eta.R - X.R*Lambda';
end
M.tangent = M.proj;
M.tangent2ambient = @(X, eta) eta;
M.retr = @retraction;
function Y = retraction(X, eta, t)
if nargin < 3
t = 1.0;
end
Y.L = X.L + t*eta.L;
Y.R = X.R + t*eta.R;
% Numerical conditioning step: a simpler version.
% We need to ensure that L and R do not have very skewed
% relative norms.
scaling = norm(X.L, 'fro')/norm(X.R, 'fro');
scaling = sqrt(scaling);
Y.L = Y.L / scaling;
Y.R = Y.R * scaling;
% These are reused in the computation of the gradient and Hessian
Y = prepare(Y);
end
M.exp = @exponential;
function Y = exponential(X, eta, t)
if nargin < 3
t = 1.0;
end
Y = retraction(X, eta, t);
warning('manopt:fixedrankfactory_2factors_preconditioned:exp', ...
['Exponential for fixed rank ' ...
'manifold not implemented yet. Used retraction instead.']);
end
M.hash = @(X) ['z' hashmd5([X.L(:) ; X.R(:)])];
M.rand = @random;
function X = random()
X.L = randn(m, k);
X.R = randn(n, k);
end
M.randvec = @randomvec;
function eta = randomvec(X)
eta.L = randn(m, k);
eta.R = randn(n, k);
eta = projection(X, eta);
nrm = M.norm(X, eta);
eta.L = eta.L / nrm;
eta.R = eta.R / nrm;
end
M.lincomb = @lincomb;
M.zerovec = @(X) struct('L', zeros(m, k),'R', zeros(n, k));
M.transp = @(x1, x2, d) projection(x2, d);
% vec and mat are not isometries, because of the unusual inner metric.
M.vec = @(X, U) [U.L(:) ; U.R(:)];
M.mat = @(X, u) struct('L', reshape(u(1:(m*k)), m, k), ...
'R', reshape(u((m*k+1):end), n, k));
M.vecmatareisometries = @() false;
end
% Linear combination of tangent vectors
function d = lincomb(x, a1, d1, a2, d2) %#ok<INUSL>
if nargin == 3
d.L = a1*d1.L;
d.R = a1*d1.R;
elseif nargin == 5
d.L = a1*d1.L + a2*d2.L;
d.R = a1*d1.R + a2*d2.R;
else
error('Bad use of fixedrankfactory_2factors_preconditioned.lincomb.');
end
end
|
github | skovnats/madmm-master | fixedrankembeddedfactory.m | .m | madmm-master/functional_maps_L21norm/help_functions/manopt/manopt/manifolds/fixedrank/fixedrankembeddedfactory.m | 10,833 | utf_8 | 1c1a04e099a39f2931eaf8763455c433 |
function M = fixedrankembeddedfactory(m, n, k)
% Manifold struct to optimize fixed-rank matrices w/ an embedded geometry.
%
% function M = fixedrankembeddedfactory(m, n, k)
%
% Manifold of m-by-n real matrices of fixed rank k. This follows the
% geometry described in this paper (which for now is the documentation):
% B. Vandereycken, "Low-rank matrix completion by Riemannian optimization",
% 2011.
%
% Paper link: http://arxiv.org/pdf/1209.3834.pdf
%
% A point X on the manifold is represented as a structure with three
% fields: U, S and V. The matrices U (mxk) and V (nxk) are orthonormal,
% while the matrix S (kxk) is any /diagonal/, full rank matrix.
% Following the SVD formalism, X = U*S*V'. Note that the diagonal entries
% of S are not constrained to be nonnegative.
%
% Tangent vectors are represented as a structure with three fields: Up, M
% and Vp. The matrices Up (mxk) and Vp (nxk) obey Up'*U = 0 and Vp'*V = 0.
% The matrix M (kxk) is arbitrary. Such a structure corresponds to the
% following tangent vector in the ambient space of mxn matrices:
% Z = U*M*V' + Up*V' + U*Vp'
% where (U, S, V) is the current point and (Up, M, Vp) is the tangent
% vector at that point.
%
% Vectors in the ambient space are best represented as mxn matrices. If
% these are low-rank, they may also be represented as structures with
% U, S, V fields, such that Z = U*S*V'. There are no restrictions on what
% U, S and V are, as long as their product as indicated yields a real, mxn
% matrix.
%
% The chosen geometry yields a Riemannian submanifold of the embedding
% space R^(mxn) equipped with the usual trace (Frobenius) inner product.
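%
% Example (editor's illustrative sketch, not part of the original file):
% assuming Manopt is on the path, a rank-k approximation of an arbitrary
% matrix A can be computed, with the Euclidean gradient supplied as a
% full mxn matrix:
%
%   m = 100; n = 80; k = 5;
%   A = randn(m, n);                        % an arbitrary data matrix
%   problem.M = fixedrankembeddedfactory(m, n, k);
%   problem.cost  = @(X) .5*norm(X.U*X.S*X.V' - A, 'fro')^2;
%   problem.egrad = @(X) X.U*X.S*X.V' - A;  % ambient vector as an mxn matrix
%   X = trustregions(problem);              % X.U*X.S*X.V' approximates A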
% This file is part of Manopt: www.manopt.org.
% Original author: Nicolas Boumal, Dec. 30, 2012.
% Contributors:
% Change log:
%
% Feb. 20, 2014 (NB):
% Added function tangent to work with checkgradient.
% June 24, 2014 (NB):
% A couple modifications following
% Bart Vandereycken's feedback:
% - The checksum (hash) was replaced for a faster alternative: it's a
% bit less "safe" in that collisions could arise with higher
% probability, but they're still very unlikely.
% - The vector transport was changed.
% The typical distance was also modified, hopefully giving the
% trustregions method a better initial guess for the trust region
% radius, but that should be tested for different cost functions too.
% July 11, 2014 (NB):
% Added ehess2rhess and tangent2ambient, supplied by Bart.
% July 14, 2014 (NB):
% Added vec, mat and vecmatareisometries so that hessianspectrum now
% works with this geometry. Implemented the tangent function.
% Made it clearer in the code and in the documentation in what format
% ambient vectors may be supplied, and generalized some functions so
% that they should now work with both accepted formats.
% It is now clearly stated that for a point X represented as a
% triplet (U, S, V), the matrix S needs to be diagonal.
M.name = @() sprintf('Manifold of %dx%d matrices of rank %d', m, n, k);
M.dim = @() (m+n-k)*k;
M.inner = @(x, d1, d2) d1.M(:).'*d2.M(:) + d1.Up(:).'*d2.Up(:) ...
+ d1.Vp(:).'*d2.Vp(:);
M.norm = @(x, d) sqrt(M.inner(x, d, d));
M.dist = @(x, y) error('fixedrankembeddedfactory.dist not implemented yet.');
M.typicaldist = @() M.dim();
% Given Z in tangent vector format, projects the components Up and Vp
% such that they satisfy the tangent space constraints up to numerical
% errors. If Z was indeed a tangent vector at X, this should barely
% affect Z (it would not at all if we had infinite numerical accuracy).
M.tangent = @tangent;
function Z = tangent(X, Z)
Z.Up = Z.Up - X.U*(X.U'*Z.Up);
Z.Vp = Z.Vp - X.V*(X.V'*Z.Vp);
end
% For a given ambient vector Z, applies it to a matrix W. If Z is given
% as a matrix, this is straightforward. If Z is given as a structure
% with fields U, S, V such that Z = U*S*V', the product is executed
% efficiently.
function ZW = apply_ambient(Z, W)
if ~isstruct(Z)
ZW = Z*W;
else
ZW = Z.U*(Z.S*(Z.V'*W));
end
end
% Same as apply_ambient, but applies Z' to W.
function ZtW = apply_ambient_transpose(Z, W)
if ~isstruct(Z)
ZtW = Z'*W;
else
ZtW = Z.V*(Z.S'*(Z.U'*W));
end
end
% Orthogonal projection of an ambient vector Z represented as an mxn
% matrix or as a structure with fields U, S, V to the tangent space at
% X, in a tangent vector structure format.
M.proj = @projection;
function Zproj = projection(X, Z)
ZV = apply_ambient(Z, X.V);
UtZV = X.U'*ZV;
ZtU = apply_ambient_transpose(Z, X.U);
Zproj.M = UtZV;
Zproj.Up = ZV - X.U*UtZV;
Zproj.Vp = ZtU - X.V*UtZV';
end
M.egrad2rgrad = @projection;
% Code supplied by Bart.
% Given the Euclidean gradient at X and the Euclidean Hessian at X
% along H, where egrad and ehess are vectors in the ambient space and H
% is a tangent vector at X, returns the Riemannian Hessian at X along
% H, which is a tangent vector.
M.ehess2rhess = @ehess2rhess;
function rhess = ehess2rhess(X, egrad, ehess, H)
% Euclidean part
rhess = projection(X, ehess);
% Curvature part
T = apply_ambient(egrad, H.Vp)/X.S;
rhess.Up = rhess.Up + (T - X.U*(X.U'*T));
T = apply_ambient_transpose(egrad, H.Up)/X.S;
rhess.Vp = rhess.Vp + (T - X.V*(X.V'*T));
end
% Transforms a tangent vector Z represented as a structure (Up, M, Vp)
% into a structure with fields (U, S, V) that represents that same
% tangent vector in the ambient space of mxn matrices, as U*S*V'.
% This matrix is equal to X.U*Z.M*X.V' + Z.Up*X.V' + X.U*Z.Vp'. The
% latter is an mxn matrix, which could be too large to build
% explicitly, and this is why we return a low-rank representation
% instead. Note that there are no guarantees on U, S and V other than
% that USV' is the desired matrix. In particular, U and V are not (in
% general) orthonormal and S is not (in general) diagonal.
% (In this implementation, S is identity, but this might change.)
M.tangent2ambient = @tangent2ambient;
function Zambient = tangent2ambient(X, Z)
Zambient.U = [X.U*Z.M + Z.Up, X.U];
Zambient.S = eye(2*k);
Zambient.V = [X.V, Z.Vp];
end
% This retraction is second order, following general results from
% Absil, Malick, "Projection-like retractions on matrix manifolds",
% SIAM J. Optim., 22 (2012), pp. 135-158.
M.retr = @retraction;
function Y = retraction(X, Z, t)
if nargin < 3
t = 1.0;
end
% See personal notes June 28, 2012 (NB)
[Qu, Ru] = qr(Z.Up, 0);
[Qv, Rv] = qr(Z.Vp, 0);
% Calling svds or svd should yield the same result, but BV
% advocated that svd is more robust, and it doesn't change the
% asymptotic complexity to call svd then trim rather than call
% svds. Also, apparently Matlab calls ARPACK in a suboptimal way
% for svds in this scenario.
% [Ut St Vt] = svds([X.S+t*Z.M , t*Rv' ; t*Ru , zeros(k)], k);
[Ut, St, Vt] = svd([X.S+t*Z.M , t*Rv' ; t*Ru , zeros(k)]);
Y.U = [X.U Qu]*Ut(:, 1:k);
Y.V = [X.V Qv]*Vt(:, 1:k);
Y.S = St(1:k, 1:k) + eps*eye(k);
% equivalent but very slow code
% [U S V] = svds(X.U*X.S*X.V' + t*(X.U*Z.M*X.V' + Z.Up*X.V' + X.U*Z.Vp'), k);
% Y.U = U; Y.V = V; Y.S = S;
end
M.exp = @exponential;
function Y = exponential(X, Z, t)
if nargin < 3
t = 1.0;
end
Y = retraction(X, Z, t);
warning('manopt:fixedrankembeddedfactory:exp', ...
['Exponential for fixed rank ' ...
'manifold not implemented yet. Used retraction instead.']);
end
% Less safe but much faster checksum, June 24, 2014.
% Older version right below.
M.hash = @(X) ['z' hashmd5([sum(X.U(:)) ; sum(X.S(:)); sum(X.V(:)) ])];
%M.hash = @(X) ['z' hashmd5([X.U(:) ; X.S(:) ; X.V(:)])];
M.rand = @random;
% Factors U and V live on Stiefel manifolds, hence we will reuse
% their random generator.
stiefelm = stiefelfactory(m, k);
stiefeln = stiefelfactory(n, k);
function X = random()
X.U = stiefelm.rand();
X.V = stiefeln.rand();
X.S = diag(sort(rand(k, 1), 1, 'descend'));
end
% Generate a random tangent vector at X.
% TODO: consider a possible imbalance between the three components Up,
% Vp and M, when m, n and k are widely different (which is typical).
M.randvec = @randomvec;
function Z = randomvec(X)
Z.Up = randn(m, k);
Z.Vp = randn(n, k);
Z.M = randn(k);
Z = tangent(X, Z);
nrm = M.norm(X, Z);
Z.Up = Z.Up / nrm;
Z.Vp = Z.Vp / nrm;
Z.M = Z.M / nrm;
end
M.lincomb = @lincomb;
M.zerovec = @(X) struct('Up', zeros(m, k), 'M', zeros(k, k), ...
'Vp', zeros(n, k));
% New vector transport on June 24, 2014 (as indicated by Bart)
% Reference: Absil, Mahony, Sepulchre 2008 section 8.1.3:
% For Riemannian submanifolds of a Euclidean space, it is acceptable to
% transport simply by orthogonal projection of the tangent vector
% translated in the ambient space.
M.transp = @project_tangent;
function Z2 = project_tangent(X1, X2, Z1)
Z2 = projection(X2, tangent2ambient(X1, Z1));
end
M.vec = @vec;
function Zvec = vec(X, Z)
Zamb = tangent2ambient(X, Z);
Zamb_mat = Zamb.U*Zamb.S*Zamb.V';
Zvec = Zamb_mat(:);
end
M.mat = @(X, Zvec) projection(X, reshape(Zvec, [m, n]));
M.vecmatareisometries = @() true;
end
% Linear combination of tangent vectors
function d = lincomb(x, a1, d1, a2, d2) %#ok<INUSL>
if nargin == 3
d.Up = a1*d1.Up;
d.Vp = a1*d1.Vp;
d.M = a1*d1.M;
elseif nargin == 5
d.Up = a1*d1.Up + a2*d2.Up;
d.Vp = a1*d1.Vp + a2*d2.Vp;
d.M = a1*d1.M + a2*d2.M;
else
error('fixedrank.lincomb takes either 3 or 5 inputs.');
end
end
|
github | skovnats/madmm-master | fixedrankfactory_3factors.m | .m | madmm-master/functional_maps_L21norm/help_functions/manopt/manopt/manifolds/fixedrank/fixedrankfactory_3factors.m | 6,035 | utf_8 | a8c0a4812c73be5a82cf3918fe2d77c1 |
function M = fixedrankfactory_3factors(m, n, k)
% Manifold of m-by-n matrices of rank k with polar quotient geometry.
%
% function M = fixedrankfactory_3factors(m, n, k)
%
% Follows the polar quotient geometry described in the following paper:
% G. Meyer, S. Bonnabel and R. Sepulchre,
% "Linear regression under fixed-rank constraints: a Riemannian approach",
% ICML 2011.
%
% Paper link: http://www.icml-2011.org/papers/350_icmlpaper.pdf
%
% Additional reference is
%
% B. Mishra, R. Meyer, S. Bonnabel and R. Sepulchre
% "Fixed-rank matrix factorizations and Riemannian low-rank optimization",
% arXiv, 2012.
%
% Paper link: http://arxiv.org/abs/1209.0430
%
% A point X on the manifold is represented as a structure with three
% fields: L, S and R. The matrices L (mxk) and R (nxk) are orthonormal,
% while the matrix S (kxk) is a symmetric positive definite full rank
% matrix.
%
% Tangent vectors are represented as a structure with three fields: L, S
% and R.
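%
% Example (editor's illustrative sketch, not part of the original file):
% assuming Manopt is on the path, a rank-k approximation of an arbitrary
% matrix A (with X = L*S*R') could be computed as follows:
%
%   m = 100; n = 80; k = 5;
%   A = randn(m, n);                        % an arbitrary data matrix
%   problem.M = fixedrankfactory_3factors(m, n, k);
%   problem.cost  = @(X) .5*norm(X.L*X.S*X.R' - A, 'fro')^2;
%   problem.egrad = @(X) struct('L', (X.L*X.S*X.R' - A)*X.R*X.S', ...
%                               'S', X.L'*(X.L*X.S*X.R' - A)*X.R, ...
%                               'R', (X.L*X.S*X.R' - A)'*X.L*X.S);
%   X = trustregions(problem);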
% This file is part of Manopt: www.manopt.org.
% Original author: Bamdev Mishra, Dec. 30, 2012.
% Contributors:
% Change log:
M.name = @() sprintf('LSR'' quotient manifold of %dx%d matrices of rank %d', m, n, k);
M.dim = @() (m+n-k)*k;
% Choice of the metric on the orthonormal space is motivated by the symmetry present in the
% space. The metric on the positive definite space is its natural metric.
M.inner = @(X, eta, zeta) eta.L(:).'*zeta.L(:) + eta.R(:).'*zeta.R(:) ...
+ trace( (X.S\eta.S) * (X.S\zeta.S) );
M.norm = @(X, eta) sqrt(M.inner(X, eta, eta));
M.dist = @(x, y) error('fixedrankfactory_3factors.dist not implemented yet.');
M.typicaldist = @() 10*k;
skew = @(X) .5*(X-X');
symm = @(X) .5*(X+X');
stiefel_proj = @(L, H) H - L*symm(L'*H);
M.egrad2rgrad = @egrad2rgrad;
function eta = egrad2rgrad(X, eta)
eta.L = stiefel_proj(X.L, eta.L);
eta.S = X.S*symm(eta.S)*X.S;
eta.R = stiefel_proj(X.R, eta.R);
end
M.ehess2rhess = @ehess2rhess;
function Hess = ehess2rhess(X, egrad, ehess, eta)
% Riemannian gradient for the factor S
rgrad.S = X.S*symm(egrad.S)*X.S;
% Directional derivatives of the Riemannian gradient
Hess.L = ehess.L - eta.L*symm(X.L'*egrad.L);
Hess.L = stiefel_proj(X.L, Hess.L);
Hess.R = ehess.R - eta.R*symm(X.R'*egrad.R);
Hess.R = stiefel_proj(X.R, Hess.R);
Hess.S = X.S*symm(ehess.S)*X.S + 2*symm(eta.S*symm(egrad.S)*X.S);
% Correction factor for the non-constant metric on the factor S
Hess.S = Hess.S - symm(eta.S*(X.S\rgrad.S));
% Projection onto the horizontal space
Hess = M.proj(X, Hess);
end
M.proj = @projection;
function etaproj = projection(X, eta)
% First, projection onto the tangent space of the total space
eta.L = stiefel_proj(X.L, eta.L);
eta.R = stiefel_proj(X.R, eta.R);
eta.S = symm(eta.S);
% Then, projection onto the horizontal space
SS = X.S*X.S;
AS = X.S*(skew(X.L'*eta.L) + skew(X.R'*eta.R) - 2*skew(X.S\eta.S))*X.S;
omega = lyap(SS, -AS);
etaproj.L = eta.L - X.L*omega;
etaproj.S = eta.S - (X.S*omega - omega*X.S);
etaproj.R = eta.R - X.R*omega;
end
M.tangent = M.proj;
M.tangent2ambient = @(X, eta) eta;
M.retr = @retraction;
function Y = retraction(X, eta, t)
if nargin < 3
t = 1.0;
end
L = chol(X.S);
Y.S = L'*expm(L'\(t*eta.S)/L)*L;
Y.L = uf(X.L + t*eta.L);
Y.R = uf(X.R + t*eta.R);
end
M.exp = @exponential;
function Y = exponential(X, eta, t)
if nargin < 3
t = 1.0;
end
Y = retraction(X, eta, t);
warning('manopt:fixedrankfactory_3factors:exp', ...
['Exponential for fixed rank ' ...
'manifold not implemented yet. Used retraction instead.']);
end
M.hash = @(X) ['z' hashmd5([X.L(:) ; X.S(:) ; X.R(:)])];
M.rand = @random;
% Factors L and R live on Stiefel manifolds, hence we will reuse
% their random generator.
stiefelm = stiefelfactory(m, k);
stiefeln = stiefelfactory(n, k);
function X = random()
X.L = stiefelm.rand();
X.R = stiefeln.rand();
X.S = diag(1+rand(k, 1));
end
M.randvec = @randomvec;
function eta = randomvec(X)
% A random vector on the horizontal space
eta.L = randn(m, k);
eta.R = randn(n, k);
eta.S = randn(k, k);
eta = projection(X, eta);
nrm = M.norm(X, eta);
eta.L = eta.L / nrm;
eta.R = eta.R / nrm;
eta.S = eta.S / nrm;
end
M.lincomb = @lincomb;
M.zerovec = @(X) struct('L', zeros(m, k), 'S', zeros(k, k), ...
'R', zeros(n, k));
M.transp = @(x1, x2, d) projection(x2, d);
% vec and mat are not isometries, because of the unusual inner metric.
M.vec = @(X, U) [U.L(:) ; U.S(:); U.R(:)];
M.mat = @(X, u) struct('L', reshape(u(1:(m*k)), m, k), ...
'S', reshape(u((m*k+1): m*k + k*k), k, k), ...
'R', reshape(u((m*k+ k*k + 1):end), n, k));
M.vecmatareisometries = @() false;
end
% Linear combination of tangent vectors
function d = lincomb(x, a1, d1, a2, d2) %#ok<INUSL>
if nargin == 3
d.L = a1*d1.L;
d.R = a1*d1.R;
d.S = a1*d1.S;
elseif nargin == 5
d.L = a1*d1.L + a2*d2.L;
d.R = a1*d1.R + a2*d2.R;
d.S = a1*d1.S + a2*d2.S;
else
error('Bad use of fixedrankfactory_3factors.lincomb.');
end
end
function A = uf(A)
[L, unused, R] = svd(A, 0); %#ok
A = L*R';
end
|
github | skovnats/madmm-master | fixedrankMNquotientfactory.m | .m | madmm-master/functional_maps_L21norm/help_functions/manopt/manopt/manifolds/fixedrank/fixedrankMNquotientfactory.m | 4,472 | utf_8 | 12343fec86ae2648fcd915623ae645c5 |
function M = fixedrankMNquotientfactory(m, n, k)
% Manifold of m-by-n matrices of rank k with quotient geometry.
%
% function M = fixedrankMNquotientfactory(m, n, k)
%
% This follows the quotient geometry described in the following paper:
% P.-A. Absil, L. Amodei and G. Meyer,
% "Two Newton methods on the manifold of fixed-rank matrices endowed
% with Riemannian quotient geometries", arXiv, 2012.
%
% Paper link: http://arxiv.org/abs/1209.0068
%
% A point X on the manifold is represented as a structure with two
% fields: M and N. The matrix M (mxk) is orthonormal, while the matrix N
% (nxk) is full-rank.
%
% Tangent vectors are represented as a structure with two fields (M, N).
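%
% Example (editor's illustrative sketch, not part of the original file):
% assuming Manopt is on the path, a rank-k approximation of an arbitrary
% matrix A (with X = M*N') could be computed as follows:
%
%   m = 100; n = 80; k = 5;
%   A = randn(m, n);                        % an arbitrary data matrix
%   problem.M = fixedrankMNquotientfactory(m, n, k);
%   problem.cost  = @(X) .5*norm(X.M*X.N' - A, 'fro')^2;
%   problem.egrad = @(X) struct('M', (X.M*X.N' - A)*X.N, ...
%                               'N', (X.M*X.N' - A)'*X.M);
%   X = trustregions(problem);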
% This file is part of Manopt: www.manopt.org.
% Original author: Bamdev Mishra, Dec. 30, 2012.
% Contributors:
% Change log:
M.name = @() sprintf('MN'' quotient manifold of %dx%d matrices of rank %d', m, n, k);
M.dim = @() (m+n-k)*k;
% Choice of the metric is motivated by the symmetry present in the
% space
M.inner = @(X, eta, zeta) eta.M(:).'*zeta.M(:) + eta.N(:).'*zeta.N(:);
M.norm = @(X, eta) sqrt(M.inner(X, eta, eta));
M.dist = @(x, y) error('fixedrankMNquotientfactory.dist not implemented yet.');
M.typicaldist = @() 10*k;
symm = @(X) .5*(X+X');
stiefel_proj = @(M, H) H - M*symm(M'*H);
M.egrad2rgrad = @egrad2rgrad;
function eta = egrad2rgrad(X, eta)
eta.M = stiefel_proj(X.M, eta.M);
end
M.ehess2rhess = @ehess2rhess;
function Hess = ehess2rhess(X, egrad, ehess, eta)
% Directional derivative of the Riemannian gradient
Hess.M = ehess.M - eta.M*symm(X.M'*egrad.M);
Hess.M = stiefel_proj(X.M, Hess.M);
Hess.N = ehess.N;
% Projection onto the horizontal space
Hess = M.proj(X, Hess);
end
M.proj = @projection;
function etaproj = projection(X, eta)
% Start by projecting the vector from R^(mxk) x R^(nxk) to the tangent
% space to the total space, that is, eta.M should be in the
% tangent space to Stiefel at X.M and eta.N is arbitrary.
eta.M = stiefel_proj(X.M, eta.M);
% Now project from the tangent space to the horizontal space, that
% is, take care of the quotient.
% First solve a Sylvester equation (A symm., B skew-symm.)
A = X.N'*X.N + eye(k);
B = eta.M'*X.M + eta.N'*X.N;
B = B-B';
omega = lyap(A, -B);
% And project along the vertical space to the horizontal space.
etaproj.M = eta.M + X.M*omega;
etaproj.N = eta.N + X.N*omega;
end
M.exp = @exponential;
function Y = exponential(X, eta, t)
if nargin < 3
t = 1.0;
end
A = t*X.M'*eta.M;
S = t^2*eta.M'*eta.M;
Y.M = [X.M t*eta.M]*expm([A -S ; eye(k) A])*eye(2*k, k)*expm(-A);
% re-orthonormalize (seems necessary from time to time)
[Q R] = qr(Y.M, 0);
Y.M = Q * diag(sign(diag(R)));
Y.N = X.N + t*eta.N;
end
% Factor M lives on the Stiefel manifold, hence we will reuse its
% random generator.
stiefelm = stiefelfactory(m, k);
M.retr = @retraction;
function Y = retraction(X, eta, t)
if nargin < 3
t = 1.0;
end
Y.M = uf(X.M + t*eta.M); % This is a valid retraction
Y.N = X.N + t*eta.N;
end
M.hash = @(X) ['z' hashmd5([X.M(:) ; X.N(:)])];
M.rand = @random;
function X = random()
X.M = stiefelm.rand();
X.N = randn(n, k);
end
M.randvec = @randomvec;
function eta = randomvec(X)
eta.M = randn(m, k);
eta.N = randn(n, k);
eta = projection(X, eta);
nrm = M.norm(X, eta);
eta.M = eta.M / nrm;
eta.N = eta.N / nrm;
end
M.lincomb = @lincomb;
M.zerovec = @(X) struct('M', zeros(m, k), 'N', zeros(n, k));
M.transp = @(x1, x2, d) projection(x2, d);
end
% Linear combination of tangent vectors
function d = lincomb(x, a1, d1, a2, d2) %#ok<INUSL>
if nargin == 3
d.M = a1*d1.M;
d.N = a1*d1.N;
elseif nargin == 5
d.M = a1*d1.M + a2*d2.M;
d.N = a1*d1.N + a2*d2.N;
else
error('Bad use of fixedrankMNquotientfactory.lincomb.');
end
end
function A = uf(A)
[L, unused, R] = svd(A, 0);
A = L*R';
end
|
github | skovnats/madmm-master | fixedrankfactory_2factors.m | .m | madmm-master/functional_maps_L21norm/help_functions/manopt/manopt/manifolds/fixedrank/fixedrankfactory_2factors.m | 5,813 | utf_8 | 70044d83ff10591a75b81f415cb920c2 |
function M = fixedrankfactory_2factors(m, n, k)
% Manifold of m-by-n matrices of rank k with balanced quotient geometry.
%
% function M = fixedrankfactory_2factors(m, n, k)
%
% This follows the balanced quotient geometry described in the following paper:
% G. Meyer, S. Bonnabel and R. Sepulchre,
% "Linear regression under fixed-rank constraints: a Riemannian approach",
% ICML 2011.
%
% Paper link: http://www.icml-2011.org/papers/350_icmlpaper.pdf
%
% A point X on the manifold is represented as a structure with two
% fields: L and R. The matrices L (mxk) and R (nxk) are full column-rank
% matrices such that X = L*R'.
%
% Tangent vectors are represented as a structure with two fields: L, R
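%
% Example (editor's illustrative sketch, not part of the original file):
% assuming Manopt is on the path, a rank-k approximation of an arbitrary
% matrix A (with X = L*R') could be computed as follows:
%
%   m = 100; n = 80; k = 5;
%   A = randn(m, n);                        % an arbitrary data matrix
%   problem.M = fixedrankfactory_2factors(m, n, k);
%   problem.cost  = @(X) .5*norm(X.L*X.R' - A, 'fro')^2;
%   problem.egrad = @(X) struct('L', (X.L*X.R' - A)*X.R, ...
%                               'R', (X.L*X.R' - A)'*X.L);
%   X = trustregions(problem);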
% This file is part of Manopt: www.manopt.org.
% Original author: Bamdev Mishra, Dec. 30, 2012.
% Contributors:
% Change log:
% July 10, 2013 (NB) : added vec, mat, tangent, tangent2ambient
M.name = @() sprintf('LR'' quotient manifold of %dx%d matrices of rank %d', m, n, k);
M.dim = @() (m+n-k)*k;
% Some precomputations at the point X to be used in the inner product (and
% pretty much everywhere else).
function X = prepare(X)
if ~all(isfield(X,{'LtL','RtR','invRtR','invLtL'}))
L = X.L;
R = X.R;
X.LtL = L'*L;
X.RtR = R'*R;
X.invLtL = inv(X.LtL);
X.invRtR = inv(X.RtR);
end
end
% Choice of the metric is motivated by the symmetry present in the space
M.inner = @iproduct;
function ip = iproduct(X, eta, zeta)
X = prepare(X);
ip = trace(X.invLtL*(eta.L'*zeta.L)) + trace( X.invRtR*(eta.R'*zeta.R));
end
M.norm = @(X, eta) sqrt(M.inner(X, eta, eta));
M.dist = @(x, y) error('fixedrankfactory_2factors.dist not implemented yet.');
M.typicaldist = @() 10*k;
symm = @(M) .5*(M+M');
M.egrad2rgrad = @egrad2rgrad;
function eta = egrad2rgrad(X, eta)
X = prepare(X);
eta.L = eta.L*X.LtL;
eta.R = eta.R*X.RtR;
end
M.ehess2rhess = @ehess2rhess;
function Hess = ehess2rhess(X, egrad, ehess, eta)
X = prepare(X);
% Riemannian gradient
rgrad = egrad2rgrad(X, egrad);
% Directional derivative of the Riemannian gradient
Hess.L = ehess.L*X.LtL + 2*egrad.L*symm(eta.L'*X.L);
Hess.R = ehess.R*X.RtR + 2*egrad.R*symm(eta.R'*X.R);
% We need a correction term for the non-constant metric
Hess.L = Hess.L - rgrad.L*((X.invLtL)*symm(X.L'*eta.L)) - eta.L*(X.invLtL*symm(X.L'*rgrad.L)) + X.L*(X.invLtL*symm(eta.L'*rgrad.L));
Hess.R = Hess.R - rgrad.R*((X.invRtR)*symm(X.R'*eta.R)) - eta.R*(X.invRtR*symm(X.R'*rgrad.R)) + X.R*(X.invRtR*symm(eta.R'*rgrad.R));
% Projection onto the horizontal space
Hess = M.proj(X, Hess);
end
M.proj = @projection;
% Projection of the vector eta onto the horizontal space
function etaproj = projection(X, eta)
X = prepare(X);
SS = (X.LtL)*(X.RtR);
AS = (X.LtL)*(X.R'*eta.R) - (eta.L'*X.L)*(X.RtR);
Omega = lyap(SS, SS,-AS);
etaproj.L = eta.L + X.L*Omega';
etaproj.R = eta.R - X.R*Omega;
end
M.tangent = M.proj;
M.tangent2ambient = @(X, eta) eta;
M.retr = @retraction;
function Y = retraction(X, eta, t)
if nargin < 3
t = 1.0;
end
Y.L = X.L + t*eta.L;
Y.R = X.R + t*eta.R;
% Numerical conditioning step: A simpler version.
% We need to ensure that L and R do not have very skewed
% relative norms.
scaling = norm(X.L, 'fro')/norm(X.R, 'fro');
scaling = sqrt(scaling);
Y.L = Y.L / scaling;
Y.R = Y.R * scaling;
% These are reused in the computation of the gradient and Hessian
Y = prepare(Y);
end
M.exp = @exponential;
function Y = exponential(X, eta, t)
if nargin < 3
t = 1.0;
end
Y = retraction(X, eta, t);
warning('manopt:fixedrankfactory_2factors:exp', ...
['Exponential for fixed rank ' ...
'manifold not implemented yet. Used retraction instead.']);
end
M.hash = @(X) ['z' hashmd5([X.L(:) ; X.R(:)])];
M.rand = @random;
function X = random()
% A random point on the total space
X.L = randn(m, k);
X.R = randn(n, k);
X = prepare(X);
end
M.randvec = @randomvec;
function eta = randomvec(X)
% A random vector in the horizontal space
eta.L = randn(m, k);
eta.R = randn(n, k);
eta = projection(X, eta);
nrm = M.norm(X, eta);
eta.L = eta.L / nrm;
eta.R = eta.R / nrm;
end
M.lincomb = @lincomb;
M.zerovec = @(X) struct('L', zeros(m, k),'R', zeros(n, k));
M.transp = @(x1, x2, d) projection(x2, d);
% vec and mat are not isometries, because of the unusual inner metric.
M.vec = @(X, U) [U.L(:) ; U.R(:)];
M.mat = @(X, u) struct('L', reshape(u(1:(m*k)), m, k), ...
'R', reshape(u((m*k+1):end), n, k));
M.vecmatareisometries = @() false;
end
% Linear combination of tangent vectors
function d = lincomb(x, a1, d1, a2, d2) %#ok<INUSL>
if nargin == 3
d.L = a1*d1.L;
d.R = a1*d1.R;
elseif nargin == 5
d.L = a1*d1.L + a2*d2.L;
d.R = a1*d1.R + a2*d2.R;
else
error('Bad use of fixedrankfactory_2factors.lincomb.');
end
end
|
github | skovnats/madmm-master | obliquefactory.m | .m | madmm-master/functional_maps_L21norm/help_functions/manopt/manopt/manifolds/oblique/obliquefactory.m | 6,609 | utf_8 | 1031640cf68e1bf9252af77d1002836a |
function M = obliquefactory(n, m, transposed)
% Returns a manifold struct to optimize over matrices w/ unit-norm columns.
%
% function M = obliquefactory(n, m)
% function M = obliquefactory(n, m, transposed)
%
% Oblique manifold: deals with matrices of size n x m such that each column
% has unit 2-norm, i.e., is a point on the unit sphere in R^n. The metric
% is such that the oblique manifold is a Riemannian submanifold of the
% space of nxm matrices with the usual trace inner product, i.e., the usual
% metric.
%
% If transposed is set to true (it is false by default), then the matrices
% are transposed: a point Y on the manifold is a matrix of size m x n and
% each row has unit 2-norm. It is the same geometry, just a different
% representation.
%
% See also: spherefactory
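%
% Example (editor's illustrative sketch, not part of the original file):
% assuming Manopt is on the path, the closest matrix with unit-norm
% columns to an arbitrary matrix A can be found as follows:
%
%   n = 10; m = 50;
%   A = randn(n, m);                        % an arbitrary data matrix
%   problem.M = obliquefactory(n, m);
%   problem.cost  = @(X) .5*norm(X - A, 'fro')^2;
%   problem.egrad = @(X) X - A;             % Euclidean gradient
%   X = trustregions(problem);              % columns of X have unit 2-norm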
% This file is part of Manopt: www.manopt.org.
% Original author: Nicolas Boumal, Dec. 30, 2012.
% Contributors:
% Change log:
%
% July 16, 2013 (NB) :
% Added 'transposed' option, mainly for ease of comparison with the
% elliptope geometry.
%
% Nov. 29, 2013 (NB) :
% Added normalize_columns function to make it easier to exploit the
% bsxfun formulation of column normalization, which avoids using for
% loops and provides performance gains. The exponential still uses a
% for loop.
if ~exist('transposed', 'var') || isempty(transposed)
transposed = false;
end
if transposed
trnsp = @(X) X';
else
trnsp = @(X) X;
end
M.name = @() sprintf('Oblique manifold OB(%d, %d)', n, m);
M.dim = @() (n-1)*m;
M.inner = @(x, d1, d2) d1(:).'*d2(:);
M.norm = @(x, d) norm(d(:));
M.dist = @(x, y) norm(real(acos(sum(trnsp(x).*trnsp(y), 1))));
M.typicaldist = @() pi*sqrt(m);
M.proj = @(X, U) trnsp(projection(trnsp(X), trnsp(U)));
M.tangent = M.proj;
% For Riemannian submanifolds, converting a Euclidean gradient into a
% Riemannian gradient amounts to an orthogonal projection.
M.egrad2rgrad = M.proj;
M.ehess2rhess = @ehess2rhess;
function rhess = ehess2rhess(X, egrad, ehess, U)
X = trnsp(X);
egrad = trnsp(egrad);
ehess = trnsp(ehess);
U = trnsp(U);
PXehess = projection(X, ehess);
inners = sum(X.*egrad, 1);
rhess = PXehess - bsxfun(@times, U, inners);
rhess = trnsp(rhess);
end
M.exp = @exponential;
% Exponential on the oblique manifold
function y = exponential(x, d, t)
x = trnsp(x);
d = trnsp(d);
if nargin < 3
t = 1.0;
end
m = size(x, 2);
y = zeros(size(x));
if t ~= 0
for i = 1 : m
y(:, i) = sphere_exponential(x(:, i), d(:, i), t);
end
else
y = x;
end
y = trnsp(y);
end
M.log = @logarithm;
function v = logarithm(x1, x2)
x1 = trnsp(x1);
x2 = trnsp(x2);
v = M.proj(x1, x2 - x1);
dists = acos(sum(x1.*x2, 1));
norms = sqrt(sum(v.^2, 1));
factors = dists./norms;
% factors(dists <= 1e-6) = 1;
v = bsxfun(@times, v, factors);
v = trnsp(v);
end
M.retr = @retraction;
% Retraction on the oblique manifold
function y = retraction(x, d, t)
x = trnsp(x);
d = trnsp(d);
if nargin < 3
t = 1.0;
end
m = size(x, 2);
if t ~= 0
y = normalize_columns(x + t*d);
else
y = x;
end
y = trnsp(y);
end
M.hash = @(x) ['z' hashmd5(x(:))];
M.rand = @() trnsp(random(n, m));
M.randvec = @(x) trnsp(randomvec(n, m, trnsp(x)));
M.lincomb = @lincomb;
M.zerovec = @(x) trnsp(zeros(n, m));
M.transp = @(x1, x2, d) M.proj(x2, d);
M.pairmean = @pairmean;
function y = pairmean(x1, x2)
y = trnsp(x1+x2);
y = normalize_columns(y);
y = trnsp(y);
end
% vec returns a vector representation of an input tangent vector which
% is represented as a matrix. mat returns the original matrix
% representation of the input vector representation of a tangent
% vector. vec and mat are thus inverse of each other. They are
% furthermore isometries between a subspace of R^nm and the tangent
% space at x.
vect = @(X) X(:);
M.vec = @(x, u_mat) vect(trnsp(u_mat));
M.mat = @(x, u_vec) trnsp(reshape(u_vec, [n, m]));
M.vecmatareisometries = @() true;
end
% Given a matrix X, returns the same matrix but with each column scaled so
% that they have unit 2-norm.
function X = normalize_columns(X)
norms = sqrt(sum(X.^2, 1));
X = bsxfun(@times, X, 1./norms);
end
% Orthogonal projection of the ambient vector H onto the tangent space at X
function PXH = projection(X, H)
% Compute the inner product between each vector H(:, i) with its root
% point X(:, i), that is, X(:, i).' * H(:, i). Returns a row vector.
inners = sum(X.*H, 1);
% Subtract from H the components of the H(:, i)'s that are parallel to
% the root points X(:, i).
PXH = H - bsxfun(@times, X, inners);
% % Equivalent but slow code:
% m = size(X, 2);
% PXH = zeros(size(H));
% for i = 1 : m
% PXH(:, i) = H(:, i) - X(:, i) * (X(:, i)'*H(:, i));
% end
end
% Exponential on the sphere.
function y = sphere_exponential(x, d, t)
if nargin == 2
t = 1.0;
end
td = t*d;
nrm_td = norm(td);
if nrm_td > 1e-6
y = x*cos(nrm_td) + (td/nrm_td)*sin(nrm_td);
else
% if the step is too small, to avoid dividing by nrm_td, we choose
% to approximate with this retraction-like step.
y = x + td;
y = y / norm(y);
end
end
% Uniform random sampling on the sphere.
function x = random(n, m)
x = normalize_columns(randn(n, m));
end
% Random normalized tangent vector at x.
function d = randomvec(n, m, x)
d = randn(n, m);
d = projection(x, d);
d = d / norm(d(:));
end
% Linear combination of tangent vectors
function d = lincomb(x, a1, d1, a2, d2) %#ok<INUSL>
if nargin == 3
d = a1*d1;
elseif nargin == 5
d = a1*d1 + a2*d2;
else
error('Bad use of oblique.lincomb.');
end
end
|
github
|
skovnats/madmm-master
|
stiefelfactory.m
|
.m
|
madmm-master/functional_maps_L21norm/help_functions/manopt/manopt/manifolds/stiefel/stiefelfactory.m
| 4,989 |
utf_8
|
5cc739262d8e75c600af8497647ee711
|
function M = stiefelfactory(n, p, k)
% Returns a manifold structure to optimize over orthonormal matrices.
%
% function M = stiefelfactory(n, p)
% function M = stiefelfactory(n, p, k)
%
% The Stiefel manifold is the set of orthonormal nxp matrices. If k
% is larger than 1, this is the Cartesian product of the Stiefel manifold
% taken k times. The metric is such that the manifold is a Riemannian
% submanifold of R^nxp equipped with the usual trace inner product, that
% is, it is the usual metric.
%
% Points are represented as matrices X of size n x p x k (or n x p if k=1,
% which is the default) such that each n x p matrix is orthonormal,
% i.e., X'*X = eye(p) if k = 1, or X(:, :, i)' * X(:, :, i) = eye(p) for
% i = 1 : k if k > 1. Tangent vectors are represented as matrices the same
% size as points.
%
% By default, k = 1.
%
% See also: grassmannfactory rotationsfactory
% This file is part of Manopt: www.manopt.org.
% Original author: Nicolas Boumal, Dec. 30, 2012.
% Contributors:
% Change log:
% July 5, 2013 (NB) : Added ehess2rhess.
% Jan. 27, 2014 (BM) : Bug in ehess2rhess corrected.
% June 24, 2014 (NB) : Added true exponential map and changed the randvec
% function so that it now returns a globally
% normalized vector, not a vector where each
% component is normalized (this only matters if k>1).
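%
% Example (editor's sketch, not part of the original file): computing a
% dominant invariant subspace by minimizing -trace(X'*A*X); the size and
% the random symmetric matrix A are illustrative assumptions.
%
%     n = 100; p = 3;
%     A = randn(n); A = .5*(A + A');
%     M = stiefelfactory(n, p);
%     problem.M = M;
%     problem.cost = @(X) -trace(X'*A*X);
%     egrad = @(X) -2*A*X;
%     problem.grad = @(X) M.egrad2rgrad(X, egrad(X));
%     X = trustregions(problem);    % X'*X is (numerically) eye(p)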
if ~exist('k', 'var') || isempty(k)
k = 1;
end
if k == 1
M.name = @() sprintf('Stiefel manifold St(%d, %d)', n, p);
elseif k > 1
M.name = @() sprintf('Product Stiefel manifold St(%d, %d)^%d', n, p, k);
else
error('k must be an integer no less than 1.');
end
M.dim = @() k*(n*p - .5*p*(p+1));
M.inner = @(x, d1, d2) d1(:).'*d2(:);
M.norm = @(x, d) norm(d(:));
M.dist = @(x, y) error('stiefel.dist not implemented yet.');
M.typicaldist = @() sqrt(p*k);
M.proj = @projection;
function Up = projection(X, U)
XtU = multiprod(multitransp(X), U);
symXtU = multisym(XtU);
Up = U - multiprod(X, symXtU);
% The code above is equivalent to, but much faster than, the code below.
%
% Up = zeros(size(U));
% function A = sym(A), A = .5*(A+A'); end
% for i = 1 : k
% Xi = X(:, :, i);
% Ui = U(:, :, i);
% Up(:, :, i) = Ui - Xi*sym(Xi'*Ui);
% end
end
M.tangent = M.proj;
% For Riemannian submanifolds, converting a Euclidean gradient into a
% Riemannian gradient amounts to an orthogonal projection.
M.egrad2rgrad = M.proj;
M.ehess2rhess = @ehess2rhess;
function rhess = ehess2rhess(X, egrad, ehess, H)
XtG = multiprod(multitransp(X), egrad);
symXtG = multisym(XtG);
HsymXtG = multiprod(H, symXtG);
rhess = projection(X, ehess - HsymXtG);
end
M.retr = @retraction;
function Y = retraction(X, U, t)
if nargin < 3
t = 1.0;
end
Y = X + t*U;
for i = 1 : k
[Q, R] = qr(Y(:, :, i), 0);
% The instruction with R ensures we are not flipping signs
% of some columns, which should never happen in modern Matlab
% versions but may be an issue with older versions.
Y(:, :, i) = Q * diag(sign(sign(diag(R))+.5));
end
end
M.exp = @exponential;
function Y = exponential(X, U, t)
if nargin == 2
t = 1;
end
tU = t*U;
Y = zeros(size(X));
for i = 1 : k
% From a formula by Ross Lippert, Example 5.4.2 in AMS08.
Xi = X(:, :, i);
Ui = tU(:, :, i);
Y(:, :, i) = [Xi Ui] * ...
expm([Xi'*Ui , -Ui'*Ui ; eye(p) , Xi'*Ui]) * ...
[ expm(-Xi'*Ui) ; zeros(p) ];
end
end
M.hash = @(X) ['z' hashmd5(X(:))];
M.rand = @random;
function X = random()
X = zeros(n, p, k);
for i = 1 : k
[Q, unused] = qr(randn(n, p), 0); %#ok<NASGU>
X(:, :, i) = Q;
end
end
M.randvec = @randomvec;
function U = randomvec(X)
U = projection(X, randn(n, p, k));
U = U / norm(U(:));
end
M.lincomb = @lincomb;
M.zerovec = @(x) zeros(n, p, k);
M.transp = @(x1, x2, d) projection(x2, d);
M.vec = @(x, u_mat) u_mat(:);
M.mat = @(x, u_vec) reshape(u_vec, [n, p, k]);
M.vecmatareisometries = @() true;
end
% Linear combination of tangent vectors
function d = lincomb(x, a1, d1, a2, d2) %#ok<INUSL>
if nargin == 3
d = a1*d1;
elseif nargin == 5
d = a1*d1 + a2*d2;
else
error('Bad use of stiefel.lincomb.');
end
end
|
github
|
skovnats/madmm-master
|
rotationsfactory.m
|
.m
|
madmm-master/functional_maps_L21norm/help_functions/manopt/manopt/manifolds/rotations/rotationsfactory.m
| 4,857 |
utf_8
|
421ccf6b88f519f989d6dd87fb0a1128
|
function M = rotationsfactory(n, k)
% Returns a manifold structure to optimize over rotation matrices.
%
% function M = rotationsfactory(n)
% function M = rotationsfactory(n, k)
%
% Special orthogonal group (the manifold of rotations): deals with matrices
% R of size n x n x k (or n x n if k = 1, which is the default) such that
% each n x n matrix is orthogonal, with determinant 1, i.e., X'*X = eye(n)
% if k = 1, or X(:, :, i)' * X(:, :, i) = eye(n) for i = 1 : k if k > 1.
%
% This is a description of SO(n)^k with the induced metric from the
% embedding space (R^nxn)^k, i.e., this manifold is a Riemannian
% submanifold of (R^nxn)^k endowed with the usual trace inner product.
%
% Tangent vectors are represented in the Lie algebra, i.e., as skew
% symmetric matrices. Use the function M.tangent2ambient(X, H) to switch
% from the Lie algebra representation to the embedding space
% representation.
%
% By default, k = 1.
%
% See also: stiefelfactory
% This file is part of Manopt: www.manopt.org.
% Original author: Nicolas Boumal, Dec. 30, 2012.
% Contributors:
% Change log:
% Jan. 31, 2013, NB : added egrad2rgrad and ehess2rhess
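%
% Example (editor's sketch, not part of the original file): nearest
% rotation to a given matrix A (an orthogonal Procrustes-type problem);
% A and the dimension below are illustrative assumptions.
%
%     n = 3;
%     M = rotationsfactory(n);
%     A = randn(n);
%     problem.M = M;
%     problem.cost = @(R) -trace(R'*A);
%     egrad = @(R) -A;
%     problem.grad = @(R) M.egrad2rgrad(R, egrad(R));
%     R = trustregions(problem);    % R'*R = eye(n) and det(R) = +1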
if ~exist('k', 'var') || isempty(k)
k = 1;
end
if k == 1
M.name = @() sprintf('Rotations manifold SO(%d)', n);
elseif k > 1
M.name = @() sprintf('Product rotations manifold SO(%d)^%d', n, k);
else
error('k must be an integer no less than 1.');
end
M.dim = @() k*nchoosek(n, 2);
M.inner = @(x, d1, d2) d1(:).'*d2(:);
M.norm = @(x, d) norm(d(:));
M.typicaldist = @() pi*sqrt(n*k);
M.proj = @(X, H) multiskew(multiprod(multitransp(X), H));
M.tangent = @(X, H) multiskew(H);
M.tangent2ambient = @(X, U) multiprod(X, U);
M.egrad2rgrad = M.proj;
M.ehess2rhess = @ehess2rhess;
function Rhess = ehess2rhess(X, Egrad, Ehess, H)
% Reminder: H contains skew-symmetric matrices. The actual
% direction that the point X is moved along is X*H.
Xt = multitransp(X);
XtEgrad = multiprod(Xt, Egrad);
symXtEgrad = multisym(XtEgrad);
XtEhess = multiprod(Xt, Ehess);
Rhess = multiskew( XtEhess - multiprod(H, symXtEgrad) );
end
M.retr = @retraction;
function Y = retraction(X, U, t)
if nargin == 3
tU = t*U;
else
tU = U;
end
Y = X + multiprod(X, tU);
for i = 1 : k
[Q R] = qr(Y(:, :, i));
% The instruction with R ensures we are not flipping signs
% of some columns, which should never happen in modern Matlab
% versions but may be an issue with older versions.
Y(:, :, i) = Q * diag(sign(sign(diag(R))+.5));
% This is guaranteed to always yield orthogonal matrices with
% determinant +1. Simply look at the eigenvalues of a skew
% symmetric matrix, then at those of the identity plus that matrix,
% and compute their product for the determinant: it is strictly
% positive in all cases.
end
end
M.exp = @exponential;
function Y = exponential(X, U, t)
if nargin == 3
exptU = t*U;
else
exptU = U;
end
for i = 1 : k
exptU(:, :, i) = expm(exptU(:, :, i));
end
Y = multiprod(X, exptU);
end
M.log = @logarithm;
function U = logarithm(X, Y)
U = multiprod(multitransp(X), Y);
for i = 1 : k
% The result of logm should be real in theory, but it is
% numerically useful to force it.
U(:, :, i) = real(logm(U(:, :, i)));
end
% Ensure the tangent vector is in the Lie algebra.
U = multiskew(U);
end
M.hash = @(X) ['z' hashmd5(X(:))];
M.rand = @() randrot(n, k);
M.randvec = @randomvec;
function U = randomvec(X) %#ok<INUSD>
U = randskew(n, k);
nrmU = sqrt(U(:).'*U(:));
U = U / nrmU;
end
M.lincomb = @lincomb;
M.zerovec = @(x) zeros(n, n, k);
M.transp = @(x1, x2, d) d;
M.pairmean = @pairmean;
function Y = pairmean(X1, X2)
V = M.log(X1, X2);
Y = M.exp(X1, .5*V);
end
M.dist = @(x, y) M.norm(x, M.log(x, y));
M.vec = @(x, u_mat) u_mat(:);
M.mat = @(x, u_vec) reshape(u_vec, [n, n, k]);
M.vecmatareisometries = @() true;
end
% Linear combination of tangent vectors
function d = lincomb(x, a1, d1, a2, d2) %#ok<INUSL>
if nargin == 3
d = a1*d1;
elseif nargin == 5
d = a1*d1 + a2*d2;
else
error('Bad use of rotations.lincomb.');
end
end
|
github
|
skovnats/madmm-master
|
spherecomplexfactory.m
|
.m
|
madmm-master/functional_maps_L21norm/help_functions/manopt/manopt/manifolds/sphere/spherecomplexfactory.m
| 3,285 |
utf_8
|
28cbdaa05de778558800a89c16acad64
|
function M = spherecomplexfactory(n, m)
% Returns a manifold struct to optimize over unit-norm complex matrices.
%
% function M = spherecomplexfactory(n)
% function M = spherecomplexfactory(n, m)
%
% Manifold of n-by-m complex matrices of unit Frobenius norm.
% By default, m = 1, which corresponds to the unit sphere in C^n. The
% metric is such that the sphere is a Riemannian submanifold of the space
% of 2nx2m real matrices with the usual trace inner product, i.e., the
% usual metric.
%
% See also: spherefactory
% This file is part of Manopt: www.manopt.org.
% Original author: Nicolas Boumal, Dec. 30, 2012.
% Contributors:
% Change log:
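%
% Example (editor's sketch, not part of the original file): a leading
% eigenvector of a Hermitian matrix A via Rayleigh quotient maximization;
% A and its size are illustrative assumptions.
%
%     n = 50;
%     A = randn(n) + 1i*randn(n); A = .5*(A + A');
%     M = spherecomplexfactory(n);
%     problem.M = M;
%     problem.cost = @(x) -real(x'*A*x);
%     egrad = @(x) -2*A*x;
%     problem.grad = @(x) M.egrad2rgrad(x, egrad(x));
%     x = trustregions(problem);    % norm(x) is (numerically) 1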
if ~exist('m', 'var')
m = 1;
end
if m == 1
M.name = @() sprintf('Complex sphere S^%d', n-1);
else
M.name = @() sprintf('Unit F-norm %dx%d complex matrices', n, m);
end
M.dim = @() 2*(n*m)-1;
M.inner = @(x, d1, d2) real(d1(:)'*d2(:));
M.norm = @(x, d) norm(d, 'fro');
M.dist = @(x, y) acos(real(x(:)'*y(:)));
M.typicaldist = @() pi;
M.proj = @(x, d) reshape(d(:) - x(:)*(real(x(:)'*d(:))), n, m);
% For Riemannian submanifolds, converting a Euclidean gradient into a
% Riemannian gradient amounts to an orthogonal projection.
M.egrad2rgrad = M.proj;
M.tangent = M.proj;
M.exp = @exponential;
M.retr = @retraction;
M.log = @logarithm;
function v = logarithm(x1, x2)
error('The logarithmic map is not yet implemented for this manifold.');
end
M.hash = @(x) ['z' hashmd5([real(x(:)) ; imag(x(:))])];
M.rand = @() random(n, m);
M.randvec = @(x) randomvec(n, m, x);
M.lincomb = @lincomb;
M.zerovec = @(x) zeros(n, m);
M.transp = @(x1, x2, d) M.proj(x2, d);
M.pairmean = @pairmean;
function y = pairmean(x1, x2)
y = x1+x2;
y = y / norm(y, 'fro');
end
end
% Exponential on the sphere
function y = exponential(x, d, t)
if nargin == 2
t = 1;
end
td = t*d;
nrm_td = norm(td, 'fro');
if nrm_td > 1e-6
y = x*cos(nrm_td) + td*(sin(nrm_td)/nrm_td);
else
% If the step is too small, to avoid dividing by nrm_td, we choose
% to approximate with this retraction-like step.
y = x + td;
y = y / norm(y, 'fro');
end
end
% Retraction on the sphere
function y = retraction(x, d, t)
if nargin == 2
t = 1;
end
y = x+t*d;
y = y/norm(y, 'fro');
end
% Uniform random sampling on the sphere.
function x = random(n, m)
x = randn(n, m) + 1i*randn(n, m);
x = x/norm(x, 'fro');
end
% Random normalized tangent vector at x.
function d = randomvec(n, m, x)
d = randn(n, m) + 1i*randn(n, m);
d = reshape(d(:) - x(:)*(real(x(:)'*d(:))), n, m);
d = d / norm(d, 'fro');
end
% Linear combination of tangent vectors
function d = lincomb(x, a1, d1, a2, d2) %#ok<INUSL>
if nargin == 3
d = a1*d1;
elseif nargin == 5
d = a1*d1 + a2*d2;
else
error('Bad use of spherecomplex.lincomb.');
end
end
|
github
|
skovnats/madmm-master
|
spherefactory.m
|
.m
|
madmm-master/functional_maps_L21norm/help_functions/manopt/manopt/manifolds/sphere/spherefactory.m
| 3,447 |
utf_8
|
1b575cecaef843bcda1574bc09b4760c
|
function M = spherefactory(n, m)
% Returns a manifold struct to optimize over unit-norm vectors or matrices.
%
% function M = spherefactory(n)
% function M = spherefactory(n, m)
%
% Manifold of n-by-m real matrices of unit Frobenius norm.
% By default, m = 1, which corresponds to the unit sphere in R^n. The
% metric is such that the sphere is a Riemannian submanifold of the space
% of nxm matrices with the usual trace inner product, i.e., the usual
% metric.
%
% See also: obliquefactory spherecomplexfactory
% This file is part of Manopt: www.manopt.org.
% Original author: Nicolas Boumal, Dec. 30, 2012.
% Contributors:
% Change log:
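%
% Example (editor's sketch, not part of the original file): a dominant
% eigenvector of a symmetric matrix A; A and its size are illustrative
% assumptions.
%
%     n = 100;
%     A = randn(n); A = .5*(A + A');
%     M = spherefactory(n);
%     problem.M = M;
%     problem.cost = @(x) -x'*A*x;
%     egrad = @(x) -2*A*x;
%     problem.grad = @(x) M.egrad2rgrad(x, egrad(x));
%     x = conjugategradient(problem);    % norm(x) is (numerically) 1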
if ~exist('m', 'var')
m = 1;
end
if m == 1
M.name = @() sprintf('Sphere S^%d', n-1);
else
M.name = @() sprintf('Unit F-norm %dx%d matrices', n, m);
end
M.dim = @() n*m-1;
M.inner = @(x, d1, d2) d1(:).'*d2(:);
M.norm = @(x, d) norm(d, 'fro');
M.dist = @(x, y) real(acos(x(:).'*y(:)));
M.typicaldist = @() pi;
M.proj = @(x, d) d - x*(x(:).'*d(:));
M.tangent = M.proj;
% For Riemannian submanifolds, converting a Euclidean gradient into a
% Riemannian gradient amounts to an orthogonal projection.
M.egrad2rgrad = M.proj;
M.ehess2rhess = @ehess2rhess;
function rhess = ehess2rhess(x, egrad, ehess, u)
rhess = M.proj(x, ehess) - (x(:)'*egrad(:))*u;
end
M.exp = @exponential;
M.retr = @retraction;
M.log = @logarithm;
function v = logarithm(x1, x2)
v = M.proj(x1, x2 - x1);
di = M.dist(x1, x2);
nv = norm(v, 'fro');
v = v * (di / nv);
end
M.hash = @(x) ['z' hashmd5(x(:))];
M.rand = @() random(n, m);
M.randvec = @(x) randomvec(n, m, x);
M.lincomb = @lincomb;
M.zerovec = @(x) zeros(n, m);
M.transp = @(x1, x2, d) M.proj(x2, d);
M.pairmean = @pairmean;
function y = pairmean(x1, x2)
y = x1+x2;
y = y / norm(y, 'fro');
end
M.vec = @(x, u_mat) u_mat(:);
M.mat = @(x, u_vec) reshape(u_vec, [n, m]);
M.vecmatareisometries = @() true;
end
% Exponential on the sphere
function y = exponential(x, d, t)
if nargin == 2
t = 1;
end
td = t*d;
nrm_td = norm(td, 'fro');
if nrm_td > 1e-6
y = x*cos(nrm_td) + td*(sin(nrm_td)/nrm_td);
else
% if the step is too small, to avoid dividing by nrm_td, we choose
% to approximate with this retraction-like step.
y = x + td;
y = y / norm(y, 'fro');
end
end
% Retraction on the sphere
function y = retraction(x, d, t)
if nargin == 2
t = 1;
end
y = x + t*d;
y = y / norm(y, 'fro');
end
% Uniform random sampling on the sphere.
function x = random(n, m)
x = randn(n, m);
x = x/norm(x, 'fro');
end
% Random normalized tangent vector at x.
function d = randomvec(n, m, x)
d = randn(n, m);
d = d - x*(x(:).'*d(:));
d = d / norm(d, 'fro');
end
% Linear combination of tangent vectors
function d = lincomb(x, a1, d1, a2, d2) %#ok<INUSL>
if nargin == 3
d = a1*d1;
elseif nargin == 5
d = a1*d1 + a2*d2;
else
error('Bad use of sphere.lincomb.');
end
end
|
github
|
skovnats/madmm-master
|
trustregions.m
|
.m
|
madmm-master/functional_maps_L21norm/help_functions/manopt/manopt/solvers/trustregions/trustregions.m
| 27,503 |
utf_8
|
16c81a00a44c928fd6ca503399b04111
|
function [x, cost, info, options] = trustregions(problem, x, options)
% Riemannian trust-regions solver for optimization on manifolds.
%
% function [x, cost, info, options] = trustregions(problem)
% function [x, cost, info, options] = trustregions(problem, x0)
% function [x, cost, info, options] = trustregions(problem, x0, options)
% function [x, cost, info, options] = trustregions(problem, [], options)
%
% This is the Riemannian Trust-Region solver (with tCG inner solve), named
% RTR. This solver will attempt to minimize the cost function described in
% the problem structure. It requires the availability of the cost function
% and of its gradient. It will issue calls for the Hessian. If no Hessian
% nor approximate Hessian is provided, a standard approximation of the
% Hessian based on the gradient will be computed. If a preconditioner for
% the Hessian is provided, it will be used.
%
% For a description of the algorithm and theorems offering convergence
% guarantees, see the references below. Documentation for this solver is
% available online at:
%
% http://www.manopt.org/solver_documentation_trustregions.html
%
%
% The initial iterate is x0 if it is provided. Otherwise, a random point on
% the manifold is picked. To specify options whilst not specifying an
% initial iterate, give x0 as [] (the empty matrix).
%
% The two outputs 'x' and 'cost' are the last reached point on the manifold
% and its cost. Notice that x is not necessarily the best reached point,
% because this solver is not forced to be a descent method. In particular,
% very close to convergence, it is sometimes preferable to accept very
% slight increases in the cost value (on the order of the machine epsilon)
% in the process of reaching fine convergence. In practice, this is not a
% limiting factor, as normally one does not need fine enough convergence
% that this becomes an issue.
%
% The output 'info' is a struct-array which contains information about the
% iterations:
% iter (integer)
% The (outer) iteration number, or number of steps considered
% (whether accepted or rejected). The initial guess is 0.
% cost (double)
% The corresponding cost value.
% gradnorm (double)
% The (Riemannian) norm of the gradient.
% numinner (integer)
% The number of inner iterations executed to compute this iterate.
% Inner iterations are truncated-CG steps. Each one requires a
% Hessian (or approximate Hessian) evaluation.
% time (double)
% The total elapsed time in seconds to reach the corresponding cost.
% rho (double)
% The performance ratio for the iterate.
% rhonum, rhoden (double)
% Regularized numerator and denominator of the performance ratio:
% rho = rhonum/rhoden. See options.rho_regularization.
% accepted (boolean)
% Whether the proposed iterate was accepted or not.
% stepsize (double)
% The (Riemannian) norm of the vector returned by the inner solver
% tCG and which is retracted to obtain the proposed next iterate. If
% accepted = true for the corresponding iterate, this is the size of
% the step from the previous to the new iterate. If accepted is
% false, the step was not executed and this is the size of the
% rejected step.
% Delta (double)
% The trust-region radius at the outer iteration.
% cauchy (boolean)
% Whether the Cauchy point was used or not (if useRand is true).
% And possibly additional information logged by options.statsfun.
% For example, type [info.gradnorm] to obtain a vector of the successive
% gradient norms reached at each (outer) iteration.
%
% The options structure is used to overwrite the default values. All
% options have a default value and are hence optional. To force an option
% value, pass an options structure with a field options.optionname, where
% optionname is one of the following and the default value is indicated
% between parentheses:
%
% tolgradnorm (1e-6)
% The algorithm terminates if the norm of the gradient drops below
% this. For well-scaled problems, a rule of thumb is that you can
% expect to reduce the gradient norm by 8 orders of magnitude
% (sqrt(eps)) compared to the gradient norm at a "typical" point (a
% rough initial iterate for example). Further decrease is sometimes
% possible, but inexact floating point arithmetic will eventually
% limit the final accuracy. If tolgradnorm is set too low, the
% algorithm may end up iterating forever (or at least until another
% stopping criterion triggers).
% maxiter (1000)
% The algorithm terminates if maxiter (outer) iterations were executed.
% maxtime (Inf)
% The algorithm terminates if maxtime seconds elapsed.
% miniter (3)
% Minimum number of outer iterations (used only if useRand is true).
% mininner (1)
% Minimum number of inner iterations (for tCG).
% maxinner (problem.M.dim() : the manifold's dimension)
% Maximum number of inner iterations (for tCG).
% Delta_bar (problem.M.typicaldist() or sqrt(problem.M.dim()))
% Maximum trust-region radius. If you specify this parameter but not
% Delta0, then Delta0 will be set to 1/8 times this parameter.
% Delta0 (Delta_bar/8)
% Initial trust-region radius. If you observe a long plateau at the
% beginning of the convergence plot (gradient norm VS iteration), it
% may pay off to try to tune this parameter to shorten the plateau.
% You should not set this parameter without setting Delta_bar.
% useRand (false)
% Set to true if the trust-region solve is to be initiated with a
% random tangent vector. If set to true, no preconditioner will be
% used. This option is set to true in some scenarios to escape saddle
% points, but is otherwise seldom activated.
% kappa (0.1)
% Inner kappa convergence tolerance.
% theta (1.0)
% Inner theta convergence tolerance.
% rho_prime (0.1)
% Accept/reject ratio : if rho is at least rho_prime, the outer
% iteration is accepted. Otherwise, it is rejected. In case it is
% rejected, the trust-region radius will have been decreased.
% To ensure this, rho_prime must be strictly smaller than 1/4.
% rho_regularization (1e3)
% Close to convergence, evaluating the performance ratio rho is
% numerically challenging. Meanwhile, close to convergence, the
% quadratic model should be a good fit and the steps should be
% accepted. Regularization lets rho go to 1 as the model decrease and
% the actual decrease go to zero. Set this option to zero to disable
% regularization (not recommended). See in-code for the specifics.
% statsfun (none)
% Function handle to a function that will be called after each
% iteration to provide the opportunity to log additional statistics.
% They will be returned in the info struct. See the generic Manopt
% documentation about solvers for further information. statsfun is
% called with the point x that was reached last, after the
% accept/reject decision. See comment below.
% stopfun (none)
% Function handle to a function that will be called at each iteration
% to provide the opportunity to specify additional stopping criteria.
% See the generic Manopt documentation about solvers for further
% information.
% verbosity (2)
% Integer number used to tune the amount of output the algorithm
% generates during execution (mostly as text in the command window).
% The higher, the more output. 0 means silent. 3 and above includes a
% display of the options structure at the beginning of the execution.
% debug (false)
% Set to true to allow the algorithm to perform additional
% computations for debugging purposes. If a debugging test fails, you
% will be informed of it, usually via the command window. Be aware
% that these additional computations appear in the algorithm timings
% too.
% storedepth (20)
% Maximum number of different points x of the manifold for which a
% store structure will be kept in memory in the storedb. If the
% caching features of Manopt are not used, this is irrelevant. If
% memory usage is an issue, you may try to lower this number.
% Profiling may then help to investigate if a performance hit was
% incurred as a result.
%
% Notice that statsfun is called with the point x that was reached last,
% after the accept/reject decision. Hence: if the step was accepted, we get
% that new x, with a store which only saw the call for the cost and for the
% gradient. If the step was rejected, we get the same x as previously, with
% the store structure containing everything that was computed at that point
% (possibly including previous rejects at that same point). Hence, statsfun
% should not be used in conjunction with the store to count operations for
% example. Instead, you could use a global variable and increment that
% variable directly from the cost related functions. It is however possible
% to use statsfun with the store to compute, for example, alternate merit
% functions on the point x.
%
% See also: steepestdescent conjugategradient manopt/examples
% This file is part of Manopt: www.manopt.org.
% This code is an adaptation to Manopt of the original GenRTR code:
% RTR - Riemannian Trust-Region
% (c) 2004-2007, P.-A. Absil, C. G. Baker, K. A. Gallivan
% Florida State University
% School of Computational Science
% (http://www.math.fsu.edu/~cbaker/GenRTR/?page=download)
% See accompanying license file.
% The adaptation was executed by Nicolas Boumal.
%
% Change log:
%
% NB April 3, 2013:
% tCG now returns the Hessian along the returned direction eta, so
% that we do not compute that Hessian redundantly: some savings at
% each iteration. Similarly, if the useRand flag is on, we spare an
% extra Hessian computation at each outer iteration too, owing to
% some modifications in the Cauchy point section of the code specific
% to useRand = true.
%
% NB Aug. 22, 2013:
% This function is now Octave compatible. The transition called for
% two changes which would otherwise not be advisable. (1) tic/toc is
% now used as is, as opposed to the safer way:
% t = tic(); elapsed = toc(t);
% And (2), the (formerly inner) function savestats was moved outside
% the main function to not be nested anymore. This is arguably less
% elegant, but Octave does not (and likely will not) support nested
% functions.
%
% NB Dec. 2, 2013:
% The in-code documentation was largely revised and expanded.
%
% NB Dec. 2, 2013:
% The former heuristic which triggered when rhonum was very small and
% forced rho = 1 has been replaced by a smoother heuristic which
% consists in regularizing rhonum and rhoden before computing their
% ratio. It is tunable via options.rho_regularization. Furthermore,
% the solver now detects if tCG did not obtain a model decrease
% (which is theoretically impossible but may happen because of
% numerical errors and/or because of a nonlinear/nonsymmetric Hessian
% operator, which is the case for finite difference approximations).
% When such an anomaly is detected, the step is rejected and the
% trust region radius is decreased.
%
% NB Dec. 3, 2013:
% The stepsize is now registered at each iteration, at a small
% additional cost. The defaults for Delta_bar and Delta0 are better
% defined. Setting Delta_bar in the options will automatically set
% Delta0 accordingly. In Manopt 1.0.4, the defaults for these options
% were not treated appropriately because of an incorrect use of the
% isfield() built-in function.
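%
% Example (editor's sketch, not part of the original file): a minimal call
% pattern on the sphere; the matrix A and the option values below are
% illustrative assumptions.
%
%     n = 100;
%     A = randn(n); A = .5*(A + A');
%     M = spherefactory(n);
%     problem.M = M;
%     problem.cost = @(x) -x'*A*x;
%     problem.grad = @(x) M.egrad2rgrad(x, -2*A*x);
%     opts.tolgradnorm = 1e-8;
%     opts.maxiter = 200;
%     [x, xcost, info] = trustregions(problem, [], opts);
%     semilogy([info.iter], [info.gradnorm], '.-');   % convergence plot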
% Verify that the problem description is sufficient for the solver.
if ~canGetCost(problem)
warning('manopt:getCost', ...
'No cost provided. The algorithm will likely abort.');
end
if ~canGetGradient(problem)
warning('manopt:getGradient', ...
'No gradient provided. The algorithm will likely abort.');
end
if ~canGetHessian(problem)
warning('manopt:getHessian:approx', ...
'No Hessian provided. Using an approximation instead.');
end
% Define some strings for display
tcg_stop_reason = {'negative curvature',...
'exceeded trust region',...
'reached target residual-kappa',...
'reached target residual-theta',...
'dimension exceeded',...
'model increased'};
% Set local defaults here
localdefaults.verbosity = 2;
localdefaults.maxtime = inf;
localdefaults.miniter = 3;
localdefaults.maxiter = 1000;
localdefaults.mininner = 1;
localdefaults.maxinner = problem.M.dim();
localdefaults.tolgradnorm = 1e-6;
localdefaults.kappa = 0.1;
localdefaults.theta = 1.0;
localdefaults.rho_prime = 0.1;
localdefaults.useRand = false;
localdefaults.rho_regularization = 1e3;
% Merge global and local defaults, then merge w/ user options, if any.
localdefaults = mergeOptions(getGlobalDefaults(), localdefaults);
if ~exist('options', 'var') || isempty(options)
options = struct();
end
options = mergeOptions(localdefaults, options);
% Set default Delta_bar and Delta0 separately to deal with additional
% logic: if Delta_bar is provided but not Delta0, let Delta0 automatically
% be some fraction of the provided Delta_bar.
if ~isfield(options, 'Delta_bar')
if isfield(problem.M, 'typicaldist')
options.Delta_bar = problem.M.typicaldist();
else
options.Delta_bar = sqrt(problem.M.dim());
end
end
if ~isfield(options,'Delta0')
options.Delta0 = options.Delta_bar / 8;
end
% Check some option values
assert(options.rho_prime < 1/4, ...
'options.rho_prime must be strictly smaller than 1/4.');
assert(options.Delta_bar > 0, ...
'options.Delta_bar must be positive.');
assert(options.Delta0 > 0 && options.Delta0 < options.Delta_bar, ...
'options.Delta0 must be positive and smaller than Delta_bar.');
% It is sometimes useful to check what the actual option values are.
if options.verbosity >= 3
disp(options);
end
% Create a store database
storedb = struct();
tic();
% If no initial point x is given by the user, generate one at random.
if ~exist('x', 'var') || isempty(x)
x = problem.M.rand();
end
%% Initializations
% k counts the outer (TR) iterations. The semantic is that k counts the
% number of iterations fully executed so far.
k = 0;
% initialize solution and companion measures: f(x), fgrad(x)
[fx fgradx storedb] = getCostGrad(problem, x, storedb);
norm_grad = problem.M.norm(x, fgradx);
% initialize trust-region radius
Delta = options.Delta0;
% Save stats in a struct array info, and preallocate
% (see http://people.csail.mit.edu/jskelly/blog/?x=entry:entry091030-033941)
if ~exist('used_cauchy', 'var')
used_cauchy = [];
end
stats = savestats(problem, x, storedb, options, k, fx, norm_grad, Delta);
info(1) = stats;
info(min(10000, options.maxiter+1)).iter = [];
% ** Display:
if options.verbosity == 2
fprintf(['%3s %3s %5s %5s ',...
'f: %e |grad|: %e\n'],...
' ',' ',' ',' ', fx, norm_grad);
elseif options.verbosity > 2
fprintf('************************************************************************\n');
fprintf('%3s %3s k: %5s num_inner: %5s %s\n',...
'','','______','______','');
fprintf(' f(x) : %e |grad| : %e\n', fx, norm_grad);
fprintf(' Delta : %f\n', Delta);
end
% **********************
% ** Start of TR loop **
% **********************
while true
% Start clock for this outer iteration
tic();
% Run standard stopping criterion checks
[stop reason] = stoppingcriterion(problem, x, options, info, k+1);
% If the stopping criterion that triggered is the tolerance on the
% gradient norm but we are using randomization, make sure we make at
% least miniter iterations to give randomization a chance at escaping
% saddle points.
if stop == 2 && options.useRand && k < options.miniter
stop = 0;
end
if stop
if options.verbosity >= 1
fprintf([reason '\n']);
end
break;
end
if options.verbosity > 2 || options.debug > 0
fprintf('************************************************************************\n');
end
% *************************
% ** Begin TR Subproblem **
% *************************
% Determine eta0
if ~options.useRand
% Pick the zero vector
eta = problem.M.zerovec(x);
else
% Random vector in T_x M (this has to be very small)
eta = problem.M.lincomb(x, 1e-6, problem.M.randvec(x));
% Must be inside trust-region
while problem.M.norm(x, eta) > Delta
eta = problem.M.lincomb(x, sqrt(sqrt(eps)), eta);
end
end
% solve TR subproblem
[eta Heta numit stop_inner storedb] = ...
tCG(problem, x, fgradx, eta, Delta, options, storedb);
srstr = tcg_stop_reason{stop_inner};
% This is only computed for logging purposes, because it may be useful
% for some user-defined stopping criteria. If this is not cheap for
% specific application (compared to evaluating the cost), we should
% reconsider this.
norm_eta = problem.M.norm(x, eta);
if options.debug > 0
testangle = problem.M.inner(x, eta, fgradx) / (norm_eta*norm_grad);
end
% If using randomized approach, compare result with the Cauchy point.
% Convergence proofs assume that we achieve at least the reduction of
% the Cauchy point. After this if-block, either all eta-related
% quantities have been changed consistently, or none of them have
% changed.
if options.useRand
used_cauchy = false;
% Check the curvature,
[Hg storedb] = getHessian(problem, x, fgradx, storedb);
g_Hg = problem.M.inner(x, fgradx, Hg);
if g_Hg <= 0
tau_c = 1;
else
tau_c = min( norm_grad^3/(Delta*g_Hg) , 1);
end
% and generate the Cauchy point.
eta_c = problem.M.lincomb(x, -tau_c * Delta / norm_grad, fgradx);
Heta_c = problem.M.lincomb(x, -tau_c * Delta / norm_grad, Hg);
% Now that we have computed the Cauchy point in addition to the
% returned eta, we might as well keep the best of them.
mdle = fx + problem.M.inner(x, fgradx, eta) ...
+ .5*problem.M.inner(x, Heta, eta);
mdlec = fx + problem.M.inner(x, fgradx, eta_c) ...
+ .5*problem.M.inner(x, Heta_c, eta_c);
if mdle > mdlec
eta = eta_c;
Heta = Heta_c; % added April 11, 2012
used_cauchy = true;
end
end
% Compute the retraction of the proposal
x_prop = problem.M.retr(x, eta);
% Compute the function value of the proposal
[fx_prop storedb] = getCost(problem, x_prop, storedb);
% Will we accept the proposed solution or not?
% Check the performance of the quadratic model against the actual cost.
rhonum = fx - fx_prop;
rhoden = -problem.M.inner(x, fgradx, eta) ...
-.5*problem.M.inner(x, eta, Heta);
% Heuristic -- added Dec. 2, 2013 (NB) to replace the former heuristic.
% This heuristic is documented in the book by Conn, Gould and Toint on
% trust-region methods, section 17.4.2.
% rhonum measures the difference between two numbers. Close to
% convergence, these two numbers are very close to each other, so
% that computing their difference is numerically challenging: there may
% be a significant loss in accuracy. Since the acceptance or rejection
% of the step is conditioned on the ratio between rhonum and rhoden,
% large errors in rhonum result in a large error in rho, hence in
% erratic acceptance / rejection. Meanwhile, close to convergence,
% steps are usually trustworthy and we should transition to a Newton-
% like method, with rho=1 consistently. The heuristic thus shifts both
% rhonum and rhoden by a small amount such that far from convergence,
% the shift is irrelevant and close to convergence, the ratio rho goes
% to 1, effectively promoting acceptance of the step.
% The rationale is that close to convergence, both rhonum and rhoden
% are quadratic in the distance between x and x_prop. Thus, when this
% distance is on the order of sqrt(eps), the value of rhonum and rhoden
% is on the order of eps, which is indistinguishable from the numerical
% error, resulting in badly estimated rho's.
% For abs(fx) < 1, this heuristic is invariant under offsets of f but
% not under scaling of f. For abs(fx) > 1, the opposite holds. This
% should not alarm us, as this heuristic only triggers at the very last
% iterations if very fine convergence is demanded.
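% Editor's note (worked example, not in the original code): with
% options.rho_regularization = 1e3, abs(fx) <= 1 and eps ~ 2.2e-16, the
% shift below is rho_reg ~ 2.2e-13. When rhonum is, say, 1e-3 the shift is
% negligible; once rhonum and rhoden shrink to the order of 1e-15 the
% shift dominates both terms and rho is pushed towards 1, so small,
% trustworthy steps near convergence still get accepted.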
rho_reg = max(1, abs(fx)) * eps * options.rho_regularization;
rhonum = rhonum + rho_reg;
rhoden = rhoden + rho_reg;
if options.debug > 0
fprintf('DBG: rhonum : %e\n', rhonum);
fprintf('DBG: rhoden : %e\n', rhoden);
end
% This is always true if a linear, symmetric operator is used for the
% Hessian (approximation) and if we had infinite numerical precision.
% In practice, nonlinear approximations of the Hessian such as the
% built-in finite difference approximation and finite numerical
% accuracy can cause the model to increase. In such scenarios, we
% decide to force a rejection of the step and a reduction of the
% trust-region radius. We test the sign of the regularized rhoden since
% the regularization is supposed to capture the accuracy to which
% rhoden is computed: if rhoden were negative before regularization but
% not after, that should not be (and is not) detected as a failure.
model_decreased = (rhoden >= 0);
if ~model_decreased
srstr = [srstr ', model did not decrease']; %#ok<AGROW>
end
rho = rhonum / rhoden;
if options.debug > 0
m = @(x, eta) ...
getCost(problem, x, storedb) + ...
getDirectionalDerivative(problem, x, eta, storedb) + ...
.5*problem.M.inner(x, getHessian(problem, x, eta, storedb), eta);
zerovec = problem.M.zerovec(x);
actrho = (fx - fx_prop) / (m(x, zerovec) - m(x, eta));
fprintf('DBG: new f(x) : %e\n', fx_prop);
fprintf('DBG: actual rho : %e\n', actrho);
fprintf('DBG: used rho : %e\n', rho);
end
% Choose the new TR radius based on the model performance
trstr = ' ';
% If the actual decrease is smaller than 1/4 of the predicted decrease,
% then reduce the TR radius.
if rho < 1/4 || ~model_decreased
trstr = 'TR-';
Delta = Delta/4;
% If the actual decrease is at least 3/4 of the predicted decrease and
% the tCG (inner solve) hit the TR boundary, increase the TR radius.
elseif rho > 3/4 && (stop_inner == 1 || stop_inner == 2)
trstr = 'TR+';
Delta = min(2*Delta, options.Delta_bar);
end
% Otherwise, keep the TR radius constant.
% Choose to accept or reject the proposed step based on the model
% performance.
if model_decreased && rho > options.rho_prime
accept = true;
accstr = 'acc';
x = x_prop;
fx = fx_prop;
[fgradx storedb] = getGradient(problem, x, storedb);
norm_grad = problem.M.norm(x, fgradx);
else
accept = false;
accstr = 'REJ';
end
% Make sure we don't use too much memory for the store database
storedb = purgeStoredb(storedb, options.storedepth);
% k is the number of iterations we have accomplished.
k = k + 1;
% Log statistics for freshly executed iteration.
% Everything after this in the loop is not accounted for in the timing.
stats = savestats(problem, x, storedb, options, k, fx, norm_grad, ...
Delta, info, rho, rhonum, rhoden, accept, numit, ...
norm_eta, used_cauchy);
info(k+1) = stats; %#ok<AGROW>
% ** Display:
if options.verbosity == 2,
fprintf(['%3s %3s k: %5d num_inner: %5d ', ...
'f: %e |grad|: %e %s\n'], ...
accstr,trstr,k,numit,fx,norm_grad,srstr);
elseif options.verbosity > 2,
if options.useRand && used_cauchy,
fprintf('USED CAUCHY POINT\n');
end
fprintf('%3s %3s k: %5d num_inner: %5d %s\n', ...
accstr, trstr, k, numit, srstr);
fprintf(' f(x) : %e |grad| : %e\n',fx,norm_grad);
if options.debug > 0
fprintf(' Delta : %f |eta| : %e\n',Delta,norm_eta);
end
fprintf(' rho : %e\n',rho);
end
if options.debug > 0,
fprintf('DBG: cos ang(eta,gradf): %d\n',testangle);
if rho == 0
fprintf('DBG: rho = 0, this will likely hinder further convergence.\n');
end
end
end % of TR loop (counter: k)
% Restrict info struct-array to useful part
info = info(1:k+1);
if (options.verbosity > 2) || (options.debug > 0),
fprintf('************************************************************************\n');
end
if (options.verbosity > 0) || (options.debug > 0)
fprintf('Total time is %f [s] (excludes statsfun)\n', info(end).time);
end
% Return the best cost reached
cost = fx;
end
% Routine in charge of collecting the current iteration stats
function stats = savestats(problem, x, storedb, options, k, fx, ...
norm_grad, Delta, info, rho, rhonum, ...
rhoden, accept, numit, norm_eta, used_cauchy)
stats.iter = k;
stats.cost = fx;
stats.gradnorm = norm_grad;
stats.Delta = Delta;
if k == 0
stats.time = toc();
stats.rho = inf;
stats.rhonum = NaN;
stats.rhoden = NaN;
stats.accepted = true;
stats.numinner = NaN;
stats.stepsize = NaN;
if options.useRand
stats.cauchy = false;
end
else
stats.time = info(k).time + toc();
stats.rho = rho;
stats.rhonum = rhonum;
stats.rhoden = rhoden;
stats.accepted = accept;
stats.numinner = numit;
stats.stepsize = norm_eta;
if options.useRand,
stats.cauchy = used_cauchy;
end
end
% See comment about statsfun above: the x and store passed to statsfun
% are that of the most recently accepted point after the iteration
% fully executed.
stats = applyStatsfun(problem, x, storedb, options, stats);
end
|
github
|
skovnats/madmm-master
|
MADMM_comptr.m
|
.m
|
madmm-master/compressed_modes/MADMM_comptr.m
| 2,379 |
utf_8
|
6b9420c94a0f051a1efd4b0488e967c4
|
function [X,Xcost bm tm] = MADMM_comptr(L,N,lambda,rho,steps,it,X0)
% Manifold ADMM method
% Minimizes lambda*|X|_1+trace(X'LX)
% on the manifold of n x N- orthogonal matrices.
% INPUT:
% L: is the discretized Hamiltonian, a n x n- matrix
% N: number of columns of X (approximate eigenvectors)
% lambda: parameter in cost function
% rho>0: penalty parameter for ADMM
% steps: number of inner (Manopt) iterations
% it: the number of outer iterations (updating the Lagrange variable U)
% OUTPUT:
% X is the optimum matrix of the main variable
% Xcost is the optimal value of the cost function
% bm: history of function values (outer iteration)
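% Example (editor's sketch, not part of the original file): the Hamiltonian
% L and all parameter values below are illustrative assumptions only.
%     n = 100; N = 5;
%     L = full(gallery('tridiag', n));          % 1D discrete Laplacian
%     [X, Xcost, bm, tm] = MADMM_comptr(L, N, 0.1, 1, 10, 50);
%     plot(tm, bm); xlabel('cpu time [s]'); ylabel('cost');
% The X-step is solved with Manopt's trust-regions solver on the Stiefel
% manifold; see iterX below.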
n=size(L,1);
% Initializing all variables
%X=polar_svd(rand(n,N));
%X=rand(n,N);
%[X SX]=eigs(H,N);
if exist('X0','var')
X=X0;
else
[X,~] = svd(randn(n,N),0);
end
Z=X;
U=zeros(n,N);
%
bm=lambda*sum(abs(X(:)))+trace(X.'*L*X);
tm=0;
% ADMM outer iteration
t_=cputime;
% t=tic;
for i=1:it
X=iterX(L,N,X,Z,U,lambda,rho,steps);
Z=iterZ(X,U,lambda,rho);
U=U+X-Z;
Xcost=lambda*sum(abs(X(:)))+trace(X.'*L*X);
bm=[bm Xcost];
% tm=[tm toc];
tm=[tm cputime-t_];
fprintf('%d:%f\n',i,Xcost);
end;
% tm=tm-t_;
end
function X=iterX(L,N,X,Z,U,lambda,rho,steps)
n=size(X,1);
% Create the problem structure.
manifold = stiefelfactory(n, N, 1);
problem.M = manifold;
% Define the problem cost function and its gradient.
problem.cost = @(X) trace(X'*L*X)+rho*norm(X-Z+U,'fro')^2/2;
egrad = @(X) egra(L,X,Z,U,rho);
problem.grad = @(Y) manifold.egrad2rgrad(Y, egrad(Y));
% Numerically check the differential
% checkgradient(problem);
% Stopfunction
options.stopfun = @mystopfun;
function stopnow = mystopfun(problem, x, info, last)
stopnow = (last >= 3 && (info(last-2).cost - info(last).cost)/info(last).cost < 1e-8);
end
options.maxiter=steps;
options.verbosity=0;
% Solve.
warning off
[X, Xcost, info, options] = trustregions(problem,X,options);
% [X, Xcost, info, options] = conjugategradient(problem,X,options);
warning on
Xcost=lambda*sum(abs(X(:)))+trace(X'*L*X);
end
function Z=iterZ(X,U,lambda,rho)
%
Z=shrink(X+U,lambda/rho);
function [z]=shrink(z,l)
z=sign(z).*max(0,abs(z)-l);
end
end
function eg=egra(L,X,Z,U,rho)
% gradient of cost function
[n m]=size(X);
g1=(L+L')*X;
g2=rho*(X-Z+U);
eg=g1+g2;
end
|
github
|
skovnats/madmm-master
|
dsh.m
|
.m
|
madmm-master/compressed_modes/dsh.m
| 2,720 |
utf_8
|
4280c07d43da54dee64aaed8d33fe7b8
|
% Script for displaying a shape.
function [] = dsh( varargin )
% input:
%{
{1} - shape struct (fields TRIV, X, Y, Z; optionally a color field C)
{2} - title string, or a numeric per-vertex function used for coloring
{3} - save flag (logical), or a title string
{4} - save flag (used when {2} is numeric and {3} is a title string)
%}
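% Example (editor's sketch, not part of the original file): `shape` is a
% struct with fields TRIV, X, Y, Z and f is an n x 1 per-vertex function;
% both are assumptions for illustration.
%     dsh(shape);                        % plain rendering
%     dsh(shape, 'my shape');            % with a title
%     dsh(shape, f);                     % colored by the values in f
%     dsh(shape, f, 'my shape', false);  % colored, titled, not saved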
vector = false;
flag = true;
name = [];
switch nargin
case 1
shape = varargin{ 1 };
case 2
shape = varargin{ 1 };
tname = varargin{ 2 };
if isnumeric(tname)
vector = true;
else
vector = false;
end
case 3
shape = varargin{ 1 };
tname = varargin{ 2 };
issave = varargin{ 3 };
if isnumeric(tname)
vector = true;
else
vector = false;
end
if isstr( issave )
name = issave;
issave = false;
end
case 4
shape = varargin{ 1 };
tname = varargin{ 2 };
vector = true;
name = varargin{ 3 };
issave = varargin{ 4 };
end
if iscell( shape )
flag = false;
for i = 1:length( shape )
dsh( shape{ i } );
title( sprintf( 'shape %d/%d', i, length( shape ) ) );
waitforbuttonpress;
end
end
if flag
if ~vector
% displaying
if ~isfield( shape, 'C' )
trisurf( shape.TRIV, shape.X, shape.Y, shape.Z, ones(size((shape.X))) ), ...
end
else
trisurf( shape.TRIV, shape.X, shape.Y, shape.Z, full(tname) ), ...
end
if isfield( shape, 'C' )
if ~vector
try
%%
% UPD: 15.11.2011
lab = [shape.L, shape.a, shape.b];
lab = colorspace( 'lab->rgb', lab );
shape.C = lab;
trisurf( shape.TRIV, shape.X, shape.Y, shape.Z, 1:(length(shape.X)) )
catch
end
%%
colormap(shape.C),
% colormap(ones( length(shape.X), 3 )),
end
axis off, axis image, shading interp;
% lighting phong, camlight('headlight'); % was commented
else
if ~vector
colormap(ones( length(shape.X), 3 )),
end
axis off, axis image, shading interp, lighting phong, camlight('headlight');
end
switch nargin
case 2
if ~vector
title(tname);
end
case 3
if ~vector
title(tname);
end
%
if isstr( name )
title(name);
end
if issave
saveas( gcf, [tname '.png'] )
end
case 4
title( name );
if issave
saveas( gcf, [name '.png'] )
end
end
set(gcf,'Color','w');
cameratoolbar;
%%
% try
% caxis( [-max(abs(tname)), max(abs(tname))] );
% colormap(temp(64));
% catch
% end
%%
end
|
github
|
skovnats/madmm-master
|
MADMM_comp.m
|
.m
|
madmm-master/compressed_modes/MADMM_comp.m
| 2,354 |
utf_8
|
69c9f062e75434e34f506a3e9d02c53f
|
function [X,Xcost bm tm] = MADMM_comp(L,N,lambda,rho,steps,it,X0)
% Manifold ADMM method
% Minimizes lambda*|X|_1+trace(X'LX)
% on the manifold of n x N- orthogonal matrices.
% INPUT:
% L: is the discretized Hamiltonian, a n x n- matrix
% N: number of columns of X (approximate eigenvectors)
% lambda: parameter in cost function
% rho>0: penalty parameter for ADMM
% steps: number of inner (Manopt) iterations
% it: the number of outer iterations (updating the Lagrange variable U)
% OUTPUT:
% X is the optimum matrix of the main variable
% Xcost is the optimal value of the cost function
% bm: history of function values (outer iteration)
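% Example (editor's sketch, not part of the original file; parameter values
% are illustrative): same calling convention as MADMM_comptr, but the
% X-step uses Manopt's conjugate gradient solver (see iterX below).
%     n = 100; N = 5;
%     L = full(gallery('tridiag', n));
%     [X, Xcost, bm, tm] = MADMM_comp(L, N, 0.1, 1, 10, 50);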
n=size(L,1);
% Initializing all variables
%X=polar_svd(rand(n,N));
%X=rand(n,N);
%[X SX]=eigs(H,N);
if exist('X0','var')
X=X0;
else
[X,~] = svd(randn(n,N),0);
end
Z=X;
U=zeros(n,N);
%
bm=lambda*sum(abs(X(:)))+trace(X.'*L*X);
tm=0;
% ADMM outer iteration
t_=cputime;
% t=tic;
for i=1:it
X=iterX(L,N,X,Z,U,lambda,rho,steps);
Z=iterZ(X,U,lambda,rho);
U=U+X-Z;
Xcost=lambda*sum(abs(X(:)))+trace(X.'*L*X);
bm=[bm Xcost];
% tm=[tm toc];
tm=[tm cputime-t_];
fprintf('%d:%f\n',i,Xcost);
end;
% tm=tm-t_;
end
function X=iterX(L,N,X,Z,U,lambda,rho,steps)
n=size(X,1);
% Create the problem structure.
manifold = stiefelfactory(n, N, 1);
problem.M = manifold;
% Define the problem cost function and its gradient.
problem.cost = @(X) trace(X'*L*X)+rho*norm(X-Z+U,'fro')^2/2;
egrad = @(X) egra(L,X,Z,U,rho);
problem.grad = @(Y) manifold.egrad2rgrad(Y, egrad(Y));
% Numerically check the differential
% checkgradient(problem);
% Stopfunction
options.stopfun = @mystopfun;
function stopnow = mystopfun(problem, x, info, last)
stopnow = (last >= 3 && (info(last-2).cost - info(last).cost)/info(last).cost < 1e-8);
end
options.maxiter=steps;
options.verbosity=0;
% Solve.
% [X, Xcost, info, options] = trustregions(problem,X,options);
[X, Xcost, info, options] = conjugategradient(problem,X,options);
Xcost=lambda*sum(abs(X(:)))+trace(X'*L*X);
end
function Z=iterZ(X,U,lambda,rho)
%
Z=shrink(X+U,lambda/rho);
function [z]=shrink(z,l)
z=sign(z).*max(0,abs(z)-l);
end
end
function eg=egra(L,X,Z,U,rho)
% gradient of cost function
[n m]=size(X);
g1=(L+L')*X;
g2=rho*(X-Z+U);
eg=g1+g2;
end
|
github
|
skovnats/madmm-master
|
SL1_Manopt.m
|
.m
|
madmm-master/compressed_modes/SL1_Manopt.m
| 2,109 |
utf_8
|
3640ab4e879252685fd9afa563f8fa2c
|
function [X, Xcost b0 t0]=SL1_Manopt(H,N,mu,eps,it,X0)
% Minimizes |X|_eps/mu+trace(X'HX)
% on the Stiefel manifold X'*X=I
% Here |.|_eps is the smoothed L1- norm |x|=sqrt(x^2+eps), eps>0.
% INPUT:
% H: discrete Hamiltonian
% N: number of columns of X
% eps: smoothing parameter for L1 norm (eps = 10^(-6) )
% it: maximum number of iterations of the Manopt solver (trust-regions in
%     this file; a conjugate gradient call is available but commented out)
% OUTPUT:
% X solution of smoothed L1 minimization by MANOPT
% Xcost optimal value of cost function f(X)=|X|_eps/mu+trace(X'HX)
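% Example (editor's sketch, not part of the original file; all parameter
% values are illustrative assumptions):
%     n = 100; N = 5;
%     H = full(gallery('tridiag', n));
%     [X, Xcost, b0, t0] = SL1_Manopt(H, N, 10, 1e-6, 200);
%     semilogy(t0, b0);                 % cost versus elapsed time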
warning('off');
n=size(H,1);
% Start matrix
%[X0 SX]=eigs(H,N);
if exist('X0','var')
X=X0;
else
[X,~] = svd(randn(n,N),0);
end
%X0=rand(n,N);
b0=cost(H,X,mu,eps); % use X: equals X0 when given, the random start otherwise
t0=0;
% Create the problem structure.
manifold = stiefelfactory(n, N, 1);
problem.M = manifold;
% Define the problem cost function and its gradient.
problem.cost = @(X) cost(H,X,mu,eps);
egrad = @(X) egra(H,X,mu,eps);
problem.grad = @(Y) manifold.egrad2rgrad(Y, egrad(Y));
% Numerically check the differential
% checkgradient(problem);
% Stopfunction
% options.stopfun = @mystopfun;
function stopnow = mystopfun(problem, x, info, last)
stopnow = (last >= 3 && (info(last-2).cost - info(last).cost)/info(last).cost < 1e-8);
end
options.maxiter=it;
% options.minstepsize=1e-1;
% options.verbosity=0;
options.verbosity=2;
% Solve.
problem.ff = @(X) trace(X.'*H*X) + sum(abs(X(:)))/mu;
[X, Xcost, info, options] = trustregions(problem,X,options);
% [X, Xcost, info, options] = conjugategradient(problem,X0,options);
Xcost=cost(H,X,mu,eps);
% t0=[];
% b0=[];
for i=1:size(info,2)
b0=[b0, info(i).cost];
t0=[t0, info(i).time];
end;
end
function cc=cost(H,X,mu,eps);
% smoothed L1 norm in cost function
cc = sum(sqrt(X(:).^2+eps))/mu+trace(X'*H*X);
end
function eg=egra(H,X,mu,eps)
% gradient of cost function
[n m]=size(X);
g1=X./sqrt(X.*X+eps*ones(n,m));
g2=(H+H')*X;
eg=g1/mu+g2;
end
|
github
|
skovnats/madmm-master
|
NEUMANN.m
|
.m
|
madmm-master/compressed_modes/NEUMANN.m
| 1,914 |
utf_8
|
3c56c114705af05e0b37e8334ed39359
|
function [X,Xcost bo to] = NEUMANN(L,N,lambda,rho,it,X0);
% Neumann's ADMM method
% Minimizes lambda*|X|_1+trace(X'LX)
% on the manifold of n x N- orthogonal matrices.
% INPUT:
% L: is the discretized Hamiltonian, a n x n- matrix
% N: number of columns of X (approximate eigenvectors)
% lambda: parameter in cost function
% rho>0: penalty parameter for ADMM
% steps: NOT USED (it would control an inner iteration for minimizing
%        w.r.t. E; in this program the linear equation for E is solved
%        exactly using linsolve(), so no inner iteration is needed)
% it: the number of outer iterations (updating the Lagrange variables Ue and Us)
% OUTPUT:
% X is the optimum matrix of the main variable
% Xcost is the optimal value of the cost function
% bo: history of function values (outer iteration)
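% Example (editor's sketch, not part of the original file; parameter values
% are illustrative assumptions):
%     n = 100; N = 5;
%     L = full(gallery('tridiag', n));
%     [X, Xcost, bo, to] = NEUMANN(L, N, 0.1, 1, 100);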
n=size(L,1);
% Initializing all variables
%X=polar_svd(rand(n,N));
%X=rand(n,N);
%[X SX]=eigs(H,N);
if exist('X0','var')
X=X0;
else
[X,~] = svd(randn(n,N),0);
end
E=X;
S=X;
Ue=zeros(n,N);
Us=zeros(n,N);
%
bo=lambda*sum(abs(X(:)))-trace(X'*L*X); % use X: equals X0 when given, the random start otherwise
to=0;
% ADMM outer iteration
t_=cputime;
% tic;
for i=1:it
X=iterX(L,S,E,Us,Ue);
E=iterE(L,X,Ue,rho);
S=iterS(X,Us,lambda,rho);
Ue=Ue+X-E;
Us=Us+X-S;
Xcost=lambda*sum(abs(X(:)))-trace(X'*L*X);
bo=[bo Xcost];
to=[to cputime-t_];
% to=[to toc];
fprintf('%d:%f\n',i,Xcost);
end;
end
function X=iterX(L,S,E,Us,Ue,rho) % formula (14)
n=size(E,1);
Y=(S-Us+E-Ue)/2; %
% try
[U S V]=svd(Y,'econ');
% catch
% save('Y','Y');
% pause;
% end
X=U*V';
end
function E=iterE(L,X,Ue,rho) % formula (17)
n=size(L,1);
R=rho*(X+Ue);
A=(rho*eye(n)-L-L');
E=linsolve(A,R);
% try
[U S V]=svd(E,'econ');
% catch
% save('E','E');
% pause;
% end
end
function S=iterS(X,Us,lambda,rho)
%
S=shrink(X+Us,lambda/rho);
function [z]=shrink(z,l)
z=sign(z).*max(0,abs(z)-l);
end
end
|
github
|
skovnats/madmm-master
|
OSHER.m
|
.m
|
madmm-master/compressed_modes/OSHER.m
| 1,885 |
utf_8
|
51162b58ba309528a609bc58d3cfaa10
|
function [X,Xcost bo, to] = OSHER(H,N,mu,lambda,rho,it,X0);
% Osher's ADMM method
% Minimizes |X|_1/mu+trace(X'HX)
% on the manifold of n x N- matrices.
% INPUT:
% H: is the discretized Hamiltonian, a n x n- matrix
% N: number of columns of X (approximate eigenvectors)
% mu: penalty parameter in cost function
% lambda>0: penalty parameter for ADMM
% rho>0: second penalty parameter
% steps: NOT USED (it would control an inner iteration for minimizing
%        w.r.t. X; here the X-update is solved exactly using linsolve())
% it: the number of outer iterations (updating the Lagrange variables B and b)
% OUTPUT:
% X is the optimum matrix of the main variable
% Xcost is the optimal value of the cost function
% bo: history of function values (outer iteration)
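% Example (editor's sketch, not part of the original file; parameter values
% are illustrative assumptions):
%     n = 100; N = 5;
%     H = full(gallery('tridiag', n));
%     [X, Xcost, bo, to] = OSHER(H, N, 10, 1, 1, 100);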
n=size(H,1);
% Initializing all variables
%X=polar_svd(rand(n,N));
%X=rand(n,N);
%[X SX]=eigs(H,N);
if exist('X0','var')
X=X0;
else
[X,~] = svd(randn(n,N),0);
end
P=X;
Q=X;
B=zeros(n,N);
b=zeros(n,N);
%
bo=sum(abs(X(:)))/mu+trace(X'*H*X);
to=0;
% ADMM outer iteration
% tic;
% to=[];
t_=cputime;
for i=1:it
X=iterX(H,P,Q,B,b,lambda,rho);
P=iterP(X,B);
Q=iterQ(X,b,mu,lambda);
B=B+X-P;
b=b+X-Q;
Xcost=sum(abs(X(:)))/mu+trace(X'*H*X);
bo=[bo Xcost];
to=[to, cputime-t_];
% to=[to, toc];
%
fprintf('%d:%f\n',i,Xcost);
end;
end
function X=iterX(H,P,Q,B,b,lambda,rho)
warning('off');
% Dimensions of data
n=size(P,1);
R=rho*(P-B)+lambda*(Q-b);
A=2*H+(lambda+rho)*eye(n,n);
X=linsolve(A,R);
end
function Q=iterQ(X,b,mu,lambda)
% This is the shrinking operation on the variable Q
Q=shrink(X+b,1/(mu*lambda));
function [z]=shrink(z,l)
z=sign(z).*max(0,abs(z)-l);
end
end
function P=iterP(X,B)
[U S V]=svd(X+B,'econ');
P=U*V';
end
|
github
|
skovnats/madmm-master
|
MADMM_compcg.m
|
.m
|
madmm-master/compressed_modes/MADMM_compcg.m
| 2,524 |
utf_8
|
958d53554d16a56d1712b9673ee29188
|
function [X,Xcost bm tm] = MADMM_compcg(L,N,lambda,rho,steps,it,X0)
% Manifold ADMM method
% Minimizes lambda*|X|_1+trace(X'LX)
% on the manifold of n x N- orthogonal matrices.
% INPUT:
% L: is the discretized Hamiltonian, a n x n- matrix
% N: number of columns of X (approximate eigenvectors)
% lambda: parameter in cost function
% rho>0: penalty parameter for ADMM
% steps: number of inner (Manopt) iterations
% it: the number of outer iterations (updating the Lagrange variable U)
% OUTPUT:
% X is the optimum matrix of the main variable
% Xcost is the optimal value of the cost function
% bm: history of function values (outer iteration)
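% Example (editor's sketch, not part of the original file; parameter values
% are illustrative): identical interface to MADMM_comp; the X-step uses the
% conjugate gradient solver and an optional (commented) outer tolerance.
%     n = 100; N = 5;
%     L = full(gallery('tridiag', n));
%     [X, Xcost, bm, tm] = MADMM_compcg(L, N, 0.1, 1, 10, 50);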
n=size(L,1);
tol=1e-6;
% Initializing all variables
%X=polar_svd(rand(n,N));
%X=rand(n,N);
%[X SX]=eigs(H,N);
if exist('X0','var')
X=X0;
else
[X,~] = svd(randn(n,N),0);
end
Z=X;
U=zeros(n,N);
%
bm=lambda*sum(abs(X(:)))+trace(X.'*L*X);
tm=0;
% ADMM outer iteration
t_=cputime;
% t=tic;
for i=1:it
X=iterX(L,N,X,Z,U,lambda,rho,steps);
Z=iterZ(X,U,lambda,rho);
U=U+X-Z;
Xcost=lambda*sum(abs(X(:)))+trace(X.'*L*X);
bm=[bm Xcost];
% tm=[tm toc];
tm=[tm cputime-t_];
fprintf('%d:%f\n',i,Xcost);
%%abs(bm(end-1)-bm(end)
% abs(bm(end-1)-bm(end))
%{
if i>4
if (abs(bm(end-3)-bm(end))/bm(end))<tol
return;
end
end
%}
end;
% tm=tm-t_;
end
function X=iterX(L,N,X,Z,U,lambda,rho,steps)
n=size(X,1);
% Create the problem structure.
manifold = stiefelfactory(n, N, 1);
problem.M = manifold;
% Define the problem cost function and its gradient.
problem.cost = @(X) trace(X'*L*X)+rho*norm(X-Z+U,'fro')^2/2;
egrad = @(X) egra(L,X,Z,U,rho);
problem.grad = @(Y) manifold.egrad2rgrad(Y, egrad(Y));
% Numerically check the differential
% checkgradient(problem);
% Stopfunction
options.stopfun = @mystopfun;
function stopnow = mystopfun(problem, x, info, last)
stopnow = (last >= 3 && (info(last-2).cost - info(last).cost)/info(last).cost < 1e-8);
end
options.maxiter=steps;
options.verbosity=0;
% Solve.
% [X, Xcost, info, options] = trustregions(problem,X,options);
[X, Xcost, info, options] = conjugategradient(problem,X,options);
Xcost=lambda*sum(abs(X(:)))+trace(X'*L*X);
end
function Z=iterZ(X,U,lambda,rho)
%
Z=shrink(X+U,lambda/rho);
function [z]=shrink(z,l)
z=sign(z).*max(0,abs(z)-l);
end
end
function eg=egra(L,X,Z,U,rho)
% gradient of cost function
[n m]=size(X);
g1=(L+L')*X;
g2=rho*(X-Z+U);
eg=g1+g2;
end
|
github
|
skovnats/madmm-master
|
maxcut.m
|
.m
|
madmm-master/compressed_modes/manopt/examples/maxcut.m
| 12,136 |
utf_8
|
7f2745544840a7cd9263ab6e5e7fccf6
|
function [x cutvalue cutvalue_upperbound Y] = maxcut(L, r)
% Algorithm to (try to) compute a maximum cut of a graph, via SDP approach.
%
% function x = maxcut(L)
% function [x cutvalue cutvalue_upperbound Y] = maxcut(L, r)
%
% L is the Laplacian matrix describing the graph to cut. The Laplacian of a
% graph is L = D - A, where D is the diagonal degree matrix (D(i, i) is the
% sum of the weights of the edges adjacent to node i) and A is the
% symmetric adjacency matrix of the graph (A(i, j) = A(j, i) is the weight
% of the edge joining nodes i and j). If L is sparse, this will be
% exploited.
%
% If the graph has n nodes, then L is nxn and the output x is a vector of
% length n such that x(i) is +1 or -1. This partitions the nodes of the
% graph in two classes, in an attempt to maximize the sum of the weights of
% the edges that go from one class to the other (MAX CUT problem).
%
% cutvalue is the sum of the weights of the edges 'cut' by the partition x.
%
% If the algorithm reached the global optimum of the underlying SDP
% problem, then it produces an upperbound on the maximum cut value. This
% value is returned in cutvalue_upperbound if it is found. Otherwise, that
% output is set to NaN.
%
% If r is specified (by default, r = n), the algorithm will stop at rank r.
% This may prevent the algorithm from reaching a globally optimal solution
% for the underlying SDP problem (but can greatly help in keeping the
% execution time under control). If a global optimum of the SDP is reached
% before rank r, the algorithm will stop of course.
%
% Y is a matrix of size nxp, with p <= r, such that X = Y*Y' is the best
% solution found for the underlying SDP problem. If cutvalue_upperbound is
% not NaN, then Y*Y' is optimal for the SDP and cutvalue_upperbound is its
% cut value.
%
% By Goemans and Williamson 1995, it is known that if the optimal value of
% the SDP is reached, then the returned cut, in expectation, is at worst
% a fraction 0.878 of the optimal cut. (This is not exactly valid because
% we do not use random projection here; sign(Y*randn(size(Y, 2), 1)) will
% give a cut that respects this statement -- it's usually worse though).
%
% The algorithm is essentially that of:
% Journee, Bach, Absil and Sepulchre, 2010
% Low-rank optimization on the cone of positive semidefinite matrices.
%
% It is itself based on the famous SDP relaxation of MAX CUT:
% Goemans and Williamson, 1995
% Improved approximation algorithms for maximum cut and satisfiability
% problems using semidefinite programming.
% This file is part of Manopt and is copyrighted. See the license file.
%
% Main author: Nicolas Boumal, July 18, 2013
% Contributors:
%
% Change log:
%
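% Example (editor's sketch; the random graph below is hypothetical):
%
%   n = 30;
%   A = double(triu(rand(n) < .25, 1));
%   A = A + A';
%   L = diag(sum(A, 2)) - A;
%   [x, cutvalue, cutvalue_upperbound, Y] = maxcut(L);
%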
% If no inputs are provided, generate a random Laplacian.
% This is for illustration purposes only.
if ~exist('L', 'var') || isempty(L)
n = 20;
A = triu(randn(n) <= .4, 1);
A = A+A';
D = diag(sum(A, 2));
L = D-A;
end
n = size(L, 1);
assert(size(L, 2) == n, 'L must be square.');
if ~exist('r', 'var') || isempty(r) || r > n
r = n;
end
% We will let the rank increase. Each rank value will generate a cut.
% We have to go up in the rank to eventually find a certificate of SDP
% optimality. This in turn will give us an upperbound on the MAX CUT
% value and assure us that we're doing well, according to Goemans and
% Williamson's argument. In practice though, the good cuts often come
% up for low rank values, so we better keep track of the best one.
best_x = ones(n, 1);
best_cutvalue = 0;
cutvalue_upperbound = NaN;
time = [];
cost = [];
for rr = 2 : r
manifold = elliptopefactory(n, rr);
if rr == 2
% At first, for rank 2, generate a random point.
Y0 = manifold.rand();
else
% To increase the rank, we could just add a column of zeros to
% the Y matrix. Unfortunately, this lands us in a saddle point.
% To escape from the saddle, we may compute an eigenvector of
% Sy associated to a negative eigenvalue: that will yield a
% (second order) descent direction Z. See Journee et al ; Sy is
% linked to dual certificates for the SDP.
Y0 = [Y zeros(n, 1)];
LY0 = L*Y0;
Dy = spdiags(sum(LY0.*Y0, 2), 0, n, n);
Sy = (Dy - L)/4;
% Find the smallest (the "most negative") eigenvalue of Sy.
[v, s] = eigs(Sy, 1, 'SA');
% If there is no negative eigenvalue for Sy, then we are not at
% a saddle point: we're actually done!
if s >= -1e-8
% We can stop here: we found the global optimum of the SDP,
% and hence the reached cost is a valid upper bound on the
% maximum cut value.
cutvalue_upperbound = max(-[info.cost]);
break;
end
% This is our escape direction.
Z = manifold.proj(Y0, [zeros(n, rr-1) v]);
% % These instructions can be uncommented to see what the cost
% % function looks like at a saddle point. But will require the
% % problem structure which is not defined here: see the helper
% % function.
% plotprofile(problem, Y0, Z, linspace(-1, 1, 101));
% drawnow; pause;
% Now make a step in the Z direction to escape from the saddle.
% It is not obvious that it is ok to do a unit step ... perhaps
% need to be cautious here with the stepsize. It's not too
% critical though: the important point is to leave the saddle
% point. But it's nice to guarantee monotone decrease of the
% cost, and we can't do that with a constant step (at least,
% not without a proper argument to back it up).
stepsize = 1;
Y0 = manifold.retr(Y0, Z, stepsize);
end
% Use the Riemannian optimization based algorithm lower in this
% file to reach a critical point (typically a local optimizer) of
% the max cut cost with fixed rank, starting from Y0.
[Y info] = maxcut_fixedrank(L, Y0);
% Some info logging.
thistime = [info.time];
if ~isempty(time)
thistime = time(end) + thistime;
end
time = [time thistime]; %#ok<AGROW>
cost = [cost [info.cost]]; %#ok<AGROW>
% Time to turn the matrix Y into a cut.
% We can either do the random rounding as follows:
% x = sign(Y*randn(rr, 1));
% or extract the "PCA direction" of the points in Y and cut
% orthogonally to that direction, as follows:
[u, ~, ~] = svds(Y, 1);
x = sign(u);
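% (Editor's note, added) For a +/-1 labelling x, the total weight of the
% edges cut by the induced partition equals x'*L*x/4.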
cutvalue = (x'*L*x)/4;
if cutvalue > best_cutvalue
best_x = x;
best_cutvalue = cutvalue;
end
end
x = best_x;
cutvalue = best_cutvalue;
plot(time, -cost, '.-');
xlabel('Time [s]');
ylabel('Relaxed cut value');
title('The relaxed cut value is an upper bound on the optimal cut value.');
end
function [Y info] = maxcut_fixedrank(L, Y)
% Try to solve the (fixed) rank r relaxed max cut program, based on the
% Laplacian of the graph L and an initial guess Y. L is nxn and Y is nxr.
[n r] = size(Y);
assert(all(size(L) == n));
% The fixed rank elliptope geometry describes symmetric, positive
% semidefinite matrices of size n with rank r and all diagonal entries
% are 1.
manifold = elliptopefactory(n, r);
% % If you want to compare the performance of the elliptope geometry
% % against the (conceptually simpler) oblique manifold geometry,
% % uncomment this line.
% manifold = obliquefactory(r, n, true);
problem.M = manifold;
% % For rapid prototyping, these lines suffice to describe the cost
% % function and its gradient and Hessian (here expressed using the
% % Euclidean gradient and Hessian).
% problem.cost = @(Y) -trace(Y'*L*Y)/4;
% problem.egrad = @(Y) -(L*Y)/2;
% problem.ehess = @(Y, U) -(L*U)/2;
% Instead of the prototyping version, the functions below describe the
% cost, gradient and Hessian using the caching system (the store
% structure). This allows executing exactly the required number of
% multiplications with the matrix L. These multiplications are counted
% using the Lproducts_counter and registered for each iteration in the
% info structure outputted by solvers, via the statsfun function.
% Notice that we do not use the store structure to count: this does not
% behave well in general and is not advised.
Lproducts_counter = 0;
% For every visited point Y, we will need L*Y. This function makes sure
% the quantity L*Y is available, but only computes it if it wasn't
% already computed.
function store = prepare(Y, store)
if ~isfield(store, 'LY')
store.LY = L*Y;
Lproducts_counter = Lproducts_counter + 1;
end
end
problem.cost = @cost;
function [f store] = cost(Y, store)
store = prepare(Y, store);
LY = store.LY;
f = -(Y(:)'*LY(:))/4; % = -trace(Y'*LY)/4;
end
problem.grad = @grad;
function [g store] = grad(Y, store)
store = prepare(Y, store);
LY = store.LY;
g = manifold.egrad2rgrad(Y, -LY/2);
end
problem.hess = @hess;
function [h store] = hess(Y, U, store)
store = prepare(Y, store);
LY = store.LY;
LU = L*U;
Lproducts_counter = Lproducts_counter + 1;
h = manifold.ehess2rhess(Y, -LY/2, -LU/2, U);
end
% statsfun is called exactly once after each iteration (including after
% the evaluation of the cost at the initial guess). We then register
% the value of the Lproducts counter (which counts how many products
% were needed since the last iteration), and reset it to zero.
options.statsfun = @statsfun;
function stats = statsfun(problem, Y, stats, store) %#ok
stats.Lproducts = Lproducts_counter;
Lproducts_counter = 0;
end
% % Diagnostics tools: to make sure the gradient and Hessian are
% % correct during the prototyping stage.
% checkgradient(problem); pause;
% checkhessian(problem); pause;
% % To investigate the effect of the rotational invariance when using
% % the oblique or the elliptope geometry, or to study the saddle point
% % issue mentioned above, it is sometimes interesting to look at the
% % spectrum of the Hessian. For large dimensions, this is slow!
% stairs(sort(hessianspectrum(problem, Y)));
% drawnow; pause;
% % When facing a saddle point issue as described in the master
% % function, and when no sure mechanism exists to find an escape
% % direction, it may be helpful to set useRand to true and raise
% % miniter to more than 1, when using trustregions. This will tell the
% % solver to not stop before at least miniter iterations were
% % accomplished (thus disregarding the zero gradient at the saddle
% % point) and to use random search directions to kick start the inner
% % solve (tCG) step. It is not as efficient as finding a sure escape
% % direction, but sometimes it's the best we have.
% options.useRand = true;
% options.miniter = 5;
options.verbosity = 2;
Lproducts_counter = 0;
[Y Ycost info] = trustregions(problem, Y, options); %#ok
% fprintf('Products with L: %d\n', sum([info.Lproducts]));
end
|
github
|
skovnats/madmm-master
|
maxcut_octave.m
|
.m
|
madmm-master/compressed_modes/manopt/examples/maxcut_octave.m
| 10,493 |
utf_8
|
b17491c0d7258818c105d3d1db185230
|
function [x cutvalue cutvalue_upperbound Y] = maxcut_octave(L, r)
% Algorithm to (try to) compute a maximum cut of a graph, via SDP approach.
%
% function x = maxcut_octave(L)
% function [x cutvalue cutvalue_upperbound Y] = maxcut_octave(L, r)
%
% See examples/maxcut.m for help about the math behind this example. This
% file is here to illustrate how to use Manopt within Octave.
%
% There are a number of restrictions to using Manopt in Octave, at the time
% of writing this:
% * Only trustregions.m works as a solver yet.
% * Only elliptopefactory.m works as a manifold factory yet.
% * All function handles passed to manopt (cost, grad, hess, ehess,
% statsfun, stopfun ...) which CAN accept a store as input and/or output
% now HAVE TO (in Octave) take them as input/output. Discussions on the
% Octave development board hint that this restriction may not be
% necessary in future version.
% * You cannot define those functions as nested functions. Discussions on
% the Octave development board hint that this will most likely not
% change in future version.
%
% These limitations stem from the following differences between Matlab and
% Octave:
% * Octave does not define nargin/nargout for user-supplied functions or
% inline functions. This will likely change.
% * Octave has no nested functions support. This will likely not change.
% Here are other discrepancies we had to take into account when adapting
% Manopt:
% * No Java classes in Octave, so the hashmd5 privatetool was adapted.
% * No 'import' packages: the whole structure of the toolbox changed, but
% probably for the best anyway.
% * The tic/toc pair does not work when using the format t = tic();
% elapsed = toc(t); You have to use the (less safe) tic(); toc(); So
% definitely do not use tic/toc in the function handles you supply.
% * try/catch blocks do not give the catch an exception object.
% * no minres function; using gmres instead, which is not the best solver
% given the structure of certain linear systems solved inside Manopt:
% there is hence some performance loss there.
%
% See also: maxcut
% This file is part of Manopt and is copyrighted. See the license file.
%
% Main author: Nicolas Boumal, Aug. 22, 2013
% Contributors:
%
% Change log:
%
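% Example (editor's sketch):
%   [x, cutvalue] = maxcut_octave();  % builds a small random graph internally
%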
% If no inputs are provided, generate a random Laplacian.
% This is for illustration purposes only.
if ~exist('L', 'var') || isempty(L)
n = 20;
A = triu(randn(n) <= .4, 1);
A = A+A';
D = diag(sum(A, 2));
L = D-A;
end
n = size(L, 1);
assert(size(L, 2) == n, 'L must be square.');
if ~exist('r', 'var') || isempty(r) || r > n
r = n;
end
% We will let the rank increase. Each rank value will generate a cut.
% We have to go up in the rank to eventually find a certificate of SDP
% optimality. This in turn will give us an upperbound on the MAX CUT
% value and assure us that we're doing well, according to Goemans and
% Williamson's argument. In practice though, the good cuts often come
% up for low rank values, so we better keep track of the best one.
best_x = ones(n, 1);
best_cutvalue = 0;
cutvalue_upperbound = NaN;
time = [];
cost = [];
for rr = 2 : r
manifold = elliptopefactory(n, rr);
if rr == 2
% At first, for rank 2, generate a random point.
Y0 = manifold.rand();
else
% To increase the rank, we could just add a column of zeros to
% the Y matrix. Unfortunately, this lands us in a saddle point.
% To escape from the saddle, we may compute an eigenvector of
% Sy associated to a negative eigenvalue: that will yield a
% (second order) descent direction Z. See Journee et al ; Sy is
% linked to dual certificates for the SDP.
Y0 = [Y zeros(n, 1)];
LY0 = L*Y0;
Dy = spdiags(sum(LY0.*Y0, 2), 0, n, n);
Sy = (Dy - L)/4;
% Find the smallest (the "most negative") eigenvalue of Sy.
[v, s] = eigs(Sy, 1, 'SA');
% If there is no negative eigenvalue for Sy, then we are not at
% a saddle point: we're actually done!
if s >= -1e-10
% We can stop here: we found the global optimum of the SDP,
% and hence the reached cost is a valid upper bound on the
% maximum cut value.
cutvalue_upperbound = max(-[info.cost]);
break;
end
% This is our escape direction.
Z = manifold.proj(Y0, [zeros(n, rr-1) v]);
% % These instructions can be uncommented to see what the cost
% % function looks like at a saddle point.
% plotprofile(problem, Y0, Z, linspace(-1, 1, 101));
% drawnow; pause;
% Now make a step in the Z direction to escape from the saddle.
% It is not obvious that it is ok to do a unit step ... perhaps
% need to be cautious here with the stepsize. It's not too
% critical though: the important point is to leave the saddle
% point. But it's nice to guarantee monotone decrease of the
% cost, and we can't do that with a constant step (at least,
% not without a proper argument to back it up).
stepsize = 1.0;
Y0 = manifold.retr(Y0, Z, stepsize);
end
% Use the Riemannian optimization based algorithm lower in this
% file to reach a critical point (typically a local optimizer) of
% the max cut cost with fixed rank, starting from Y0.
[Y info] = maxcut_fixedrank(L, Y0);
% Some info logging.
thistime = [info.time];
if ~isempty(time)
thistime = time(end) + thistime;
end
time = [time thistime]; %#ok<AGROW>
cost = [cost [info.cost]]; %#ok<AGROW>
% Time to turn the matrix Y into a cut.
% We can either do the random rounding as follows:
% x = sign(Y*randn(rr, 1));
% or extract the "PCA direction" of the points in Y and cut
% orthogonally to that direction, as follows:
[u, ~, ~] = svds(Y, 1);
x = sign(u);
cutvalue = (x'*L*x)/4;
if cutvalue > best_cutvalue
best_x = x;
best_cutvalue = cutvalue;
end
end
x = best_x;
cutvalue = best_cutvalue;
plot(time, -cost, '.-');
xlabel('Time [s]');
ylabel('Relaxed cut value');
title('The relaxed cut value is an upper bound on the optimal cut value.');
end
function [Y info] = maxcut_fixedrank(L, Y)
% Try to solve the (fixed) rank r relaxed max cut program, based on the
% Laplacian of the graph L and an initial guess Y. L is nxn and Y is nxr.
[n r] = size(Y);
assert(all(size(L) == n));
% The fixed rank elliptope geometry describes symmetric, positive
% semidefinite matrices of size n with rank r and all diagonal entries
% are 1.
manifold = elliptopefactory(n, r);
% % If you want to compare the performance of the elliptope geometry
% % against the (conceptually simpler) oblique manifold geometry,
% % uncomment this line.
% manifold = obliquefactory(r, n, true);
problem.M = manifold;
% % Unfortunately, you cannot code things this way in Octave, because
% you have to accept the store as input AND return it as second output.
% problem.cost = @(Y) -trace(Y'*L*Y)/4;
% problem.egrad = @(Y) -(L*Y)/2;
% problem.ehess = @(Y, U) -(L*U)/2;
% Instead of the prototyping version, the functions below describe the
% cost, gradient and Hessian using the caching system (the store
% structure). This allows executing exactly the required number of
% multiplications with the matrix L.
problem.cost = @(Y, store) cost(L, Y, store);
problem.grad = @(Y, store) grad(manifold, L, Y, store);
problem.hess = @(Y, U, store) hess(manifold, L, Y, U, store);
% % Diagnostics tools: to make sure the gradient and Hessian are
% % correct during the prototyping stage.
% checkgradient(problem); pause;
% checkhessian(problem); pause;
% % To investigate the effect of the rotational invariance when using
% % the oblique or the elliptope geometry, or to study the saddle point
% % issue mentioned above, it is sometimes interesting to look at the
% % spectrum of the Hessian. For large dimensions, this is slow!
% stairs(sort(hessianspectrum(problem, Y)));
% drawnow; pause;
% % When facing a saddle point issue as described in the master
% % function, and when no sure mechanism exists to find an escape
% % direction, it may be helpful to set useRand to true and raise
% % miniter to more than 1, when using trustregions. This will tell the
% % solver to not stop before at least miniter iterations were
% % accomplished (thus disregarding the zero gradient at the saddle
% % point) and to use random search directions to kick start the inner
% % solve (tCG) step. It is not as efficient as finding a sure escape
% % direction, but sometimes it's the best we have.
% options.useRand = true;
% options.miniter = 5;
options.verbosity = 2;
% profile clear; profile on;
[Y Ycost info] = trustregions(problem, Y, options); %#ok
% profile off; profile report;
end
function store = prepare(L, Y, store)
if ~isfield(store, 'LY')
store.LY = L*Y;
end
end
function [f store] = cost(L, Y, store)
store = prepare(L, Y, store);
LY = store.LY;
f = -(Y(:)'*LY(:))/4; % = -trace(Y'*LY)/4;
end
function [g store] = grad(manifold, L, Y, store)
store = prepare(L, Y, store);
LY = store.LY;
g = manifold.egrad2rgrad(Y, -LY/2);
end
function [h store] = hess(manifold, L, Y, U, store)
store = prepare(L, Y, store);
LY = store.LY;
LU = L*U;
h = manifold.ehess2rhess(Y, -LY/2, -LU/2, U);
end
|
github
|
skovnats/madmm-master
|
sparse_pca.m
|
.m
|
madmm-master/compressed_modes/manopt/examples/sparse_pca.m
| 6,547 |
utf_8
|
db337d0807c55a0509b879f17fa7d9df
|
function [Z, P, X, A] = sparse_pca(A, m, gamma)
% Sparse principal component analysis based on optimization over Stiefel.
%
% [Z, P, X] = sparse_pca(A, m, gamma)
%
% We consider sparse PCA applied to a data matrix A of size pxn, where p is
% the number of samples (observations) and n is the number of variables
% (features). We attempt to extract m different components. The parameter
% gamma, which must lie between 0 and the largest 2-norm of a column of
% A, tunes the balance between best explanation of the variance of the data
% (gamma = 0, mostly corresponds to standard PCA) and best sparsity of the
% principal components Z (gamma maximal, Z is zero). The variables
% contained in the columns of A are assumed centered (zero-mean).
%
% The output Z of size nxm represents the principal components. There are m
% columns, each one of unit norm and capturing a prefered direction of the
% data, while trying to be sparse. P has the same size as Z and represents
% the sparsity pattern of Z. X is an orthonormal matrix of size pxm
% produced internally by the algorithm.
%
% With classical PCA, the variability captured by m components is
% sum(svds(A, m))
% With the output Z, which should be sparser than with standard PCA, it is
% sum(svd(A*Z))
%
% The method is based on the maximization of a differentiable function over
% the Stiefel manifold of dimension pxm. Notice that this dimension is
% independent of n, making this method particularly suitable for problems
% with many variables but few samples (n much larger than p). The
% complexity of each iteration of the algorithm is linear in n as a result.
%
% The theory behind this code is available in the paper
% http://jmlr.org/papers/volume11/journee10a/journee10a.pdf
% Generalized Power Method for Sparse Principal Component Analysis, by
% Journee, Nesterov, Richtarik and Sepulchre, JMLR, 2010.
% This implementation is not equivalent to the one described in that paper
% (and is independent from their authors) but is close in spirit
% nonetheless. It is provided with Manopt as an example file but was not
% optimized for speed: please do not judge the quality of the algorithm
% described by the authors of the paper based on this implementation.
% This file is part of Manopt and is copyrighted. See the license file.
%
% Main author: Nicolas Boumal, Dec. 24, 2013
% Contributors:
%
% Change log:
%
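% Example (editor's sketch; the data below is hypothetical):
%   p = 20; n = 300; m = 3;
%   A = randn(p, n);
%   A = bsxfun(@minus, A, mean(A, 1));    % center each variable (column)
%   gamma = 0.5*max(sqrt(sum(A.^2, 1)));  % between 0 and the largest column 2-norm
%   [Z, P, X] = sparse_pca(A, m, gamma);
%   sum(svd(A*Z))                         % compare with sum(svds(A, m))
%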
% If no input is provided, generate random data for a quick demo
if nargin == 0
n = 100;
p = 10;
m = 2;
% Data matrix
A = randn(p, n);
% Regularization parameter. This should be between 0 and the largest
% 2-norm of a column of A.
gamma = 1;
elseif nargin ~= 3
error('Please provide 3 inputs (or none for a demo).');
end
% Execute the main algorithm: it will compute a sparsity pattern P.
[P, X] = sparse_pca_stiefel_l1(A, m, gamma);
% Compute the principal components in accordance with the sparsity.
Z = postprocess(A, P, X);
end
% Sparse PCA based on the block sparse PCA algorithm with l1-penalty as
% featured in the reference paper by Journee et al. This is not the same
% algorithm but it is the same cost function optimized over the same search
% space. We force N = eye(m).
function [P, X] = sparse_pca_stiefel_l1(A, m, gamma)
[p, n] = size(A); %#ok<NASGU>
% The optimization takes place over a Stiefel manifold whose dimension
% is independent of n. This is especially useful when there are many
% more variables than samples.
St = stiefelfactory(p, m);
problem.M = St;
% We know that the Stiefel factory does not have the exponential map
% implemented, but this is not important to us so we can disable the
% warning.
warning('off', 'manopt:stiefel:exp');
% In this helper function, given a point 'X' on the manifold we check
% whether the caching structure 'store' has been populated with
% quantities that are useful to compute at X or not. If they were not,
% then we compute and store them now.
function store = prepare(X, store)
if ~isfield(store, 'ready') || ~store.ready
store.AtX = A'*X;
store.absAtX = abs(store.AtX);
store.pos = max(0, store.absAtX - gamma);
store.ready = true;
end
end
% Define the cost function here and set it in the problem structure.
problem.cost = @cost;
function [f store] = cost(X, store)
store = prepare(X, store);
pos = store.pos;
f = -.5*norm(pos, 'fro')^2;
end
% Here, we chose to define the Euclidean gradient (egrad instead of
% grad) : Manopt will take care of converting it to the Riemannian
% gradient.
problem.egrad = @egrad;
function [G store] = egrad(X, store)
if ~isfield(store, 'G')
store = prepare(X, store);
pos = store.pos;
AtX = store.AtX;
sgAtX = sign(AtX);
factor = pos.*sgAtX;
store.G = -A*factor;
end
G = store.G;
end
% checkgradient(problem);
% pause;
% The optimization happens here. To improve the method, it may be
% interesting to investigate better-than-random initial iterates and,
% possibly, to fine tune the parameters of the solver.
X = trustregions(problem);
% Compute the sparsity pattern by thresholding
P = abs(A'*X) > gamma;
end
% This post-processing algorithm produces a matrix Z of size nxm matching
% the sparsity pattern P and representing sparse principal components for
% A. This is to be called with the output of the main algorithm. This
% algorithm is described in the reference paper by Journee et al.
function Z = postprocess(A, P, X)
fprintf('Post-processing... ');
counter = 0;
maxiter = 1000;
tolerance = 1e-8;
while counter < maxiter
Z = A'*X;
Z(~P) = 0;
Z = Z*diag(1./sqrt(diag(Z'*Z)));
X = ufactor(A*Z);
counter = counter + 1;
if counter > 1 && norm(Z0-Z, 'fro') < tolerance*norm(Z0, 'fro')
break;
end
Z0 = Z;
end
fprintf('done, in %d iterations (max = %d).\n', counter, maxiter);
end
% Returns the U-factor of the polar decomposition of X
function U = ufactor(X)
[W S V] = svd(X, 0); %#ok<ASGLU>
U = W*V';
end
|
github
|
skovnats/madmm-master
|
grassmannfactory.m
|
.m
|
madmm-master/compressed_modes/manopt/manopt/manifolds/grassmann/grassmannfactory.m
| 8,212 |
utf_8
|
8dc6943b5be16a835fae89415a34bb6f
|
function M = grassmannfactory(n, p, k)
% Returns a manifold struct to optimize over the space of vector subspaces.
%
% function M = grassmannfactory(n, p)
% function M = grassmannfactory(n, p, k)
%
% Grassmann manifold: each point on this manifold is a collection of k
% vector subspaces of dimension p embedded in R^n.
%
% The metric is obtained by making the Grassmannian a Riemannian quotient
% manifold of the Stiefel manifold, i.e., the manifold of orthonormal
% matrices, itself endowed with a metric by making it a Riemannian
% submanifold of the Euclidean space, endowed with the usual inner product.
% In short: it is the usual metric used in most cases.
%
% This structure deals with matrices X of size n x p x k (or n x p if
% k = 1, which is the default) such that each n x p matrix is orthonormal,
% i.e., X'*X = eye(p) if k = 1, or X(:, :, i)' * X(:, :, i) = eye(p) for
% i = 1 : k if k > 1. Each n x p matrix is a numerical representation of
% the vector subspace its columns span.
%
% By default, k = 1.
%
% See also: stiefelfactory
% This file is part of Manopt: www.manopt.org.
% Original author: Nicolas Boumal, Dec. 30, 2012.
% Contributors:
% Change log:
% March 22, 2013 (NB) : Implemented geodesic distance.
% April 17, 2013 (NB) : Retraction changed to the polar decomposition, so
% that the vector transport is now correct, in the
% sense that it is compatible with the retraction,
% i.e., transporting a tangent vector G from U to V
% where V = Retr(U, H) will give Z, and
% transporting GQ from UQ to VQ will give ZQ: there
% is no dependence on the representation, which is
% as it should be. Notice that the polar
% factorization requires an SVD whereas the qfactor
% retraction requires a QR decomposition, which is
% cheaper. Hence, if the retraction happens to be a
% bottleneck in your application and you are not
% using vector transports, you may want to replace
% the retraction with a qfactor.
% July 4, 2013 (NB) : Added support for the logarithmic map 'log'.
% July 5, 2013 (NB) : Added support for ehess2rhess.
% June 24, 2014 (NB) : Small bug fix in the retraction, and added final
% re-orthonormalization at the end of the
% exponential map. This follows discussions on the
% forum where it appeared there is a significant
% loss in orthonormality without that extra step.
% Also changed the randvec function so that it now
% returns a globally normalized vector, not a
% vector where each component is normalized (this
% only matters if k>1).
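% Example (editor's sketch):
%   Gr = grassmannfactory(5, 2);
%   x = Gr.rand();          % random subspace, represented by a 5x2 orthonormal basis
%   u = Gr.randvec(x);      % unit-norm tangent vector at x
%   y = Gr.exp(x, u, 0.1);  % move along the geodesic through x
%   Gr.dist(x, y)           % should be close to 0.1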
assert(n >= p, ...
['The dimension n of the ambient space must be larger ' ...
'than the dimension p of the subspaces.']);
if ~exist('k', 'var') || isempty(k)
k = 1;
end
if k == 1
M.name = @() sprintf('Grassmann manifold Gr(%d, %d)', n, p);
elseif k > 1
M.name = @() sprintf('Multi Grassmann manifold Gr(%d, %d)^%d', ...
n, p, k);
else
error('k must be an integer no less than 1.');
end
M.dim = @() k*p*(n-p);
M.inner = @(x, d1, d2) d1(:).'*d2(:);
M.norm = @(x, d) norm(d(:));
M.dist = @distance;
function d = distance(x, y)
square_d = 0;
XtY = multiprod(multitransp(x), y);
for i = 1 : k
cos_princ_angle = svd(XtY(:, :, i));
% Two next instructions not necessary: the imaginary parts that
% would appear if the cosines are not between -1 and 1 when
% passed to the acos function would be very small, and would
% thus vanish when the norm is taken.
% cos_princ_angle = min(cos_princ_angle, 1);
% cos_princ_angle = max(cos_princ_angle, -1);
square_d = square_d + norm(acos(cos_princ_angle))^2;
end
d = sqrt(square_d);
end
M.typicaldist = @() sqrt(p*k);
% Orthogonal projection of an ambient vector U to the horizontal space
% at X.
M.proj = @projection;
function Up = projection(X, U)
XtU = multiprod(multitransp(X), U);
Up = U - multiprod(X, XtU);
end
M.tangent = M.proj;
M.egrad2rgrad = M.proj;
M.ehess2rhess = @ehess2rhess;
function rhess = ehess2rhess(X, egrad, ehess, H)
PXehess = projection(X, ehess);
XtG = multiprod(multitransp(X), egrad);
HXtG = multiprod(H, XtG);
rhess = PXehess - HXtG;
end
M.retr = @retraction;
function Y = retraction(X, U, t)
if nargin < 3
t = 1.0;
end
Y = X + t*U;
for i = 1 : k
% We do not need to worry about flipping signs of columns here,
% since only the column space is important, not the actual
% columns. Compare this with the Stiefel manifold.
% [Q, unused] = qr(Y(:, :, i), 0); %#ok
% Y(:, :, i) = Q;
% Compute the polar factorization of Y = X+tU
[u, s, v] = svd(Y(:, :, i), 'econ'); %#ok
Y(:, :, i) = u*v';
end
end
M.exp = @exponential;
function Y = exponential(X, U, t)
if nargin == 3
tU = t*U;
else
tU = U;
end
Y = zeros(size(X));
for i = 1 : k
[u s v] = svd(tU(:, :, i), 0);
cos_s = diag(cos(diag(s)));
sin_s = diag(sin(diag(s)));
Y(:, :, i) = X(:, :, i)*v*cos_s*v' + u*sin_s*v';
% From numerical experiments, it seems necessary to
% re-orthonormalize. This is overall quite expensive.
[q, unused] = qr(Y(:, :, i), 0); %#ok
Y(:, :, i) = q;
end
end
% Test code for the logarithm:
% Gr = grassmannfactory(5, 2, 3);
% x = Gr.rand()
% y = Gr.rand()
% u = Gr.log(x, y)
% Gr.dist(x, y) % These two numbers should
% Gr.norm(x, u) % be the same.
% z = Gr.exp(x, u) % z needs not be the same matrix as y, but it should
% v = Gr.log(x, z) % be the same point as y on Grassmann: dist almost 0.
M.log = @logarithm;
function U = logarithm(X, Y)
U = zeros(n, p, k);
for i = 1 : k
x = X(:, :, i);
y = Y(:, :, i);
ytx = y.'*x;
At = y.'-ytx*x.';
Bt = ytx\At;
[u, s, v] = svd(Bt.', 'econ');
u = u(:, 1:p);
s = diag(s);
s = s(1:p);
v = v(:, 1:p);
U(:, :, i) = u*diag(atan(s))*v.';
end
end
M.hash = @(X) ['z' hashmd5(X(:))];
M.rand = @random;
function X = random()
X = zeros(n, p, k);
for i = 1 : k
[Q, unused] = qr(randn(n, p), 0); %#ok<NASGU>
X(:, :, i) = Q;
end
end
M.randvec = @randomvec;
function U = randomvec(X)
U = projection(X, randn(n, p, k));
U = U / norm(U(:));
end
M.lincomb = @lincomb;
M.zerovec = @(x) zeros(n, p, k);
% This transport is compatible with the polar retraction.
M.transp = @(x1, x2, d) projection(x2, d);
M.vec = @(x, u_mat) u_mat(:);
M.mat = @(x, u_vec) reshape(u_vec, [n, p, k]);
M.vecmatareisometries = @() true;
end
% Linear combination of tangent vectors
function d = lincomb(x, a1, d1, a2, d2) %#ok<INUSL>
if nargin == 3
d = a1*d1;
elseif nargin == 5
d = a1*d1 + a2*d2;
else
error('Bad use of grassmann.lincomb.');
end
end
|
github
|
skovnats/madmm-master
|
elliptopefactory.m
|
.m
|
madmm-master/compressed_modes/manopt/manopt/manifolds/symfixedrank/elliptopefactory.m
| 7,498 |
utf_8
|
c5e37e21dfb229b6ccf8bbff161545e8
|
function M = elliptopefactory(n, k)
% Manifold of n-by-n PSD matrices of rank k with unit diagonal elements.
%
% function M = elliptopefactory(n, k)
%
% The geometry is based on the paper,
% M. Journee, P.-A. Absil, F. Bach and R. Sepulchre,
% "Low-Rank Optimization on the Cone of Positive Semidefinite Matrices",
% SIOPT, 2010.
%
% Paper link: http://www.di.ens.fr/~fbach/journee2010_sdp.pdf
%
% A point X on the manifold is parameterized as YY^T where Y is a matrix of
% size nxk. The matrix Y (nxk) is a full column-rank matrix. Hence, we deal
% directly with Y. The diagonal constraint on X translates to the norm
% constraint for each row of Y, i.e., || Y(i, :) || = 1.
%
% See also: obliquefactory
% This file is part of Manopt: www.manopt.org.
% Original author: Bamdev Mishra, July 12, 2013.
% Contributors:
% Change log:
% July 18, 2013 (NB) : Fixed projection operator for rank-deficient Y'Y.
% Aug. 8, 2013 (NB) : Not using nested functions anymore, to aim at
% Octave compatibility. Sign error in right hand
% side of the call to minres corrected.
% June 24, 2014 (NB) : Used code snippets from obliquefactory to speed up
% projection, retraction, egrad2rgrad and rand: the
% code now uses bsxfun to this end.
% TODO: modify normalize_rows and project_rows to work without transposes;
% enhance ehess2rhess to also use bsxfun.
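% Example (editor's sketch):
%   M = elliptopefactory(10, 3);
%   Y = M.rand();        % 10x3 with unit-norm rows, so X = Y*Y' has unit diagonal
%   eta = M.randvec(Y);  % unit-norm horizontal tangent vector at Y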
if ~exist('lyap', 'file')
warning('manopt:elliptopefactory:slowlyap', ...
['The function lyap to solve Lyapunov equations seems to not ' ...
'be available. This may slow down optimization over this ' ...
'manifold significantly. lyap is part of the control system ' ...
'toolbox.']);
end
M.name = @() sprintf('YY'' quotient manifold of %dx%d PSD matrices of rank %d with diagonal elements being 1', n, k);
M.dim = @() n*(k-1) - k*(k-1)/2; % Each of the n rows loses one degree of freedom to the unit-diagonal constraint; k*(k-1)/2 accounts for the O(k) quotient.
% Euclidean metric on the total space
M.inner = @(Y, eta, zeta) trace(eta'*zeta);
M.norm = @(Y, eta) sqrt(M.inner(Y, eta, eta));
M.dist = @(Y, Z) error('elliptopefactory.dist not implemented yet.');
M.typicaldist = @() 10*k;
M.proj = @projection;
M.tangent = M.proj;
M.tangent2ambient = @(Y, eta) eta;
M.retr = @retraction;
M.egrad2rgrad = @egrad2rgrad;
M.ehess2rhess = @ehess2rhess;
M.exp = @exponential;
% Notice that the hash of two equivalent points will be different...
M.hash = @(Y) ['z' hashmd5(Y(:))];
M.rand = @() random(n, k);
M.randvec = @randomvec;
M.lincomb = @lincomb;
M.zerovec = @(Y) zeros(n, k);
M.transp = @(Y1, Y2, d) projection(Y2, d);
M.vec = @(Y, u_mat) u_mat(:);
M.mat = @(Y, u_vec) reshape(u_vec, [n, k]);
M.vecmatareisometries = @() true;
end
% Given a matrix X, returns the same matrix but with each column scaled so
% that they have unit 2-norm.
% See obliquefactory.
function X = normalize_rows(X)
X = X';
norms = sqrt(sum(X.^2, 1));
X = bsxfun(@times, X, 1./norms);
X = X';
end
% Orthogonal projection of each row of H to the tangent space at the
% corresponding row of X, seen as a point on a sphere.
% See obliquefactory.
function PXH = project_rows(X, H)
X = X';
H = H';
% Compute the inner product between each vector H(:, i) with its root
% point X(:, i), that is, X(:, i).' * H(:, i). Returns a row vector.
inners = sum(X.*H, 1);
% Subtract from H the components of the H(:, i)'s that are parallel to
% the root points X(:, i).
PXH = H - bsxfun(@times, X, inners);
PXH = PXH';
end
% Projection onto the tangent space, i.e., on the tangent space of
% ||Y(i, :)|| = 1
function etaproj = projection(Y, eta)
[unused, k] = size(Y); %#ok<ASGLU>
eta = project_rows(Y, eta);
% Projection onto the horizontal space
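% (Editor's note, added) The vertical directions at Y are Y*Omega with Omega
% skew-symmetric, since rotations Y -> Y*Q leave X = Y*Y' unchanged. Requiring
% eta - Y*Omega to be orthogonal to every such direction yields the Lyapunov
% equation (Y'*Y)*Omega + Omega*(Y'*Y) = Y'*eta - eta'*Y, solved below.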
YtY = Y'*Y;
SS = YtY;
AS = Y'*eta - eta'*Y;
try
% This is supposed to work and indeed return a skew-symmetric
% solution Omega.
Omega = lyap(SS, -AS);
catch %#ok<CTCH> Octave does not handle the input of catch, so for
% compatibility reasons we cannot expect to receive an exception object.
% It can happen though that SS will be rank deficient. The
% Lyapunov equation we solve still has a unique skew-symmetric
% solution, but solutions with a symmetric part now also exist,
% and the lyap function doesn't like that. So we want to
% extract the minimum norm solution. This is also useful if lyap is
% not available (it is part of the control system toolbox).
mat = @(x) reshape(x, [k k]);
vec = @(X) X(:);
is_octave = exist('OCTAVE_VERSION', 'builtin');
if ~is_octave
[vecomega, unused] = minres(@(x) vec(SS*mat(x) + mat(x)*SS), vec(AS)); %#ok<NASGU>
else
[vecomega, unused] = gmres(@(x) vec(SS*mat(x) + mat(x)*SS), vec(AS)); %#ok<NASGU>
end
Omega = mat(vecomega);
end
% % Make sure the result is skew-symmetric (does not seem necessary).
% Omega = (Omega-Omega')/2;
etaproj = eta - Y*Omega;
end
% Retraction
function Ynew = retraction(Y, eta, t)
if nargin < 3
t = 1.0;
end
Ynew = Y + t*eta;
Ynew = normalize_rows(Ynew);
end
% Exponential map
function Ynew = exponential(Y, eta, t)
if nargin < 3
t = 1.0;
end
Ynew = retraction(Y, eta, t);
warning('manopt:elliptopefactory:exp', ...
['Exponential for fixed rank spectrahedron ' ...
'manifold not implemented yet. Used retraction instead.']);
end
% Euclidean gradient to Riemannian gradient conversion.
% We only need the ambient space projection: the remainder of the
% projection function is not necessary because the Euclidean gradient must
% already be orthogonal to the vertical space.
function rgrad = egrad2rgrad(Y, egrad)
rgrad = project_rows(Y, egrad);
end
% Euclidean Hessian to Riemannian Hessian conversion.
% TODO: speed this function up using bsxfun.
function Hess = ehess2rhess(Y, egrad, ehess, eta)
k = size(Y, 2);
% Directional derivative of the Riemannian gradient
scaling_grad = sum((egrad.*Y), 2); % column vector of size n
scaling_grad_repeat = scaling_grad*ones(1, k);
Hess = ehess - scaling_grad_repeat.*eta;
scaling_hess = sum((eta.*egrad) + (Y.*ehess), 2);
scaling_hess_repeat = scaling_hess*ones(1, k);
% directional derivative of scaling_grad_repeat
Hess = Hess - scaling_hess_repeat.*Y;
% Project on the horizontal space
Hess = projection(Y, Hess);
end
% Random point generation on the manifold
function Y = random(n, k)
Y = randn(n, k);
Y = normalize_rows(Y);
end
% Random vector generation at Y
function eta = randomvec(Y)
eta = randn(size(Y));
eta = projection(Y, eta);
nrm = norm(eta, 'fro');
eta = eta / nrm;
end
% Linear combination of tangent vectors
function d = lincomb(Y, a1, d1, a2, d2) %#ok<INUSL>
if nargin == 3
d = a1*d1;
elseif nargin == 5
d = a1*d1 + a2*d2;
else
error('Bad use of elliptopefactory.lincomb.');
end
end
|
github
|
skovnats/madmm-master
|
spectrahedronfactory.m
|
.m
|
madmm-master/compressed_modes/manopt/manopt/manifolds/symfixedrank/spectrahedronfactory.m
| 3,945 |
utf_8
|
4e3a0e4c42205b2ff0e094a8df299125
|
function M = spectrahedronfactory(n, k)
% Manifold of n-by-n symmetric positive semidefinite matrices of rank k
% with trace (sum of diagonal elements) being 1.
%
% function M = spectrahedronfactory(n, k)
%
% The geometry is based on the paper,
% M. Journee, P.-A. Absil, F. Bach and R. Sepulchre,
% "Low-Rank Optimization on the Cone of Positive Semidefinite Matrices",
% SIOPT, 2010.
%
% Paper link: http://www.di.ens.fr/~fbach/journee2010_sdp.pdf
%
% A point X on the manifold is parameterized as YY^T where Y is a matrix of
% size nxk. The matrix Y (nxk) is a full column-rank matrix. Hence, we deal
% directly with Y. The trace constraint on X translates to the Frobenius
% norm constraint on Y, i.e., trace(X) = || Y ||_F^2.
% This file is part of Manopt: www.manopt.org.
% Original author: Bamdev Mishra, July 11, 2013.
% Contributors:
% Change log:
M.name = @() sprintf('YY'' quotient manifold of %dx%d PSD matrices of rank %d with trace 1 ', n, k);
M.dim = @() (k*n - 1) - k*(k-1)/2; % The -1 accounts for the unit-trace constraint; k*(k-1)/2 for the O(k) quotient.
% Euclidean metric on the total space
M.inner = @(Y, eta, zeta) trace(eta'*zeta);
M.norm = @(Y, eta) sqrt(M.inner(Y, eta, eta));
M.dist = @(Y, Z) error('spectrahedronfactory.dist not implemented yet.');
M.typicaldist = @() 10*k;
M.proj = @projection;
function etaproj = projection(Y, eta)
% Projection onto the tangent space, i.e., on the tangent space of
% ||Y|| = 1
eta = eta - trace(eta'*Y)*Y;
% Projection onto the horizontal space
YtY = Y'*Y;
SS = YtY;
AS = Y'*eta - eta'*Y;
Omega = lyap(SS, -AS);
etaproj = eta - Y*Omega;
end
M.tangent = M.proj;
M.tangent2ambient = @(Y, eta) eta;
M.retr = @retraction;
function Ynew = retraction(Y, eta, t)
if nargin < 3
t = 1.0;
end
Ynew = Y + t*eta;
Ynew = Ynew/norm(Ynew,'fro');
end
M.egrad2rgrad = @(Y, eta) eta - trace(eta'*Y)*Y;
M.ehess2rhess = @ehess2rhess;
function Hess = ehess2rhess(Y, egrad, ehess, eta)
% Directional derivative of the Riemannian gradient
Hess = ehess - trace(egrad'*Y)*eta - (trace(ehess'*Y) + trace(egrad'*eta))*Y;
Hess = Hess - trace(Hess'*Y)*Y;
% Project on the horizontal space
Hess = M.proj(Y, Hess);
end
M.exp = @exponential;
function Ynew = exponential(Y, eta, t)
if nargin < 3
t = 1.0;
end
Ynew = retraction(Y, eta, t);
warning('manopt:spectrahedronfactory:exp', ...
['Exponential for fixed rank spectrahedron ' ...
'manifold not implemented yet. Used retraction instead.']);
end
% Notice that the hash of two equivalent points will be different...
M.hash = @(Y) ['z' hashmd5(Y(:))];
M.rand = @random;
function Y = random()
Y = randn(n, k);
Y = Y/norm(Y,'fro');
end
M.randvec = @randomvec;
function eta = randomvec(Y)
eta = randn(n, k);
eta = projection(Y, eta);
nrm = M.norm(Y, eta);
eta = eta / nrm;
end
M.lincomb = @lincomb;
M.zerovec = @(Y) zeros(n, k);
M.transp = @(Y1, Y2, d) projection(Y2, d);
M.vec = @(Y, u_mat) u_mat(:);
M.mat = @(Y, u_vec) reshape(u_vec, [n, k]);
M.vecmatareisometries = @() true;
end
% Linear combination of tangent vectors
function d = lincomb(Y, a1, d1, a2, d2) %#ok<INUSL>
if nargin == 3
d = a1*d1;
elseif nargin == 5
d = a1*d1 + a2*d2;
else
error('Bad use of spectrahedronfactory.lincomb.');
end
end
|
github
|
skovnats/madmm-master
|
sympositivedefinitefactory.m
|
.m
|
madmm-master/compressed_modes/manopt/manopt/manifolds/symfixedrank/sympositivedefinitefactory.m
| 5,506 |
utf_8
|
352c21fe40d0e4f75e7c0fa89ea4ab04
|
function M = sympositivedefinitefactory(n)
% Manifold of n-by-n symmetric positive definite matrices with
% the bi-invariant geometry.
%
% function M = sympositivedefinitefactory(n)
%
% A point X on the manifold is represented as a symmetric positive definite
% matrix X (nxn).
%
% The following material is referenced from Chapter 6 of the book:
% Rajendra Bhatia, "Positive definite matrices",
% Princeton University Press, 2007.
% This file is part of Manopt: www.manopt.org.
% Original author: Bamdev Mishra, August 29, 2013.
% Contributors: Nicolas Boumal
% Change log:
%
% March 5, 2014 (NB)
% There were a number of mistakes in the code owing to the tacit
% assumption that if X and eta are symmetric, then X\eta is
% symmetric too, which is not the case. See discussion on the Manopt
% forum started on Jan. 19, 2014. Functions norm, dist, exp and log
% were modified accordingly. Furthermore, they only require matrix
% inversion (as well as matrix log or matrix exp), not matrix square
% roots or their inverse.
%
% July 28, 2014 (NB)
% The dim() function returned n*(n-1)/2 instead of n*(n+1)/2.
% Implemented proper parallel transport from Sra and Hosseini (not
% used by default).
% Also added symmetrization in exp and log (to be sure).
symm = @(X) .5*(X+X');
M.name = @() sprintf('Symmetric positive definite geometry of %dx%d matrices', n, n);
M.dim = @() n*(n+1)/2;
% Choice of the metric on the orthonormal space is motivated by the
% symmetry present in the space. The metric on the positive definite
% cone is its natural bi-invariant metric.
M.inner = @(X, eta, zeta) trace( (X\eta) * (X\zeta) );
% Notice that X\eta is *not* symmetric in general.
M.norm = @(X, eta) sqrt(trace((X\eta)^2));
% Same here: X\Y is not symmetric in general. There should be no need
% to take the real part, but rounding errors may cause a small
% imaginary part to appear, so we discard it.
M.dist = @(X, Y) sqrt(real(trace((logm(X\Y))^2)));
M.typicaldist = @() sqrt(n*(n+1)/2);
M.egrad2rgrad = @egrad2rgrad;
function eta = egrad2rgrad(X, eta)
eta = X*symm(eta)*X;
end
M.ehess2rhess = @ehess2rhess;
function Hess = ehess2rhess(X, egrad, ehess, eta)
% Directional derivatives of the Riemannian gradient
Hess = X*symm(ehess)*X + 2*symm(eta*symm(egrad)*X);
% Correction factor for the non-constant metric
Hess = Hess - symm(eta*symm(egrad)*X);
end
M.proj = @(X, eta) symm(eta);
M.tangent = M.proj;
M.tangent2ambient = @(X, eta) eta;
M.retr = @exponential;
M.exp = @exponential;
function Y = exponential(X, eta, t)
if nargin < 3
t = 1.0;
end
% The symm() and real() calls are mathematically not necessary but
% are numerically necessary.
Y = symm(X*real(expm(X\(t*eta))));
end
M.log = @logarithm;
function H = logarithm(X, Y)
% Same remark regarding the calls to symm() and real().
H = symm(X*real(logm(X\Y)));
end
M.hash = @(X) ['z' hashmd5(X(:))];
% Generate a random symmetric positive definite matrix following a
% certain distribution. The particular choice of a distribution is of
% course arbitrary, and specific applications might require different
% ones.
M.rand = @random;
function X = random()
D = diag(1+rand(n, 1));
[Q, R] = qr(randn(n)); %#ok<NASGU>
X = Q*D*Q';
end
% Generate a uniformly random unit-norm tangent vector at X.
M.randvec = @randomvec;
function eta = randomvec(X)
eta = symm(randn(n));
nrm = M.norm(X, eta);
eta = eta / nrm;
end
M.lincomb = @lincomb;
M.zerovec = @(X) zeros(n);
% Poor man's vector transport: exploit the fact that all tangent spaces
% are the set of symmetric matrices, so that the identity is a sort of
% vector transport. It may perform poorly if the origin and target (X1
% and X2) are far apart though. This should not be the case for typical
% optimization algorithms, which perform small steps.
M.transp = @(X1, X2, eta) eta;
% For reference, a proper vector transport is given here, following
% work by Sra and Hosseini (2014), "Conic geometric optimisation on the
% manifold of positive definite matrices",
% http://arxiv.org/abs/1312.1039
% This will not be used by default. To force the use of this transport,
% call "M.transp = M.paralleltransp;" on your M returned by the present
% factory.
M.paralleltransp = @parallel_transport;
function zeta = parallel_transport(X, Y, eta)
E = sqrtm((Y/X));
zeta = E*eta*E';
end
% vec and mat are not isometries, because of the unusual inner metric.
M.vec = @(X, U) U(:);
M.mat = @(X, u) reshape(u, n, n);
M.vecmatareisometries = @() false;
end
% Linear combination of tangent vectors
function d = lincomb(X, a1, d1, a2, d2) %#ok<INUSL>
if nargin == 3
d = a1*d1;
elseif nargin == 5
d = a1*d1 + a2*d2;
else
error('Bad use of sympositivedefinitefactory.lincomb.');
end
end
|
github
|
skovnats/madmm-master
|
symfixedrankYYfactory.m
|
.m
|
madmm-master/compressed_modes/manopt/manopt/manifolds/symfixedrank/symfixedrankYYfactory.m
| 3,628 |
utf_8
|
ed10332d6c3f8af67578d34eb7817b8c
|
function M = symfixedrankYYfactory(n, k)
% Manifold of n-by-n symmetric positive semidefinite matrices of rank k.
%
% function M = symfixedrankYYfactory(n, k)
%
% The geometry is based on the paper,
% M. Journee, P.-A. Absil, F. Bach and R. Sepulchre,
% "Low-Rank Optimization on the Cone of Positive Semidefinite Matrices",
% SIAM Journal on Optimization, 2010.
%
% Paper link: http://www.di.ens.fr/~fbach/journee2010_sdp.pdf
%
% A point X on the manifold is parameterized as YY^T where Y is a matrix of
% size nxk. The matrix Y (nxk) is a full column-rank matrix. Hence, we deal
% directly with Y.
%
% Notice that this manifold is not complete: if optimization leads Y to be
% rank-deficient, the geometry will break down. Hence, this geometry should
% only be used if it is expected that the points of interest will have rank
% exactly k. Reduce k if that is not the case.
%
% An alternative, complete, geometry for positive semidefinite matrices of
% rank k is described in Bonnabel and Sepulchre 2009, "Riemannian Metric
% and Geometric Mean for Positive Semidefinite Matrices of Fixed Rank",
% SIAM Journal on Matrix Analysis and Applications.
% This file is part of Manopt: www.manopt.org.
% Original author: Bamdev Mishra, Dec. 30, 2012.
% Contributors:
% Change log:
% July 10, 2013 (NB)
% Added vec, mat, tangent, tangent2ambient ;
% Correction for the dimension of the manifold.
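% Example (editor's sketch):
%   M = symfixedrankYYfactory(8, 2);
%   Y = M.rand();        % 8x2 factor; the PSD point it represents is X = Y*Y'
%   eta = M.randvec(Y);  % unit-norm horizontal tangent vector at Y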
M.name = @() sprintf('YY'' quotient manifold of %dx%d PSD matrices of rank %d', n, k);
M.dim = @() k*n - k*(k-1)/2;
% Euclidean metric on the total space
M.inner = @(Y, eta, zeta) trace(eta'*zeta);
M.norm = @(Y, eta) sqrt(M.inner(Y, eta, eta));
M.dist = @(Y, Z) error('symfixedrankYYfactory.dist not implemented yet.');
M.typicaldist = @() 10*k;
M.proj = @projection;
function etaproj = projection(Y, eta)
% Projection onto the horizontal space
YtY = Y'*Y;
SS = YtY;
AS = Y'*eta - eta'*Y;
Omega = lyap(SS, -AS);
etaproj = eta - Y*Omega;
end
M.tangent = M.proj;
M.tangent2ambient = @(Y, eta) eta;
M.retr = @retraction;
function Ynew = retraction(Y, eta, t)
if nargin < 3
t = 1.0;
end
Ynew = Y + t*eta;
end
M.egrad2rgrad = @(Y, eta) eta;
M.ehess2rhess = @(Y, egrad, ehess, U) M.proj(Y, ehess);
M.exp = @exponential;
function Ynew = exponential(Y, eta, t)
if nargin < 3
t = 1.0;
end
Ynew = retraction(Y, eta, t);
warning('manopt:symfixedrankYYfactory:exp', ...
['Exponential for symmetric, fixed-rank ' ...
'manifold not implemented yet. Used retraction instead.']);
end
% Notice that the hash of two equivalent points will be different...
M.hash = @(Y) ['z' hashmd5(Y(:))];
M.rand = @random;
function Y = random()
Y = randn(n, k);
end
M.randvec = @randomvec;
function eta = randomvec(Y)
eta = randn(n, k);
eta = projection(Y, eta);
nrm = M.norm(Y, eta);
eta = eta / nrm;
end
M.lincomb = @lincomb;
M.zerovec = @(Y) zeros(n, k);
M.transp = @(Y1, Y2, d) projection(Y2, d);
M.vec = @(Y, u_mat) u_mat(:);
M.mat = @(Y, u_vec) reshape(u_vec, [n, k]);
M.vecmatareisometries = @() true;
end
% Linear combination of tangent vectors
function d = lincomb(Y, a1, d1, a2, d2) %#ok<INUSL>
if nargin == 3
d = a1*d1;
elseif nargin == 5
d = a1*d1 + a2*d2;
else
error('Bad use of symfixedrankYYfactory.lincomb.');
end
end
|
github
|
skovnats/madmm-master
|
complexcirclefactory.m
|
.m
|
madmm-master/compressed_modes/manopt/manopt/manifolds/complexcircle/complexcirclefactory.m
| 3,696 |
utf_8
|
f317f1fdbb76c8fb6cb2c39cee5c0db0
|
function M = complexcirclefactory(n)
% Returns a manifold struct to optimize over unit-modulus complex numbers.
%
% function M = complexcirclefactory()
% function M = complexcirclefactory(n)
%
% Description of vectors z in C^n (complex) such that each component z(i)
% has unit modulus. The manifold structure is the Riemannian submanifold
% structure from the embedding space R^2 x ... x R^2, i.e., the complex
% circle is identified with the unit circle in the real plane.
%
% By default, n = 1.
%
% See also spherecomplexfactory
% This file is part of Manopt: www.manopt.org.
% Original author: Nicolas Boumal, Dec. 30, 2012.
% Contributors:
% Change log:
%
% July 7, 2014 (NB): Added ehess2rhess function.
%
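% Example (editor's sketch):
%   M = complexcirclefactory(4);
%   z = M.rand();            % 4 unit-modulus complex numbers
%   v = M.randvec(z);        % tangent vector: componentwise real multiples of i*z
%   z2 = M.retr(z, v, 0.1);  % all entries of z2 again have unit modulus
%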
if ~exist('n', 'var')
n = 1;
end
M.name = @() sprintf('Complex circle (S^1)^%d', n);
M.dim = @() n;
M.inner = @(z, v, w) real(v'*w);
M.norm = @(x, v) norm(v);
M.dist = @(x, y) norm(acos(conj(x) .* y));
M.typicaldist = @() pi*sqrt(n);
M.proj = @(z, u) u - real( conj(u) .* z ) .* z;
M.tangent = M.proj;
% For Riemannian submanifolds, converting a Euclidean gradient into a
% Riemannian gradient amounts to an orthogonal projection.
M.egrad2rgrad = M.proj;
M.ehess2rhess = @ehess2rhess;
function rhess = ehess2rhess(z, egrad, ehess, zdot)
rhess = M.proj(z, ehess - real(z.*conj(egrad)).*zdot);
end
M.exp = @exponential;
function y = exponential(z, v, t)
if nargin <= 2
t = 1.0;
end
y = zeros(n, 1);
tv = t*v;
nrm_tv = abs(tv);
% We need to distinguish between very small steps and the others.
% For very small steps, we use a limit version of the exponential
% (which actually coincides with the retraction), so as to not
% divide by very small numbers.
mask = nrm_tv > 1e-6;
y(mask) = z(mask).*cos(nrm_tv(mask)) + ...
tv(mask).*(sin(nrm_tv(mask))./nrm_tv(mask));
y(~mask) = z(~mask) + tv(~mask);
y(~mask) = y(~mask) ./ abs(y(~mask));
end
M.retr = @retraction;
function y = retraction(z, v, t)
if nargin <= 2
t = 1.0;
end
y = z+t*v;
y = y ./ abs(y);
end
M.log = @logarithm;
function v = logarithm(x1, x2)
v = M.proj(x1, x2 - x1);
di = M.dist(x1, x2);
nv = norm(v);
v = v * (di / nv);
end
M.hash = @(z) ['z' hashmd5( [real(z(:)) ; imag(z(:))] ) ];
M.rand = @random;
function z = random()
z = randn(n, 1) + 1i*randn(n, 1);
z = z ./ abs(z);
end
M.randvec = @randomvec;
function v = randomvec(z)
% i*z(k) is a basis vector of the tangent vector to the k-th circle
v = randn(n, 1) .* (1i*z);
v = v / norm(v);
end
M.lincomb = @lincomb;
M.zerovec = @(x) zeros(n, 1);
M.transp = @(x1, x2, d) M.proj(x2, d);
M.pairmean = @pairmean;
function z = pairmean(z1, z2)
z = z1+z2;
z = z ./ abs(z);
end
M.vec = @(x, u_mat) [real(u_mat) ; imag(u_mat)];
M.mat = @(x, u_vec) u_vec(1:n) + 1i*u_vec((n+1):end);
M.vecmatareisometries = @() true;
end
% Linear combination of tangent vectors
function d = lincomb(x, a1, d1, a2, d2) %#ok<INUSL>
if nargin == 3
d = a1*d1;
elseif nargin == 5
d = a1*d1 + a2*d2;
else
error('Bad use of sphere.lincomb.');
end
end
|
github
|
skovnats/madmm-master
|
fixedrankfactory_3factors_preconditioned.m
|
.m
|
madmm-master/compressed_modes/manopt/manopt/manifolds/fixedrank/fixedrankfactory_3factors_preconditioned.m
| 11,730 |
utf_8
|
25828327278d65ab2cb851ea6574833c
|
function M = fixedrankfactory_3factors_preconditioned(m, n, k)
% Manifold of m-by-n matrices of rank k with polar quotient geometry.
%
% function M = fixedrankLSRquotientfactory(m, n, k)
%
% A point X on the manifold is represented as a structure with three
% fields: L, S and R. The matrices L (mxk) and R (nxk) are orthonormal,
% while the matrix S (kxk) is a full-rank matrix.
%
% Tangent vectors are represented as a structure with three fields: L, S
% and R.
% This file is part of Manopt: www.manopt.org.
% Original author: Bamdev Mishra, Dec. 30, 2012.
% Contributors:
% Change log:
M.name = @() sprintf('LSR'' quotient manifold of %dx%d matrices of rank %d', m, n, k);
M.dim = @() (m+n-k)*k;
% Some precomputations at the point X to be used in the inner product (and
% pretty much everywhere else).
function X = prepare(X)
if ~all(isfield(X,{'StS','SSt','invStS','invSSt'}) == 1)
X.SSt = X.S*X.S';
X.StS = X.S'*X.S;
X.invSSt = eye(size(X.S, 2))/X.SSt;
X.invStS = eye(size(X.S, 2))/X.StS;
end
end
% The metric on the total space is chosen based on the low-rank matrix completion cost function (a preconditioned metric).
M.inner = @iproduct;
function ip = iproduct(X, eta, zeta)
X = prepare(X);
ip = trace(X.SSt*(eta.L'*zeta.L)) + trace(X.StS*(eta.R'*zeta.R)) ...
+ trace(eta.S'*zeta.S);
end
M.norm = @(X, eta) sqrt(M.inner(X, eta, eta));
M.dist = @(x, y) error('fixedrankLSRquotientfactory.dist not implemented yet.');
M.typicaldist = @() 10*k;
skew = @(X) .5*(X-X');
symm = @(X) .5*(X+X');
M.egrad2rgrad = @egrad2rgrad;
function rgrad = egrad2rgrad(X, egrad)
X = prepare(X);
SSL = X.SSt;
ASL = 2*symm(SSL*(egrad.S*X.S'));
SSR = X.StS;
ASR = 2*symm(SSR*(egrad.S'*X.S));
% BL1 = lyap(SSL, -ASL);
% BR1 = lyap(SSR, -ASR);
[BL, BR] = tangent_space_lyap(X.S, ASL, ASR);
rgrad.L = (egrad.L - X.L*BL)*X.invSSt;
rgrad.R = (egrad.R - X.R*BR)*X.invStS;
rgrad.S = egrad.S;
% norm(skew(X.SSt*(rgrad.L'*X.L) + rgrad.S*X.S'), 'fro')
% norm(skew(X.StS*(rgrad.R'*X.R) - X.S'*rgrad.S), 'fro')
end
M.ehess2rhess = @ehess2rhess;
function Hess = ehess2rhess(X, egrad, ehess, eta)
X = prepare(X);
% Riemannian gradient
SSL = X.SSt;
ASL = 2*symm(SSL*(egrad.S*X.S'));
SSR = X.StS;
ASR = 2*symm(SSR*(egrad.S'*X.S));
[BL, BR] = tangent_space_lyap(X.S, ASL, ASR);
rgrad.L = (egrad.L - X.L*BL)*X.invSSt;
rgrad.R = (egrad.R - X.R*BR)*X.invStS;
rgrad.S = egrad.S;
% Directional derivative of the Riemannian gradient
ASLdot = 2*symm((2*symm(X.S*eta.S')*(egrad.S*X.S')) + X.SSt*(ehess.S*X.S' + egrad.S*eta.S')) - 4*symm(symm(eta.S*X.S')*BL);
ASRdot = 2*symm((2*symm(X.S'*eta.S)*(egrad.S'*X.S)) + X.StS*(ehess.S'*X.S + egrad.S'*eta.S)) - 4*symm(symm(eta.S'*X.S)*BR);
% SSLdot = X.SSt;
% SSRdot = X.StS;
% BLdot = lyap(SSLdot, -ASLdot);
% BRdot = lyap(SSRdot, -ASRdot);
[BLdot, BRdot] = tangent_space_lyap(X.S, ASLdot, ASRdot);
Hess.L = (ehess.L - eta.L*BL - X.L*BLdot - 2*rgrad.L*symm(eta.S*X.S'))*X.invSSt;
Hess.R = (ehess.R - eta.R*BR - X.R*BRdot - 2*rgrad.R*symm(eta.S'*X.S))*X.invStS;
Hess.S = ehess.S;
% BM comments: Till this, everything seems correct.
% We still need a correction factor for the non-constant metric
% The correction factor owes itself to the Koszul formula...
% This is the Riemannian connection in the Euclidean space with the
% scaled metric.
Hess.L = Hess.L + (eta.L*symm(rgrad.S*X.S') + rgrad.L*symm(eta.S*X.S'))*X.invSSt;
Hess.R = Hess.R + (eta.R*symm(rgrad.S'*X.S) + rgrad.R*symm(eta.S'*X.S))*X.invStS;
Hess.S = Hess.S - symm(rgrad.L'*eta.L)*X.S - X.S*symm(rgrad.R'*eta.R);
% The Riemannian connection on the quotient space is the
% projection on the tangent space of the total space and then onto the horizontal
% space. This is accomplished by the following operation.
Hess = M.proj(X, Hess);
% norm(skew(X.SSt*(Hess.L'*X.L) + Hess.S*X.S'))
% norm(skew(X.StS*(Hess.R'*X.R) - X.S'*Hess.S))
end
M.proj = @projection;
function etaproj = projection(X, eta)
X = prepare(X);
% First, projection onto the tangent space of the total space
SSL = X.SSt;
ASL = 2*symm(X.SSt*(X.L'*eta.L)*X.SSt);
BL = lyap(SSL, -ASL);
eta.L = eta.L - X.L*BL*X.invSSt;
SSR = X.StS;
ASR = 2*symm(X.StS*(X.R'*eta.R)*X.StS);
BR = lyap(SSR, -ASR);
eta.R = eta.R - X.R*BR*X.invStS;
% Project onto the horizontal space
PU = skew((X.L'*eta.L)*X.SSt) + skew(X.S*eta.S');
PV = skew((X.R'*eta.R)*X.StS) + skew(X.S'*eta.S);
[Omega1, Omega2] = coupled_lyap(X.S, PU, PV);
% norm(2*skew(Omega1*X.SSt) - PU -(X.S*Omega2*X.S'),'fro' )
% norm(2*skew(Omega2*X.StS) - PV -(X.S'*Omega1*X.S),'fro' )
%
etaproj.L = eta.L - (X.L*Omega1);
etaproj.S = eta.S - (X.S*Omega2 - Omega1*X.S) ;
etaproj.R = eta.R - (X.R*Omega2);
% norm(skew(X.SSt*(etaproj.L'*X.L) + etaproj.S*X.S'))
% norm(skew(X.StS*(etaproj.R'*X.R) - X.S'*etaproj.S))
%
% norm(skew(X.SSt*(etaproj.L'*X.L) - X.S*etaproj.S'))
% norm(skew(X.StS*(etaproj.R'*X.R) + etaproj.S'*X.S))
end
M.tangent = M.proj;
M.tangent2ambient = @(X, eta) eta;
M.retr = @retraction;
function Y = retraction(X, eta, t)
if nargin < 3
t = 1.0;
end
Y.S = (X.S + t*eta.S);
Y.L = uf((X.L + t*eta.L));
Y.R = uf((X.R + t*eta.R));
Y = prepare(Y);
end
M.exp = @exponential;
function Y = exponential(X, eta, t)
if nargin < 3
t = 1.0;
end
Y = retraction(X, eta, t);
warning('manopt:fixedrankfactory_3factors_preconditioned:exp', ...
['Exponential for fixed rank ' ...
'manifold not implemented yet. Used retraction instead.']);
end
M.hash = @(X) ['z' hashmd5([X.L(:) ; X.S(:) ; X.R(:)])];
M.rand = @random;
% Factors L and R live on Stiefel manifolds, hence we will reuse
% their random generator.
stiefelm = stiefelfactory(m, k);
stiefeln = stiefelfactory(n, k);
function X = random()
X.L = stiefelm.rand();
X.R = stiefeln.rand();
X.S = diag(1+rand(k, 1));
X = prepare(X);
end
M.randvec = @randomvec;
function eta = randomvec(X)
% A random vector on the horizontal space
eta.L = randn(m, k);
eta.R = randn(n, k);
eta.S = randn(k, k);
eta = projection(X, eta);
nrm = M.norm(X, eta);
eta.L = eta.L / nrm;
eta.R = eta.R / nrm;
eta.S = eta.S / nrm;
end
M.lincomb = @lincomb;
M.zerovec = @(X) struct('L', zeros(m, k), 'S', zeros(k, k), ...
'R', zeros(n, k));
M.transp = @(x1, x2, d) projection(x2, d);
% vec and mat are not isometries, because of the unusual inner metric.
M.vec = @(X, U) [U.L(:) ; U.S(:); U.R(:)];
M.mat = @(X, u) struct('L', reshape(u(1:(m*k)), m, k), ...
'S', reshape(u((m*k+1): m*k + k*k), k, k), ...
'R', reshape(u((m*k+ k*k + 1):end), n, k));
M.vecmatareisometries = @() false;
end
% Linear combination of tangent vectors
function d = lincomb(x, a1, d1, a2, d2) %#ok<INUSL>
if nargin == 3
d.L = a1*d1.L;
d.R = a1*d1.R;
d.S = a1*d1.S;
elseif nargin == 5
d.L = a1*d1.L + a2*d2.L;
d.R = a1*d1.R + a2*d2.R;
d.S = a1*d1.S + a2*d2.S;
else
error('Bad use of fixedrankfactory_3factors_preconditioned.lincomb.');
end
end
function A = uf(A)
[L, unused, R] = svd(A, 0); %#ok
A = L*R';
end
function[BU, BV] = tangent_space_lyap(R, E, F)
% We intend to solve RR^T BU + BU RR^T = E
% R^T R BV + BV R^T R = F
%
% This can be solved using two calls to the Matlab lyap.
% However, a more efficient implementation is possible, as shown
% below...
[U, Sigma, V] = svd(R);
E_mod = U'*E*U;
F_mod = V'*F*V;
b1 = E_mod(:);
b2 = F_mod(:);
r = size(Sigma, 1);
sig = diag(Sigma); % all the singular values in a vector
sig1 = sig*ones(1, r); % columns repeat
sig1t = sig1'; % rows repeat
s1 = sig1(:);
s2 = sig1t(:);
% The block elements
a = s1.^2 + s2.^2; % a column vector
% solve the linear system of equations
cu = b1./a; %a.\b1;
cv = b2./a; %a.\b2;
% devectorize
CU = reshape(cu, r, r);
CV = reshape(cv, r, r);
% Do the similarity transforms
BU = U*CU*U';
BV = V*CV*V';
% %% debug
%
% norm(R*R'*BU + BU*R*R' - E, 'fro');
% norm((Sigma.^2)*CU + CU*(Sigma.^2) - E_mod, 'fro');
% norm(a.*cu - b1, 'fro');
%
% norm(R'*R*BV + BV*R'*R - F, 'fro');
%
% BU1 = lyap(R*R', - E);
% norm(R*R'*BU1 + BU1*R*R' - E, 'fro');
%
% BV1 = lyap(R'*R, - F);
% norm(R'*R*BV1 + BV1*R'*R - F, 'fro');
%
% % as accurate as the lyap
% norm(BU - BU1, 'fro')
% norm(BV - BV1, 'fro')
end
function[Omega1, Omega2] = coupled_lyap(R, E, F)
% We intend to solve the coupled system of Lyapunov equations
%
% RR^T Omega1 + Omega1 RR^T - R Omega2 R^T = E
% R^T R Omega2 + Omega2 R^T R - R^T Omega1 R = F
%
% Below is an efficient implementation
[U, Sigma, V] = svd(R);
E_mod = U'*E*U;
F_mod = V'*F*V;
b1 = E_mod(:);
b2 = F_mod(:);
r = size(Sigma, 1);
sig = diag(Sigma); % all the singular values in a vector
sig1 = sig*ones(1, r); % columns repeat
sig1t = sig1'; % rows repeat
s1 = sig1(:);
s2 = sig1t(:);
% The block elements
a = s1.^2 + s2.^2; % a column vector
c = s1.*s2;
% Solve directly using the formula
% A = diag(a);
% C = diag(c);
% Y1_sol = (A*(C\A) - C) \ (b2 + A*(C\b1));
% Y2_sol = A\(b2 + C*Y1_sol);
Y1_sol = (b2 + (a./c).*b1) ./ ((a.^2)./c - c);
Y2_sol = (b2 + c.*Y1_sol)./a;
% devectorize
Omega1 = reshape(Y1_sol, r, r);
Omega2 = reshape(Y2_sol, r, r);
% Do the similarity transforms
Omega1 = U*Omega1*U';
Omega2 = V*Omega2*V';
% %% debug whether we have the right solution
% norm(R*R'*Omega1 + Omega1*R*R' - R*Omega2*R' - E, 'fro')
% norm(R'*R*Omega2 + Omega2*R'*R - R'*Omega1*R - F, 'fro')
end
|
github
|
skovnats/madmm-master
|
fixedrankfactory_2factors_subspace_projection.m
|
.m
|
madmm-master/compressed_modes/manopt/manopt/manifolds/fixedrank/fixedrankfactory_2factors_subspace_projection.m
| 6,255 |
utf_8
|
4232d28fbaabbc139761a8fbcca4ea4c
|
function M = fixedrankfactory_2factors_subspace_projection(m, n, k)
% Manifold of m-by-n matrices of rank k with quotient geometry.
%
% function M = fixedrankfactory_2factors_subspace_projection(m, n, k)
%
% This follows the quotient geometry described in the following paper:
% B. Mishra, G. Meyer, S. Bonnabel and R. Sepulchre
% "Fixed-rank matrix factorizations and Riemannian low-rank optimization",
% arXiv, 2012.
%
% Paper link: http://arxiv.org/abs/1209.0430
%
% A point X on the manifold is represented as a structure with two
% fields: L and R. The matrix L (mxk) is orthonormal,
% while the matrix R (nxk) is a full column-rank
% matrix.
%
% Tangent vectors are represented as a structure with two fields: L, R.
% This file is part of Manopt: www.manopt.org.
% Original author: Bamdev Mishra, Dec. 30, 2012.
% Contributors:
% Change log:
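%
% Illustrative usage sketch (arbitrary sizes; Manopt assumed on the path).
% A point is the pair (L, R) representing the rank-k matrix L*R':
%
%   M = fixedrankfactory_2factors_subspace_projection(100, 80, 5);
%   X = M.rand();              % X.L (100x5) orthonormal, X.R (80x5) full rank
%   eta = M.randvec(X);        % unit-norm horizontal tangent vector at X
%   Y = M.retr(X, eta, 0.1);   % Y.L remains orthonormal thanks to uf()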
M.name = @() sprintf('LR'' quotient manifold of %dx%d matrices of rank %d', m, n, k);
M.dim = @() (m+n-k)*k;
% Some precomputations at the point X to be used in the inner product (and
% pretty much everywhere else).
function X = prepare(X)
if ~all(isfield(X,{'RtR','invRtR'}) == 1)
X.RtR = X.R'*X.R;
X.invRtR = eye(size(X.R,2))/ X.RtR;
end
end
% The choice of the metric is motivated by symmetry and scale
% invariance in the total space
M.inner = @iproduct;
function ip = iproduct(X, eta, zeta)
X = prepare(X);
ip = eta.L(:).'*zeta.L(:) + trace(X.invRtR*(eta.R'*zeta.R) );
end
M.norm = @(X, eta) sqrt(M.inner(X, eta, eta));
M.dist = @(x, y) error('fixedrankfactory_2factors_subspace_projection.dist not implemented yet.');
M.typicaldist = @() 10*k;
skew = @(X) .5*(X-X');
symm = @(X) .5*(X+X');
stiefel_proj = @(L, H) H - L*symm(L'*H);
M.egrad2rgrad = @egrad2rgrad;
function eta = egrad2rgrad(X, eta)
X = prepare(X);
eta.L = stiefel_proj(X.L, eta.L);
eta.R = eta.R*X.RtR;
end
M.ehess2rhess = @ehess2rhess;
function Hess = ehess2rhess(X, egrad, ehess, eta)
X = prepare(X);
% Riemannian gradient
rgrad = egrad2rgrad(X, egrad);
% Directional derivative of the Riemannian gradient
Hess.L = ehess.L - eta.L*symm(X.L'*egrad.L);
Hess.L = stiefel_proj(X.L, Hess.L);
Hess.R = ehess.R*X.RtR + 2*egrad.R*symm(eta.R'*X.R);
% Correction factor for the non-constant metric on the factor R
Hess.R = Hess.R - rgrad.R*((X.invRtR)*symm(X.R'*eta.R)) - eta.R*(X.invRtR*symm(X.R'*rgrad.R)) + X.R*(X.invRtR*symm(eta.R'*rgrad.R));
% Projection onto the horizontal space
Hess = M.proj(X, Hess);
end
M.proj = @projection;
function etaproj = projection(X, eta)
X = prepare(X);
eta.L = stiefel_proj(X.L, eta.L); % On the tangent space
SS = X.RtR;
AS1 = 2*X.RtR*skew(X.L'*eta.L)*X.RtR;
AS2 = 2*skew(X.RtR*(X.R'*eta.R));
AS = skew(AS1 + AS2);
Omega = nested_sylvester(SS,AS);
etaproj.L = eta.L - X.L*Omega;
etaproj.R = eta.R - X.R*Omega;
end
M.tangent = M.proj;
M.tangent2ambient = @(X, eta) eta;
M.retr = @retraction;
function Y = retraction(X, eta, t)
if nargin < 3
t = 1.0;
end
Y.L = uf(X.L + t*eta.L);
Y.R = X.R + t*eta.R;
% These are reused in the computation of the gradient and Hessian
Y = prepare(Y);
end
M.exp = @exponential;
function R = exponential(X, eta, t)
if nargin < 3
t = 1.0;
end
R = retraction(X, eta, t);
warning('manopt:fixedrankfactory_2factors_subspace_projection:exp', ...
['Exponential for fixed rank ' ...
'manifold not implemented yet. Used retraction instead.']);
end
M.hash = @(X) ['z' hashmd5([X.L(:) ; X.R(:)])];
M.rand = @random;
% Factor L lives on the Stiefel manifold, hence we will reuse
% its random generator.
stiefelm = stiefelfactory(m, k);
function X = random()
X.L = stiefelm.rand();
X.R = randn(n, k);
end
M.randvec = @randomvec;
function eta = randomvec(X)
eta.L = randn(m, k);
eta.R = randn(n, k);
eta = projection(X, eta);
nrm = M.norm(X, eta);
eta.L = eta.L / nrm;
eta.R = eta.R / nrm;
end
M.lincomb = @lincomb;
M.zerovec = @(X) struct('L', zeros(m, k),...
'R', zeros(n, k));
M.transp = @(x1, x2, d) projection(x2, d);
% vec and mat are not isometries, because of the unusual inner metric.
M.vec = @(X, U) [U.L(:) ; U.R(:)];
M.mat = @(X, u) struct('L', reshape(u(1:(m*k)), m, k), ...
'R', reshape(u((m*k+1):end), n, k));
M.vecmatareisometries = @() false;
end
% Linear combination of tangent vectors
function d = lincomb(x, a1, d1, a2, d2) %#ok<INUSL>
if nargin == 3
d.L = a1*d1.L;
d.R = a1*d1.R;
elseif nargin == 5
d.L = a1*d1.L + a2*d2.L;
d.R = a1*d1.R + a2*d2.R;
else
error('Bad use of fixedrankfactory_2factors_subspace_projection.lincomb.');
end
end
function A = uf(A)
[L, unused, R] = svd(A, 0); %#ok
A = L*R';
end
function omega = nested_sylvester(sym_mat, asym_mat)
% omega=nested_sylvester(sym_mat,asym_mat)
% This function solves the system of nested Sylvester equations:
%
% X*sym_mat + sym_mat*X = asym_mat
% Omega*sym_mat+sym_mat*Omega = X
% Mishra, Meyer, Bonnabel and Sepulchre, 'Fixed-rank matrix factorizations and Riemannian low-rank optimization'
% Uses the built-in lyap function, but does not exploit the fact that it's
% twice the same sym_mat matrix that comes into play.
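%
% Verification sketch (illustrative): for a random symmetric positive
% definite sym_mat and a skew-symmetric asym_mat, the two equations above
% can be checked directly:
%
%   A = randn(5); sym_mat = A*A' + eye(5);
%   B = randn(5); asym_mat = B - B';
%   omega = nested_sylvester(sym_mat, asym_mat);
%   X = omega*sym_mat + sym_mat*omega;              % recover intermediate X
%   norm(X*sym_mat + sym_mat*X - asym_mat, 'fro')   % should be ~0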
X = lyap(sym_mat, -asym_mat);
omega = lyap(sym_mat, -X);
end
|
github
|
skovnats/madmm-master
|
fixedrankfactory_2factors_preconditioned.m
|
.m
|
madmm-master/compressed_modes/manopt/manopt/manifolds/fixedrank/fixedrankfactory_2factors_preconditioned.m
| 5,832 |
utf_8
|
de03349c31333faef49955c31b7478b1
|
function M = fixedrankfactory_2factors_preconditioned(m, n, k)
% Manifold of m-by-n matrices of rank k with new balanced quotient geometry
%
% function M = fixedrankfactory_2factors_preconditioned(m, n, k)
%
% This follows the quotient geometry described in the following paper:
% B. Mishra, K. Adithya Apuroop and R. Sepulchre,
% "A Riemannian geometry for low-rank matrix completion",
% arXiv, 2012.
%
% Paper link: http://arxiv.org/abs/1211.1550
%
% This geometry is tuned to least-squares problems such as low-rank matrix
% completion.
%
% A point X on the manifold is represented as a structure with two
% fields: L and R. The matrices L (mxk) and R (nxk) are full column-rank
% matrices.
%
% Tangent vectors are represented as a structure with two fields: L, R
% This file is part of Manopt: www.manopt.org.
% Original author: Bamdev Mishra, Dec. 30, 2012.
% Contributors:
% Change log:
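%
% Illustrative usage sketch (arbitrary sizes; Manopt assumed on the path).
% Points and tangent vectors are structures with fields L and R:
%
%   M = fixedrankfactory_2factors_preconditioned(100, 80, 5);
%   X = M.rand();              % X.L (100x5) and X.R (80x5), full column rank
%   eta = M.randvec(X);        % unit-norm horizontal tangent vector at X
%   Y = M.retr(X, eta, 0.1);   % retraction also rebalances the norms of L and R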
M.name = @() sprintf('LR''(tuned for least square problems) quotient manifold of %dx%d matrices of rank %d', m, n, k);
M.dim = @() (m+n-k)*k;
% Some precomputations at the point X to be used in the inner product (and
% pretty much everywhere else).
function X = prepare(X)
if ~all(isfield(X,{'LtL','RtR','invRtR','invLtL'}))
L = X.L;
R = X.R;
X.LtL = L'*L;
X.RtR = R'*R;
X.invLtL = inv(X.LtL);
X.invRtR = inv(X.RtR);
end
end
% The choice of metric is motivated by symmetry and tuned to the
% least-squares objective function.
M.inner = @iproduct;
function ip = iproduct(X, eta, zeta)
X = prepare(X);
ip = trace(X.RtR*(eta.L'*zeta.L)) + trace(X.LtL*(eta.R'*zeta.R));
end
M.norm = @(X, eta) sqrt(M.inner(X, eta, eta));
M.dist = @(x, y) error('fixedrankfactory_2factors_preconditioned.dist not implemented yet.');
M.typicaldist = @() 10*k;
symm = @(M) .5*(M+M');
M.egrad2rgrad = @egrad2rgrad;
function eta = egrad2rgrad(X, eta)
X = prepare(X);
eta.L = eta.L*X.invRtR;
eta.R = eta.R*X.invLtL;
end
M.ehess2rhess = @ehess2rhess;
function Hess = ehess2rhess(X, egrad, ehess, eta)
X = prepare(X);
% Riemannian gradient
rgrad = egrad2rgrad(X, egrad);
% Directional derivative of the Riemannian gradient
Hess.L = ehess.L*X.invRtR - 2*egrad.L*(X.invRtR * symm(eta.R'*X.R) * X.invRtR);
Hess.R = ehess.R*X.invLtL - 2*egrad.R*(X.invLtL * symm(eta.L'*X.L) * X.invLtL);
% We still need a correction factor for the non-constant metric
Hess.L = Hess.L + rgrad.L*(symm(eta.R'*X.R)*X.invRtR) + eta.L*(symm(rgrad.R'*X.R)*X.invRtR) - X.L*(symm(eta.R'*rgrad.R)*X.invRtR);
Hess.R = Hess.R + rgrad.R*(symm(eta.L'*X.L)*X.invLtL) + eta.R*(symm(rgrad.L'*X.L)*X.invLtL) - X.R*(symm(eta.L'*rgrad.L)*X.invLtL);
% Project on the horizontal space
Hess = M.proj(X, Hess);
end
M.proj = @projection;
function etaproj = projection(X, eta)
X = prepare(X);
Lambda = (eta.R'*X.R)*X.invRtR - X.invLtL*(X.L'*eta.L);
Lambda = Lambda/2;
etaproj.L = eta.L + X.L*Lambda;
etaproj.R = eta.R - X.R*Lambda';
end
M.tangent = M.proj;
M.tangent2ambient = @(X, eta) eta;
M.retr = @retraction;
function Y = retraction(X, eta, t)
if nargin < 3
t = 1.0;
end
Y.L = X.L + t*eta.L;
Y.R = X.R + t*eta.R;
% Numerical conditioning step: A simpler version.
% We need to ensure that L and R do not have very skewed
% relative norms.
scaling = norm(X.L, 'fro')/norm(X.R, 'fro');
scaling = sqrt(scaling);
Y.L = Y.L / scaling;
Y.R = Y.R * scaling;
% These are reused in the computation of the gradient and Hessian
Y = prepare(Y);
end
M.exp = @exponential;
function Y = exponential(X, eta, t)
if nargin < 3
t = 1.0;
end
Y = retraction(X, eta, t);
warning('manopt:fixedrankfactory_2factors_preconditioned:exp', ...
['Exponential for fixed rank ' ...
'manifold not implemented yet. Used retraction instead.']);
end
M.hash = @(X) ['z' hashmd5([X.L(:) ; X.R(:)])];
M.rand = @random;
function X = random()
X.L = randn(m, k);
X.R = randn(n, k);
end
M.randvec = @randomvec;
function eta = randomvec(X)
eta.L = randn(m, k);
eta.R = randn(n, k);
eta = projection(X, eta);
nrm = M.norm(X, eta);
eta.L = eta.L / nrm;
eta.R = eta.R / nrm;
end
M.lincomb = @lincomb;
M.zerovec = @(X) struct('L', zeros(m, k),'R', zeros(n, k));
M.transp = @(x1, x2, d) projection(x2, d);
% vec and mat are not isometries, because of the unusual inner metric.
M.vec = @(X, U) [U.L(:) ; U.R(:)];
M.mat = @(X, u) struct('L', reshape(u(1:(m*k)), m, k), ...
'R', reshape(u((m*k+1):end), n, k));
M.vecmatareisometries = @() false;
end
% Linear combination of tangent vectors
function d = lincomb(x, a1, d1, a2, d2) %#ok<INUSL>
if nargin == 3
d.L = a1*d1.L;
d.R = a1*d1.R;
elseif nargin == 5
d.L = a1*d1.L + a2*d2.L;
d.R = a1*d1.R + a2*d2.R;
else
error('Bad use of fixedrankfactory_2factors_preconditioned.lincomb.');
end
end
|
github
|
skovnats/madmm-master
|
fixedrankembeddedfactory.m
|
.m
|
madmm-master/compressed_modes/manopt/manopt/manifolds/fixedrank/fixedrankembeddedfactory.m
| 10,833 |
utf_8
|
1c1a04e099a39f2931eaf8763455c433
|
function M = fixedrankembeddedfactory(m, n, k)
% Manifold struct to optimize fixed-rank matrices w/ an embedded geometry.
%
% function M = fixedrankembeddedfactory(m, n, k)
%
% Manifold of m-by-n real matrices of fixed rank k. This follows the
% geometry described in this paper (which for now is the documentation):
% B. Vandereycken, "Low-rank matrix completion by Riemannian optimization",
% 2011.
%
% Paper link: http://arxiv.org/pdf/1209.3834.pdf
%
% A point X on the manifold is represented as a structure with three
% fields: U, S and V. The matrices U (mxk) and V (nxk) are orthonormal,
% while the matrix S (kxk) is any /diagonal/, full rank matrix.
% Following the SVD formalism, X = U*S*V'. Note that the diagonal entries
% of S are not constrained to be nonnegative.
%
% Tangent vectors are represented as a structure with three fields: Up, M
% and Vp. The matrices Up (mxk) and Vp (nxk) obey Up'*U = 0 and Vp'*V = 0.
% The matrix M (kxk) is arbitrary. Such a structure corresponds to the
% following tangent vector in the ambient space of mxn matrices:
% Z = U*M*V' + Up*V' + U*Vp'
% where (U, S, V) is the current point and (Up, M, Vp) is the tangent
% vector at that point.
%
% Vectors in the ambient space are best represented as mxn matrices. If
% these are low-rank, they may also be represented as structures with
% U, S, V fields, such that Z = U*S*V'. There are no restrictions on what
% U, S and V are, as long as their product as indicated yields a real, mxn
% matrix.
%
% The chosen geometry yields a Riemannian submanifold of the embedding
% space R^(mxn) equipped with the usual trace (Frobenius) inner product.
% This file is part of Manopt: www.manopt.org.
% Original author: Nicolas Boumal, Dec. 30, 2012.
% Contributors:
% Change log:
%
% Feb. 20, 2014 (NB):
% Added function tangent to work with checkgradient.
% June 24, 2014 (NB):
% A couple modifications following
% Bart Vandereycken's feedback:
% - The checksum (hash) was replaced for a faster alternative: it's a
% bit less "safe" in that collisions could arise with higher
% probability, but they're still very unlikely.
% - The vector transport was changed.
% The typical distance was also modified, hopefully giving the
% trustregions method a better initial guess for the trust region
% radius, but that should be tested for different cost functions too.
% July 11, 2014 (NB):
% Added ehess2rhess and tangent2ambient, supplied by Bart.
% July 14, 2014 (NB):
% Added vec, mat and vecmatareisometries so that hessianspectrum now
% works with this geometry. Implemented the tangent function.
% Made it clearer in the code and in the documentation in what format
% ambient vectors may be supplied, and generalized some functions so
% that they should now work with both accepted formats.
% It is now clearly stated that for a point X represented as a
% triplet (U, S, V), the matrix S needs to be diagonal.
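%
% Illustrative usage sketch (arbitrary sizes; Manopt assumed on the path).
% It shows the (U, S, V) representation of a point, the (Up, M, Vp)
% representation of a tangent vector, and its ambient mxn counterpart:
%
%   Mfr = fixedrankembeddedfactory(100, 80, 5);
%   X = Mfr.rand();                    % fields U, S (diagonal), V
%   Z = Mfr.randvec(X);                % fields Up, M, Vp
%   Zamb = Mfr.tangent2ambient(X, Z);  % low-rank factors U, S, V
%   Zmat = Zamb.U*Zamb.S*Zamb.V';      % same tangent vector as a 100x80 matrix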
M.name = @() sprintf('Manifold of %dx%d matrices of rank %d', m, n, k);
M.dim = @() (m+n-k)*k;
M.inner = @(x, d1, d2) d1.M(:).'*d2.M(:) + d1.Up(:).'*d2.Up(:) ...
+ d1.Vp(:).'*d2.Vp(:);
M.norm = @(x, d) sqrt(M.inner(x, d, d));
M.dist = @(x, y) error('fixedrankembeddedfactory.dist not implemented yet.');
M.typicaldist = @() M.dim();
% Given Z in tangent vector format, projects the components Up and Vp
% such that they satisfy the tangent space constraints up to numerical
% errors. If Z was indeed a tangent vector at X, this should barely
% affect Z (it would not at all if we had infinite numerical accuracy).
M.tangent = @tangent;
function Z = tangent(X, Z)
Z.Up = Z.Up - X.U*(X.U'*Z.Up);
Z.Vp = Z.Vp - X.V*(X.V'*Z.Vp);
end
% For a given ambient vector Z, applies it to a matrix W. If Z is given
% as a matrix, this is straightfoward. If Z is given as a structure
% with fields U, S, V such that Z = U*S*V', the product is executed
% efficiently.
function ZW = apply_ambient(Z, W)
if ~isstruct(Z)
ZW = Z*W;
else
ZW = Z.U*(Z.S*(Z.V'*W));
end
end
% Same as apply_ambient, but applies Z' to W.
function ZtW = apply_ambient_transpose(Z, W)
if ~isstruct(Z)
ZtW = Z'*W;
else
ZtW = Z.V*(Z.S'*(Z.U'*W));
end
end
% Orthogonal projection of an ambient vector Z represented as an mxn
% matrix or as a structure with fields U, S, V to the tangent space at
% X, in a tangent vector structure format.
M.proj = @projection;
function Zproj = projection(X, Z)
ZV = apply_ambient(Z, X.V);
UtZV = X.U'*ZV;
ZtU = apply_ambient_transpose(Z, X.U);
Zproj.M = UtZV;
Zproj.Up = ZV - X.U*UtZV;
Zproj.Vp = ZtU - X.V*UtZV';
end
M.egrad2rgrad = @projection;
% Code supplied by Bart.
% Given the Euclidean gradient at X and the Euclidean Hessian at X
% along H, where egrad and ehess are vectors in the ambient space and H
% is a tangent vector at X, returns the Riemannian Hessian at X along
% H, which is a tangent vector.
M.ehess2rhess = @ehess2rhess;
function rhess = ehess2rhess(X, egrad, ehess, H)
% Euclidean part
rhess = projection(X, ehess);
% Curvature part
T = apply_ambient(egrad, H.Vp)/X.S;
rhess.Up = rhess.Up + (T - X.U*(X.U'*T));
T = apply_ambient_transpose(egrad, H.Up)/X.S;
rhess.Vp = rhess.Vp + (T - X.V*(X.V'*T));
end
% Transforms a tangent vector Z represented as a structure (Up, M, Vp)
% into a structure with fields (U, S, V) that represents that same
% tangent vector in the ambient space of mxn matrices, as U*S*V'.
% This matrix is equal to X.U*Z.M*X.V' + Z.Up*X.V' + X.U*Z.Vp'. The
% latter is an mxn matrix, which could be too large to build
% explicitly, and this is why we return a low-rank representation
% instead. Note that there are no guarantees on U, S and V other than
% that USV' is the desired matrix. In particular, U and V are not (in
% general) orthonormal and S is not (in general) diagonal.
% (In this implementation, S is identity, but this might change.)
M.tangent2ambient = @tangent2ambient;
function Zambient = tangent2ambient(X, Z)
Zambient.U = [X.U*Z.M + Z.Up, X.U];
Zambient.S = eye(2*k);
Zambient.V = [X.V, Z.Vp];
end
% This retraction is second order, following general results from
% Absil, Malick, "Projection-like retractions on matrix manifolds",
% SIAM J. Optim., 22 (2012), pp. 135-158.
M.retr = @retraction;
function Y = retraction(X, Z, t)
if nargin < 3
t = 1.0;
end
% See personal notes June 28, 2012 (NB)
[Qu, Ru] = qr(Z.Up, 0);
[Qv, Rv] = qr(Z.Vp, 0);
% Calling svds or svd should yield the same result, but BV
% advocated that svd is more robust, and it doesn't change the
% asymptotic complexity to call svd then trim rather than call
% svds. Also, apparently Matlab calls ARPACK in a suboptimal way
% for svds in this scenario.
% [Ut St Vt] = svds([X.S+t*Z.M , t*Rv' ; t*Ru , zeros(k)], k);
[Ut, St, Vt] = svd([X.S+t*Z.M , t*Rv' ; t*Ru , zeros(k)]);
Y.U = [X.U Qu]*Ut(:, 1:k);
Y.V = [X.V Qv]*Vt(:, 1:k);
Y.S = St(1:k, 1:k) + eps*eye(k);
% equivalent but very slow code
% [U S V] = svds(X.U*X.S*X.V' + t*(X.U*Z.M*X.V' + Z.Up*X.V' + X.U*Z.Vp'), k);
% Y.U = U; Y.V = V; Y.S = S;
end
M.exp = @exponential;
function Y = exponential(X, Z, t)
if nargin < 3
t = 1.0;
end
Y = retraction(X, Z, t);
warning('manopt:fixedrankembeddedfactory:exp', ...
['Exponential for fixed rank ' ...
'manifold not implemented yet. Used retraction instead.']);
end
% Less safe but much faster checksum, June 24, 2014.
% Older version right below.
M.hash = @(X) ['z' hashmd5([sum(X.U(:)) ; sum(X.S(:)); sum(X.V(:)) ])];
%M.hash = @(X) ['z' hashmd5([X.U(:) ; X.S(:) ; X.V(:)])];
M.rand = @random;
% Factors U and V live on Stiefel manifolds, hence we will reuse
% their random generator.
stiefelm = stiefelfactory(m, k);
stiefeln = stiefelfactory(n, k);
function X = random()
X.U = stiefelm.rand();
X.V = stiefeln.rand();
X.S = diag(sort(rand(k, 1), 1, 'descend'));
end
% Generate a random tangent vector at X.
% TODO: consider a possible imbalance between the three components Up,
% Vp and M, when m, n and k are widely different (which is typical).
M.randvec = @randomvec;
function Z = randomvec(X)
Z.Up = randn(m, k);
Z.Vp = randn(n, k);
Z.M = randn(k);
Z = tangent(X, Z);
nrm = M.norm(X, Z);
Z.Up = Z.Up / nrm;
Z.Vp = Z.Vp / nrm;
Z.M = Z.M / nrm;
end
M.lincomb = @lincomb;
M.zerovec = @(X) struct('Up', zeros(m, k), 'M', zeros(k, k), ...
'Vp', zeros(n, k));
% New vector transport on June 24, 2014 (as indicated by Bart)
% Reference: Absil, Mahony, Sepulchre 2008 section 8.1.3:
% For Riemannian submanifolds of a Euclidean space, it is acceptable to
% transport simply by orthogonal projection of the tangent vector
% translated in the ambient space.
M.transp = @project_tangent;
function Z2 = project_tangent(X1, X2, Z1)
Z2 = projection(X2, tangent2ambient(X1, Z1));
end
M.vec = @vec;
function Zvec = vec(X, Z)
Zamb = tangent2ambient(X, Z);
Zamb_mat = Zamb.U*Zamb.S*Zamb.V';
Zvec = Zamb_mat(:);
end
M.mat = @(X, Zvec) projection(X, reshape(Zvec, [m, n]));
M.vecmatareisometries = @() true;
end
% Linear combination of tangent vectors
function d = lincomb(x, a1, d1, a2, d2) %#ok<INUSL>
if nargin == 3
d.Up = a1*d1.Up;
d.Vp = a1*d1.Vp;
d.M = a1*d1.M;
elseif nargin == 5
d.Up = a1*d1.Up + a2*d2.Up;
d.Vp = a1*d1.Vp + a2*d2.Vp;
d.M = a1*d1.M + a2*d2.M;
else
error('fixedrank.lincomb takes either 3 or 5 inputs.');
end
end
|
github
|
skovnats/madmm-master
|
fixedrankfactory_3factors.m
|
.m
|
madmm-master/compressed_modes/manopt/manopt/manifolds/fixedrank/fixedrankfactory_3factors.m
| 6,035 |
utf_8
|
a8c0a4812c73be5a82cf3918fe2d77c1
|
function M = fixedrankfactory_3factors(m, n, k)
% Manifold of m-by-n matrices of rank k with polar quotient geometry.
%
% function M = fixedrankfactory_3factors(m, n, k)
%
% Follows the polar quotient geometry described in the following paper:
% G. Meyer, S. Bonnabel and R. Sepulchre,
% "Linear regression under fixed-rank constraints: a Riemannian approach",
% ICML 2011.
%
% Paper link: http://www.icml-2011.org/papers/350_icmlpaper.pdf
%
% Additional reference is
%
% B. Mishra, G. Meyer, S. Bonnabel and R. Sepulchre
% "Fixed-rank matrix factorizations and Riemannian low-rank optimization",
% arXiv, 2012.
%
% Paper link: http://arxiv.org/abs/1209.0430
%
% A point X on the manifold is represented as a structure with three
% fields: L, S and R. The matrices L (mxk) and R (nxk) are orthonormal,
% while the matrix S (kxk) is a symmetric positive definite full rank
% matrix.
%
% Tangent vectors are represented as a structure with three fields: L, S
% and R.
% This file is part of Manopt: www.manopt.org.
% Original author: Bamdev Mishra, Dec. 30, 2012.
% Contributors:
% Change log:
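%
% Illustrative usage sketch (arbitrary sizes; Manopt assumed on the path).
% A point has orthonormal L and R and a symmetric positive definite S:
%
%   M = fixedrankfactory_3factors(100, 80, 5);
%   X = M.rand();              % X.S is diagonal with positive entries
%   eta = M.randvec(X);        % unit-norm horizontal tangent vector at X
%   Y = M.retr(X, eta, 0.1);   % the retraction keeps S positive definite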
M.name = @() sprintf('LSR'' quotient manifold of %dx%d matrices of rank %d', m, n, k);
M.dim = @() (m+n-k)*k;
% Choice of the metric on the orthonormal space is motivated by the symmetry present in the
% space. The metric on the positive definite space is its natural metric.
M.inner = @(X, eta, zeta) eta.L(:).'*zeta.L(:) + eta.R(:).'*zeta.R(:) ...
+ trace( (X.S\eta.S) * (X.S\zeta.S) );
M.norm = @(X, eta) sqrt(M.inner(X, eta, eta));
M.dist = @(x, y) error('fixedrankfactory_3factors.dist not implemented yet.');
M.typicaldist = @() 10*k;
skew = @(X) .5*(X-X');
symm = @(X) .5*(X+X');
stiefel_proj = @(L, H) H - L*symm(L'*H);
M.egrad2rgrad = @egrad2rgrad;
function eta = egrad2rgrad(X, eta)
eta.L = stiefel_proj(X.L, eta.L);
eta.S = X.S*symm(eta.S)*X.S;
eta.R = stiefel_proj(X.R, eta.R);
end
M.ehess2rhess = @ehess2rhess;
function Hess = ehess2rhess(X, egrad, ehess, eta)
% Riemannian gradient for the factor S
rgrad.S = X.S*symm(egrad.S)*X.S;
% Directional derivatives of the Riemannian gradient
Hess.L = ehess.L - eta.L*symm(X.L'*egrad.L);
Hess.L = stiefel_proj(X.L, Hess.L);
Hess.R = ehess.R - eta.R*symm(X.R'*egrad.R);
Hess.R = stiefel_proj(X.R, Hess.R);
Hess.S = X.S*symm(ehess.S)*X.S + 2*symm(eta.S*symm(egrad.S)*X.S);
% Correction factor for the non-constant metric on the factor S
Hess.S = Hess.S - symm(eta.S*(X.S\rgrad.S));
% Projection onto the horizontal space
Hess = M.proj(X, Hess);
end
M.proj = @projection;
function etaproj = projection(X, eta)
% First, projection onto the tangent space of the total space
eta.L = stiefel_proj(X.L, eta.L);
eta.R = stiefel_proj(X.R, eta.R);
eta.S = symm(eta.S);
% Then, projection onto the horizontal space
SS = X.S*X.S;
AS = X.S*(skew(X.L'*eta.L) + skew(X.R'*eta.R) - 2*skew(X.S\eta.S))*X.S;
omega = lyap(SS, -AS);
etaproj.L = eta.L - X.L*omega;
etaproj.S = eta.S - (X.S*omega - omega*X.S);
etaproj.R = eta.R - X.R*omega;
end
M.tangent = M.proj;
M.tangent2ambient = @(X, eta) eta;
M.retr = @retraction;
function Y = retraction(X, eta, t)
if nargin < 3
t = 1.0;
end
L = chol(X.S);
Y.S = L'*expm(L'\(t*eta.S)/L)*L;
Y.L = uf(X.L + t*eta.L);
Y.R = uf(X.R + t*eta.R);
end
M.exp = @exponential;
function Y = exponential(X, eta, t)
if nargin < 3
t = 1.0;
end
Y = retraction(X, eta, t);
warning('manopt:fixedrankfactory_3factors:exp', ...
['Exponential for fixed rank ' ...
'manifold not implemented yet. Used retraction instead.']);
end
M.hash = @(X) ['z' hashmd5([X.L(:) ; X.S(:) ; X.R(:)])];
M.rand = @random;
% Factors L and R live on Stiefel manifolds, hence we will reuse
% their random generator.
stiefelm = stiefelfactory(m, k);
stiefeln = stiefelfactory(n, k);
function X = random()
X.L = stiefelm.rand();
X.R = stiefeln.rand();
X.S = diag(1+rand(k, 1));
end
M.randvec = @randomvec;
function eta = randomvec(X)
% A random vector on the horizontal space
eta.L = randn(m, k);
eta.R = randn(n, k);
eta.S = randn(k, k);
eta = projection(X, eta);
nrm = M.norm(X, eta);
eta.L = eta.L / nrm;
eta.R = eta.R / nrm;
eta.S = eta.S / nrm;
end
M.lincomb = @lincomb;
M.zerovec = @(X) struct('L', zeros(m, k), 'S', zeros(k, k), ...
'R', zeros(n, k));
M.transp = @(x1, x2, d) projection(x2, d);
% vec and mat are not isometries, because of the unusual inner metric.
M.vec = @(X, U) [U.L(:) ; U.S(:); U.R(:)];
M.mat = @(X, u) struct('L', reshape(u(1:(m*k)), m, k), ...
'S', reshape(u((m*k+1): m*k + k*k), k, k), ...
'R', reshape(u((m*k+ k*k + 1):end), n, k));
M.vecmatareisometries = @() false;
end
% Linear combination of tangent vectors
function d = lincomb(x, a1, d1, a2, d2) %#ok<INUSL>
if nargin == 3
d.L = a1*d1.L;
d.R = a1*d1.R;
d.S = a1*d1.S;
elseif nargin == 5
d.L = a1*d1.L + a2*d2.L;
d.R = a1*d1.R + a2*d2.R;
d.S = a1*d1.S + a2*d2.S;
else
error('Bad use of fixedrankfactory_3factors.lincomb.');
end
end
function A = uf(A)
[L, unused, R] = svd(A, 0); %#ok
A = L*R';
end
|
github
|
skovnats/madmm-master
|
fixedrankMNquotientfactory.m
|
.m
|
madmm-master/compressed_modes/manopt/manopt/manifolds/fixedrank/fixedrankMNquotientfactory.m
| 4,472 |
utf_8
|
12343fec86ae2648fcd915623ae645c5
|
function M = fixedrankMNquotientfactory(m, n, k)
% Manifold of m-by-n matrices of rank k with quotient geometry.
%
% function M = fixedrankMNquotientfactory(m, n, k)
%
% This follows the quotient geometry described in the following paper:
% P.-A. Absil, L. Amodei and G. Meyer,
% "Two Newton methods on the manifold of fixed-rank matrices endowed
% with Riemannian quotient geometries", arXiv, 2012.
%
% Paper link: http://arxiv.org/abs/1209.0068
%
% A point X on the manifold is represented as a structure with two
% fields: M and N. The matrix M (mxk) is orthonormal, while the matrix N
% (nxk) is full-rank.
%
% Tangent vectors are represented as a structure with two fields (M, N).
% This file is part of Manopt: www.manopt.org.
% Original author: Bamdev Mishra, Dec. 30, 2012.
% Contributors:
% Change log:
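%
% Illustrative usage sketch (arbitrary sizes; Manopt assumed on the path).
% A point is the pair (M, N) with M orthonormal and N full rank:
%
%   Mfd = fixedrankMNquotientfactory(100, 80, 5);
%   X = Mfd.rand();            % X.M (100x5) orthonormal, X.N (80x5)
%   eta = Mfd.randvec(X);      % unit-norm horizontal tangent vector at X
%   Y = Mfd.exp(X, eta, 0.1);  % this factory implements a true exponential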
M.name = @() sprintf('MN'' quotient manifold of %dx%d matrices of rank %d', m, n, k);
M.dim = @() (m+n-k)*k;
% Choice of the metric is motivated by the symmetry present in the
% space
M.inner = @(X, eta, zeta) eta.M(:).'*zeta.M(:) + eta.N(:).'*zeta.N(:);
M.norm = @(X, eta) sqrt(M.inner(X, eta, eta));
M.dist = @(x, y) error('fixedrankMNquotientfactory.dist not implemented yet.');
M.typicaldist = @() 10*k;
symm = @(X) .5*(X+X');
stiefel_proj = @(M, H) H - M*symm(M'*H);
M.egrad2rgrad = @egrad2rgrad;
function eta = egrad2rgrad(X, eta)
eta.M = stiefel_proj(X.M, eta.M);
end
M.ehess2rhess = @ehess2rhess;
function Hess = ehess2rhess(X, egrad, ehess, eta)
% Directional derivative of the Riemannian gradient
Hess.M = ehess.M - eta.M*symm(X.M'*egrad.M);
Hess.M = stiefel_proj(X.M, Hess.M);
Hess.N = ehess.N;
% Projection onto the horizontal space
Hess = M.proj(X, Hess);
end
M.proj = @projection;
function etaproj = projection(X, eta)
% Start by projecting the vector from R^(mxk) x R^(nxk) to the tangent
% space to the total space, that is, eta.M should be in the
% tangent space to Stiefel at X.M and eta.N is arbitrary.
eta.M = stiefel_proj(X.M, eta.M);
% Now project from the tangent space to the horizontal space, that
% is, take care of the quotient.
% First solve a Sylvester equation (A symm., B skew-symm.)
A = X.N'*X.N + eye(k);
B = eta.M'*X.M + eta.N'*X.N;
B = B-B';
omega = lyap(A, -B);
% And project along the vertical space to the horizontal space.
etaproj.M = eta.M + X.M*omega;
etaproj.N = eta.N + X.N*omega;
end
M.exp = @exponential;
function Y = exponential(X, eta, t)
if nargin < 3
t = 1.0;
end
A = t*X.M'*eta.M;
S = t^2*eta.M'*eta.M;
Y.M = [X.M t*eta.M]*expm([A -S ; eye(k) A])*eye(2*k, k)*expm(-A);
% re-orthonormalize (seems necessary from time to time)
[Q, R] = qr(Y.M, 0);
Y.M = Q * diag(sign(diag(R)));
Y.N = X.N + t*eta.N;
end
% Factor M lives on the Stiefel manifold, hence we will reuse its
% random generator.
stiefelm = stiefelfactory(m, k);
M.retr = @retraction;
function Y = retraction(X, eta, t)
if nargin < 3
t = 1.0;
end
Y.M = uf(X.M + t*eta.M); % This is a valid retraction
Y.N = X.N + t*eta.N;
end
M.hash = @(X) ['z' hashmd5([X.M(:) ; X.N(:)])];
M.rand = @random;
function X = random()
X.M = stiefelm.rand();
X.N = randn(n, k);
end
M.randvec = @randomvec;
function eta = randomvec(X)
eta.M = randn(m, k);
eta.N = randn(n, k);
eta = projection(X, eta);
nrm = M.norm(X, eta);
eta.M = eta.M / nrm;
eta.N = eta.N / nrm;
end
M.lincomb = @lincomb;
M.zerovec = @(X) struct('M', zeros(m, k), 'N', zeros(n, k));
M.transp = @(x1, x2, d) projection(x2, d);
end
% Linear combination of tangent vectors
function d = lincomb(x, a1, d1, a2, d2) %#ok<INUSL>
if nargin == 3
d.M = a1*d1.M;
d.N = a1*d1.N;
elseif nargin == 5
d.M = a1*d1.M + a2*d2.M;
d.N = a1*d1.N + a2*d2.N;
else
error('Bad use of fixedrankMNquotientfactory.lincomb.');
end
end
function A = uf(A)
[L, unused, R] = svd(A, 0);
A = L*R';
end
|
github
|
skovnats/madmm-master
|
fixedrankfactory_2factors.m
|
.m
|
madmm-master/compressed_modes/manopt/manopt/manifolds/fixedrank/fixedrankfactory_2factors.m
| 5,813 |
utf_8
|
70044d83ff10591a75b81f415cb920c2
|
function M = fixedrankfactory_2factors(m, n, k)
% Manifold of m-by-n matrices of rank k with balanced quotient geometry.
%
% function M = fixedrankfactory_2factors(m, n, k)
%
% This follows the balanced quotient geometry described in the following paper:
% G. Meyer, S. Bonnabel and R. Sepulchre,
% "Linear regression under fixed-rank constraints: a Riemannian approach",
% ICML 2011.
%
% Paper link: http://www.icml-2011.org/papers/350_icmlpaper.pdf
%
% A point X on the manifold is represented as a structure with two
% fields: L and R. The matrices L (mxk) and R (nxk) are full column-rank
% matrices such that X = L*R'.
%
% Tangent vectors are represented as a structure with two fields: L, R
% This file is part of Manopt: www.manopt.org.
% Original author: Bamdev Mishra, Dec. 30, 2012.
% Contributors:
% Change log:
% July 10, 2013 (NB) : added vec, mat, tangent, tangent2ambient
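%
% Illustrative usage sketch (arbitrary sizes; Manopt assumed on the path).
% A point (L, R) represents the rank-k matrix L*R':
%
%   M = fixedrankfactory_2factors(100, 80, 5);
%   X = M.rand();              % X.L (100x5), X.R (80x5), full column rank
%   eta = M.randvec(X);        % unit-norm horizontal tangent vector at X
%   Y = M.retr(X, eta, 0.1);
%   rank(Y.L*Y.R')             % 5, generically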
M.name = @() sprintf('LR'' quotient manifold of %dx%d matrices of rank %d', m, n, k);
M.dim = @() (m+n-k)*k;
% Some precomputations at the point X to be used in the inner product (and
% pretty much everywhere else).
function X = prepare(X)
if ~all(isfield(X,{'LtL','RtR','invRtR','invLtL'}))
L = X.L;
R = X.R;
X.LtL = L'*L;
X.RtR = R'*R;
X.invLtL = inv(X.LtL);
X.invRtR = inv(X.RtR);
end
end
% Choice of the metric is motivated by the symmetry present in the space
M.inner = @iproduct;
function ip = iproduct(X, eta, zeta)
X = prepare(X);
ip = trace(X.invLtL*(eta.L'*zeta.L)) + trace( X.invRtR*(eta.R'*zeta.R));
end
M.norm = @(X, eta) sqrt(M.inner(X, eta, eta));
M.dist = @(x, y) error('fixedrankfactory_2factors.dist not implemented yet.');
M.typicaldist = @() 10*k;
symm = @(M) .5*(M+M');
M.egrad2rgrad = @egrad2rgrad;
function eta = egrad2rgrad(X, eta)
X = prepare(X);
eta.L = eta.L*X.LtL;
eta.R = eta.R*X.RtR;
end
M.ehess2rhess = @ehess2rhess;
function Hess = ehess2rhess(X, egrad, ehess, eta)
X = prepare(X);
% Riemannian gradient
rgrad = egrad2rgrad(X, egrad);
% Directional derivative of the Riemannian gradient
Hess.L = ehess.L*X.LtL + 2*egrad.L*symm(eta.L'*X.L);
Hess.R = ehess.R*X.RtR + 2*egrad.R*symm(eta.R'*X.R);
% We need a correction term for the non-constant metric
Hess.L = Hess.L - rgrad.L*((X.invLtL)*symm(X.L'*eta.L)) - eta.L*(X.invLtL*symm(X.L'*rgrad.L)) + X.L*(X.invLtL*symm(eta.L'*rgrad.L));
Hess.R = Hess.R - rgrad.R*((X.invRtR)*symm(X.R'*eta.R)) - eta.R*(X.invRtR*symm(X.R'*rgrad.R)) + X.R*(X.invRtR*symm(eta.R'*rgrad.R));
% Projection onto the horizontal space
Hess = M.proj(X, Hess);
end
M.proj = @projection;
% Projection of the vector eta onto the horizontal space
function etaproj = projection(X, eta)
X = prepare(X);
SS = (X.LtL)*(X.RtR);
AS = (X.LtL)*(X.R'*eta.R) - (eta.L'*X.L)*(X.RtR);
Omega = lyap(SS, SS,-AS);
etaproj.L = eta.L + X.L*Omega';
etaproj.R = eta.R - X.R*Omega;
end
M.tangent = M.proj;
M.tangent2ambient = @(X, eta) eta;
M.retr = @retraction;
function Y = retraction(X, eta, t)
if nargin < 3
t = 1.0;
end
Y.L = X.L + t*eta.L;
Y.R = X.R + t*eta.R;
% Numerical conditioning step: A simpler version.
% We need to ensure that L and R do not have very skewed
% relative norms.
scaling = norm(X.L, 'fro')/norm(X.R, 'fro');
scaling = sqrt(scaling);
Y.L = Y.L / scaling;
Y.R = Y.R * scaling;
% These are reused in the computation of the gradient and Hessian
Y = prepare(Y);
end
M.exp = @exponential;
function Y = exponential(X, eta, t)
if nargin < 3
t = 1.0;
end
Y = retraction(X, eta, t);
warning('manopt:fixedrankfactory_2factors:exp', ...
['Exponential for fixed rank ' ...
'manifold not implemented yet. Used retraction instead.']);
end
M.hash = @(X) ['z' hashmd5([X.L(:) ; X.R(:)])];
M.rand = @random;
function X = random()
% A random point on the total space
X.L = randn(m, k);
X.R = randn(n, k);
X = prepare(X);
end
M.randvec = @randomvec;
function eta = randomvec(X)
% A random vector in the horizontal space
eta.L = randn(m, k);
eta.R = randn(n, k);
eta = projection(X, eta);
nrm = M.norm(X, eta);
eta.L = eta.L / nrm;
eta.R = eta.R / nrm;
end
M.lincomb = @lincomb;
M.zerovec = @(X) struct('L', zeros(m, k),'R', zeros(n, k));
M.transp = @(x1, x2, d) projection(x2, d);
% vec and mat are not isometries, because of the unusual inner metric.
M.vec = @(X, U) [U.L(:) ; U.R(:)];
M.mat = @(X, u) struct('L', reshape(u(1:(m*k)), m, k), ...
'R', reshape(u((m*k+1):end), n, k));
M.vecmatareisometries = @() false;
end
% Linear combination of tangent vectors
function d = lincomb(x, a1, d1, a2, d2) %#ok<INUSL>
if nargin == 3
d.L = a1*d1.L;
d.R = a1*d1.R;
elseif nargin == 5
d.L = a1*d1.L + a2*d2.L;
d.R = a1*d1.R + a2*d2.R;
else
error('Bad use of fixedrankfactory_2factors.lincomb.');
end
end
|
github
|
skovnats/madmm-master
|
obliquefactory.m
|
.m
|
madmm-master/compressed_modes/manopt/manopt/manifolds/oblique/obliquefactory.m
| 6,609 |
utf_8
|
1031640cf68e1bf9252af77d1002836a
|
function M = obliquefactory(n, m, transposed)
% Returns a manifold struct to optimize over matrices w/ unit-norm columns.
%
% function M = obliquefactory(n, m)
% function M = obliquefactory(n, m, transposed)
%
% Oblique manifold: deals with matrices of size n x m such that each column
% has unit 2-norm, i.e., is a point on the unit sphere in R^n. The metric
% is such that the oblique manifold is a Riemannian submanifold of the
% space of nxm matrices with the usual trace inner product, i.e., the usual
% metric.
%
% If transposed is set to true (it is false by default), then the matrices
% are transposed: a point Y on the manifold is a matrix of size m x n and
% each row has unit 2-norm. It is the same geometry, just a different
% representation.
%
% See also: spherefactory
% This file is part of Manopt: www.manopt.org.
% Original author: Nicolas Boumal, Dec. 30, 2012.
% Contributors:
% Change log:
%
% July 16, 2013 (NB) :
% Added 'transposed' option, mainly for ease of comparison with the
% elliptope geometry.
%
% Nov. 29, 2013 (NB) :
% Added normalize_columns function to make it easier to exploit the
% bsxfun formulation of column normalization, which avoids using for
% loops and provides performance gains. The exponential still uses a
% for loop.
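%
% Illustrative usage sketch (arbitrary sizes; Manopt assumed on the path):
%
%   M = obliquefactory(3, 10);
%   X = M.rand();                        % 3x10 matrix with unit-norm columns
%   max(abs(sqrt(sum(X.^2, 1)) - 1))     % ~0
%   Mt = obliquefactory(3, 10, true);    % transposed representation
%   Xt = Mt.rand();                      % 10x3 matrix with unit-norm rows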
if ~exist('transposed', 'var') || isempty(transposed)
transposed = false;
end
if transposed
trnsp = @(X) X';
else
trnsp = @(X) X;
end
M.name = @() sprintf('Oblique manifold OB(%d, %d)', n, m);
M.dim = @() (n-1)*m;
M.inner = @(x, d1, d2) d1(:).'*d2(:);
M.norm = @(x, d) norm(d(:));
M.dist = @(x, y) norm(real(acos(sum(trnsp(x).*trnsp(y), 1))));
M.typicaldist = @() pi*sqrt(m);
M.proj = @(X, U) trnsp(projection(trnsp(X), trnsp(U)));
M.tangent = M.proj;
% For Riemannian submanifolds, converting a Euclidean gradient into a
% Riemannian gradient amounts to an orthogonal projection.
M.egrad2rgrad = M.proj;
M.ehess2rhess = @ehess2rhess;
function rhess = ehess2rhess(X, egrad, ehess, U)
X = trnsp(X);
egrad = trnsp(egrad);
ehess = trnsp(ehess);
U = trnsp(U);
PXehess = projection(X, ehess);
inners = sum(X.*egrad, 1);
rhess = PXehess - bsxfun(@times, U, inners);
rhess = trnsp(rhess);
end
M.exp = @exponential;
% Exponential on the oblique manifold
function y = exponential(x, d, t)
x = trnsp(x);
d = trnsp(d);
if nargin < 3
t = 1.0;
end
m = size(x, 2);
y = zeros(size(x));
if t ~= 0
for i = 1 : m
y(:, i) = sphere_exponential(x(:, i), d(:, i), t);
end
else
y = x;
end
y = trnsp(y);
end
M.log = @logarithm;
function v = logarithm(x1, x2)
x1 = trnsp(x1);
x2 = trnsp(x2);
v = M.proj(x1, x2 - x1);
dists = acos(sum(x1.*x2, 1));
norms = sqrt(sum(v.^2, 1));
factors = dists./norms;
% factors(dists <= 1e-6) = 1;
v = bsxfun(@times, v, factors);
v = trnsp(v);
end
M.retr = @retraction;
% Retraction on the oblique manifold
function y = retraction(x, d, t)
x = trnsp(x);
d = trnsp(d);
if nargin < 3
t = 1.0;
end
m = size(x, 2);
if t ~= 0
y = normalize_columns(x + t*d);
else
y = x;
end
y = trnsp(y);
end
M.hash = @(x) ['z' hashmd5(x(:))];
M.rand = @() trnsp(random(n, m));
M.randvec = @(x) trnsp(randomvec(n, m, trnsp(x)));
M.lincomb = @lincomb;
M.zerovec = @(x) trnsp(zeros(n, m));
M.transp = @(x1, x2, d) M.proj(x2, d);
M.pairmean = @pairmean;
function y = pairmean(x1, x2)
y = trnsp(x1+x2);
y = normalize_columns(y);
y = trnsp(y);
end
% vec returns a vector representation of an input tangent vector which
% is represented as a matrix. mat returns the original matrix
% representation of the input vector representation of a tangent
% vector. vec and mat are thus inverse of each other. They are
% furthermore isometries between a subspace of R^nm and the tangent
% space at x.
vect = @(X) X(:);
M.vec = @(x, u_mat) vect(trnsp(u_mat));
M.mat = @(x, u_vec) trnsp(reshape(u_vec, [n, m]));
M.vecmatareisometries = @() true;
end
% Given a matrix X, returns the same matrix but with each column scaled so
% that it has unit 2-norm.
function X = normalize_columns(X)
norms = sqrt(sum(X.^2, 1));
X = bsxfun(@times, X, 1./norms);
end
% Orthogonal projection of the ambient vector H onto the tangent space at X
function PXH = projection(X, H)
% Compute the inner product between each vector H(:, i) with its root
% point X(:, i), that is, X(:, i).' * H(:, i). Returns a row vector.
inners = sum(X.*H, 1);
% Subtract from H the components of the H(:, i)'s that are parallel to
% the root points X(:, i).
PXH = H - bsxfun(@times, X, inners);
% % Equivalent but slow code:
% m = size(X, 2);
% PXH = zeros(size(H));
% for i = 1 : m
% PXH(:, i) = H(:, i) - X(:, i) * (X(:, i)'*H(:, i));
% end
end
% Exponential on the sphere.
function y = sphere_exponential(x, d, t)
if nargin == 2
t = 1.0;
end
td = t*d;
nrm_td = norm(td);
if nrm_td > 1e-6
y = x*cos(nrm_td) + (td/nrm_td)*sin(nrm_td);
else
% if the step is too small, to avoid dividing by nrm_td, we choose
% to approximate with this retraction-like step.
y = x + td;
y = y / norm(y);
end
end
% Uniform random sampling on the sphere.
function x = random(n, m)
x = normalize_columns(randn(n, m));
end
% Random normalized tangent vector at x.
function d = randomvec(n, m, x)
d = randn(n, m);
d = projection(x, d);
d = d / norm(d(:));
end
% Linear combination of tangent vectors
function d = lincomb(x, a1, d1, a2, d2) %#ok<INUSL>
if nargin == 3
d = a1*d1;
elseif nargin == 5
d = a1*d1 + a2*d2;
else
error('Bad use of oblique.lincomb.');
end
end
|
github
|
skovnats/madmm-master
|
stiefelfactory.m
|
.m
|
madmm-master/compressed_modes/manopt/manopt/manifolds/stiefel/stiefelfactory.m
| 4,989 |
utf_8
|
5cc739262d8e75c600af8497647ee711
|
function M = stiefelfactory(n, p, k)
% Returns a manifold structure to optimize over orthonormal matrices.
%
% function M = stiefelfactory(n, p)
% function M = stiefelfactory(n, p, k)
%
% The Stiefel manifold is the set of orthonormal nxp matrices. If k
% is larger than 1, this is the Cartesian product of the Stiefel manifold
% taken k times. The metric is such that the manifold is a Riemannian
% submanifold of R^nxp equipped with the usual trace inner product, that
% is, it is the usual metric.
%
% Points are represented as matrices X of size n x p x k (or n x p if k=1,
% which is the default) such that each n x p matrix is orthonormal,
% i.e., X'*X = eye(p) if k = 1, or X(:, :, i)' * X(:, :, i) = eye(p) for
% i = 1 : k if k > 1. Tangent vectors are represented as matrices the same
% size as points.
%
% By default, k = 1.
%
% See also: grassmannfactory rotationsfactory
% This file is part of Manopt: www.manopt.org.
% Original author: Nicolas Boumal, Dec. 30, 2012.
% Contributors:
% Change log:
% July 5, 2013 (NB) : Added ehess2rhess.
% Jan. 27, 2014 (BM) : Bug in ehess2rhess corrected.
% June 24, 2014 (NB) : Added true exponential map and changed the randvec
% function so that it now returns a globally
% normalized vector, not a vector where each
% component is normalized (this only matters if k>1).
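%
% Illustrative usage sketch (arbitrary sizes; Manopt assumed on the path):
%
%   M = stiefelfactory(10, 3);
%   X = M.rand();                  % 10x3 matrix with orthonormal columns
%   norm(X'*X - eye(3), 'fro')     % ~0
%   U = M.randvec(X);              % tangent vector: X'*U + U'*X = 0
%   Y = M.retr(X, U, 0.1);         % QR-based retraction back to the manifold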
if ~exist('k', 'var') || isempty(k)
k = 1;
end
if k == 1
M.name = @() sprintf('Stiefel manifold St(%d, %d)', n, p);
elseif k > 1
M.name = @() sprintf('Product Stiefel manifold St(%d, %d)^%d', n, p, k);
else
error('k must be an integer no less than 1.');
end
M.dim = @() k*(n*p - .5*p*(p+1));
M.inner = @(x, d1, d2) d1(:).'*d2(:);
M.norm = @(x, d) norm(d(:));
M.dist = @(x, y) error('stiefel.dist not implemented yet.');
M.typicaldist = @() sqrt(p*k);
M.proj = @projection;
function Up = projection(X, U)
XtU = multiprod(multitransp(X), U);
symXtU = multisym(XtU);
Up = U - multiprod(X, symXtU);
% The code above is equivalent to, but much faster than, the code below.
%
% Up = zeros(size(U));
% function A = sym(A), A = .5*(A+A'); end
% for i = 1 : k
% Xi = X(:, :, i);
% Ui = U(:, :, i);
% Up(:, :, i) = Ui - Xi*sym(Xi'*Ui);
% end
end
M.tangent = M.proj;
% For Riemannian submanifolds, converting a Euclidean gradient into a
% Riemannian gradient amounts to an orthogonal projection.
M.egrad2rgrad = M.proj;
M.ehess2rhess = @ehess2rhess;
function rhess = ehess2rhess(X, egrad, ehess, H)
XtG = multiprod(multitransp(X), egrad);
symXtG = multisym(XtG);
HsymXtG = multiprod(H, symXtG);
rhess = projection(X, ehess - HsymXtG);
end
M.retr = @retraction;
function Y = retraction(X, U, t)
if nargin < 3
t = 1.0;
end
Y = X + t*U;
for i = 1 : k
[Q, R] = qr(Y(:, :, i), 0);
% The instruction with R ensures we are not flipping signs
% of some columns, which should never happen in modern Matlab
% versions but may be an issue with older versions.
Y(:, :, i) = Q * diag(sign(sign(diag(R))+.5));
end
end
M.exp = @exponential;
function Y = exponential(X, U, t)
if nargin == 2
t = 1;
end
tU = t*U;
Y = zeros(size(X));
for i = 1 : k
% From a formula by Ross Lippert, Example 5.4.2 in AMS08.
Xi = X(:, :, i);
Ui = tU(:, :, i);
Y(:, :, i) = [Xi Ui] * ...
expm([Xi'*Ui , -Ui'*Ui ; eye(p) , Xi'*Ui]) * ...
[ expm(-Xi'*Ui) ; zeros(p) ];
end
end
M.hash = @(X) ['z' hashmd5(X(:))];
M.rand = @random;
function X = random()
X = zeros(n, p, k);
for i = 1 : k
[Q, unused] = qr(randn(n, p), 0); %#ok<NASGU>
X(:, :, i) = Q;
end
end
M.randvec = @randomvec;
function U = randomvec(X)
U = projection(X, randn(n, p, k));
U = U / norm(U(:));
end
M.lincomb = @lincomb;
M.zerovec = @(x) zeros(n, p, k);
M.transp = @(x1, x2, d) projection(x2, d);
M.vec = @(x, u_mat) u_mat(:);
M.mat = @(x, u_vec) reshape(u_vec, [n, p, k]);
M.vecmatareisometries = @() true;
end
% Linear combination of tangent vectors
function d = lincomb(x, a1, d1, a2, d2) %#ok<INUSL>
if nargin == 3
d = a1*d1;
elseif nargin == 5
d = a1*d1 + a2*d2;
else
error('Bad use of stiefel.lincomb.');
end
end
|
github
|
skovnats/madmm-master
|
rotationsfactory.m
|
.m
|
madmm-master/compressed_modes/manopt/manopt/manifolds/rotations/rotationsfactory.m
| 4,857 |
utf_8
|
421ccf6b88f519f989d6dd87fb0a1128
|
function M = rotationsfactory(n, k)
% Returns a manifold structure to optimize over rotation matrices.
%
% function M = rotationsfactory(n)
% function M = rotationsfactory(n, k)
%
% Special orthogonal group (the manifold of rotations): deals with matrices
% R of size n x n x k (or n x n if k = 1, which is the default) such that
% each n x n matrix is orthogonal, with determinant 1, i.e., X'*X = eye(n)
% if k = 1, or X(:, :, i)' * X(:, :, i) = eye(n) for i = 1 : k if k > 1.
%
% This is a description of SO(n)^k with the induced metric from the
% embedding space (R^nxn)^k, i.e., this manifold is a Riemannian
% submanifold of (R^nxn)^k endowed with the usual trace inner product.
%
% Tangent vectors are represented in the Lie algebra, i.e., as skew
% symmetric matrices. Use the function M.tangent2ambient(X, H) to switch
% from the Lie algebra representation to the embedding space
% representation.
%
% By default, k = 1.
%
% See also: stiefelfactory
% This file is part of Manopt: www.manopt.org.
% Original author: Nicolas Boumal, Dec. 30, 2012.
% Contributors:
% Change log:
% Jan. 31, 2013, NB : added egrad2rgrad and ehess2rhess
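%
% Illustrative usage sketch (Manopt assumed on the path). Tangent vectors
% are stored as skew-symmetric matrices and mapped to the embedding space
% with tangent2ambient:
%
%   M = rotationsfactory(3);
%   R = M.rand();                    % 3x3 rotation: R'*R = I, det(R) = 1
%   U = M.randvec(R);                % skew-symmetric, unit norm
%   V = M.tangent2ambient(R, U);     % ambient direction R*U
%   R2 = M.exp(R, U, 0.1);           % exponential map via matrix expm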
if ~exist('k', 'var') || isempty(k)
k = 1;
end
if k == 1
M.name = @() sprintf('Rotations manifold SO(%d)', n);
elseif k > 1
M.name = @() sprintf('Product rotations manifold SO(%d)^%d', n, k);
else
error('k must be an integer no less than 1.');
end
M.dim = @() k*nchoosek(n, 2);
M.inner = @(x, d1, d2) d1(:).'*d2(:);
M.norm = @(x, d) norm(d(:));
M.typicaldist = @() pi*sqrt(n*k);
M.proj = @(X, H) multiskew(multiprod(multitransp(X), H));
M.tangent = @(X, H) multiskew(H);
M.tangent2ambient = @(X, U) multiprod(X, U);
M.egrad2rgrad = M.proj;
M.ehess2rhess = @ehess2rhess;
function Rhess = ehess2rhess(X, Egrad, Ehess, H)
% Reminder: H contains skew-symmetric matrices. The actual
% direction that the point X is moved along is X*H.
Xt = multitransp(X);
XtEgrad = multiprod(Xt, Egrad);
symXtEgrad = multisym(XtEgrad);
XtEhess = multiprod(Xt, Ehess);
Rhess = multiskew( XtEhess - multiprod(H, symXtEgrad) );
end
M.retr = @retraction;
function Y = retraction(X, U, t)
if nargin == 3
tU = t*U;
else
tU = U;
end
Y = X + multiprod(X, tU);
for i = 1 : k
[Q, R] = qr(Y(:, :, i));
% The instruction with R ensures we are not flipping signs
% of some columns, which should never happen in modern Matlab
% versions but may be an issue with older versions.
Y(:, :, i) = Q * diag(sign(sign(diag(R))+.5));
% This is guaranteed to always yield orthogonal matrices with
% determinant +1. Simply look at the eigenvalues of a skew
% symmetric matrix, then at those of the identity plus that matrix,
% and compute their product for the determinant: it's strictly
% positive in all cases.
end
end
M.exp = @exponential;
function Y = exponential(X, U, t)
if nargin == 3
exptU = t*U;
else
exptU = U;
end
for i = 1 : k
exptU(:, :, i) = expm(exptU(:, :, i));
end
Y = multiprod(X, exptU);
end
M.log = @logarithm;
function U = logarithm(X, Y)
U = multiprod(multitransp(X), Y);
for i = 1 : k
% The result of logm should be real in theory, but it is
% numerically useful to force it.
U(:, :, i) = real(logm(U(:, :, i)));
end
% Ensure the tangent vector is in the Lie algebra.
U = multiskew(U);
end
M.hash = @(X) ['z' hashmd5(X(:))];
M.rand = @() randrot(n, k);
M.randvec = @randomvec;
function U = randomvec(X) %#ok<INUSD>
U = randskew(n, k);
nrmU = sqrt(U(:).'*U(:));
U = U / nrmU;
end
M.lincomb = @lincomb;
M.zerovec = @(x) zeros(n, n, k);
M.transp = @(x1, x2, d) d;
M.pairmean = @pairmean;
function Y = pairmean(X1, X2)
V = M.log(X1, X2);
Y = M.exp(X1, .5*V);
end
M.dist = @(x, y) M.norm(x, M.log(x, y));
M.vec = @(x, u_mat) u_mat(:);
M.mat = @(x, u_vec) reshape(u_vec, [n, n, k]);
M.vecmatareisometries = @() true;
end
% Linear combination of tangent vectors
function d = lincomb(x, a1, d1, a2, d2) %#ok<INUSL>
if nargin == 3
d = a1*d1;
elseif nargin == 5
d = a1*d1 + a2*d2;
else
error('Bad use of rotations.lincomb.');
end
end
|
github
|
skovnats/madmm-master
|
spherecomplexfactory.m
|
.m
|
madmm-master/compressed_modes/manopt/manopt/manifolds/sphere/spherecomplexfactory.m
| 3,285 |
utf_8
|
28cbdaa05de778558800a89c16acad64
|
function M = spherecomplexfactory(n, m)
% Returns a manifold struct to optimize over unit-norm complex matrices.
%
% function M = spherecomplexfactory(n)
% function M = spherecomplexfactory(n, m)
%
% Manifold of n-by-m complex matrices of unit Frobenius norm.
% By default, m = 1, which corresponds to the unit sphere in C^n. The
% metric is such that the sphere is a Riemannian submanifold of the space
% of 2nx2m real matrices with the usual trace inner product, i.e., the
% usual metric.
%
% See also: spherefactory
% This file is part of Manopt: www.manopt.org.
% Original author: Nicolas Boumal, Dec. 30, 2012.
% Contributors:
% Change log:
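%
% Illustrative usage sketch (Manopt assumed on the path):
%
%   M = spherecomplexfactory(4);
%   x = M.rand();              % complex 4x1 vector with norm(x, 'fro') = 1
%   d = M.randvec(x);          % tangent vector: real(x'*d) = 0, unit norm
%   y = M.retr(x, d, 0.1);     % remains on the sphere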
if ~exist('m', 'var')
m = 1;
end
if m == 1
M.name = @() sprintf('Complex sphere S^%d', n-1);
else
M.name = @() sprintf('Unit F-norm %dx%d complex matrices', n, m);
end
M.dim = @() 2*(n*m)-1;
M.inner = @(x, d1, d2) real(d1(:)'*d2(:));
M.norm = @(x, d) norm(d, 'fro');
M.dist = @(x, y) acos(real(x(:)'*y(:)));
M.typicaldist = @() pi;
M.proj = @(x, d) reshape(d(:) - x(:)*(real(x(:)'*d(:))), n, m);
% For Riemannian submanifolds, converting a Euclidean gradient into a
% Riemannian gradient amounts to an orthogonal projection.
M.egrad2rgrad = M.proj;
M.tangent = M.proj;
M.exp = @exponential;
M.retr = @retraction;
M.log = @logarithm;
function v = logarithm(x1, x2)
error('The logarithmic map is not yet implemented for this manifold.');
end
M.hash = @(x) ['z' hashmd5([real(x(:)) ; imag(x(:))])];
M.rand = @() random(n, m);
M.randvec = @(x) randomvec(n, m, x);
M.lincomb = @lincomb;
M.zerovec = @(x) zeros(n, m);
M.transp = @(x1, x2, d) M.proj(x2, d);
M.pairmean = @pairmean;
function y = pairmean(x1, x2)
y = x1+x2;
y = y / norm(y, 'fro');
end
end
% Exponential on the sphere
function y = exponential(x, d, t)
if nargin == 2
t = 1;
end
td = t*d;
nrm_td = norm(td, 'fro');
if nrm_td > 1e-6
y = x*cos(nrm_td) + td*(sin(nrm_td)/nrm_td);
else
% If the step is too small, to avoid dividing by nrm_td, we choose
% to approximate with this retraction-like step.
y = x + td;
y = y / norm(y, 'fro');
end
end
% Retraction on the sphere
function y = retraction(x, d, t)
if nargin == 2
t = 1;
end
y = x+t*d;
y = y/norm(y, 'fro');
end
% Uniform random sampling on the sphere.
function x = random(n, m)
x = randn(n, m) + 1i*randn(n, m);
x = x/norm(x, 'fro');
end
% Random normalized tangent vector at x.
function d = randomvec(n, m, x)
d = randn(n, m) + 1i*randn(n, m);
d = reshape(d(:) - x(:)*(real(x(:)'*d(:))), n, m);
d = d / norm(d, 'fro');
end
% Linear combination of tangent vectors
function d = lincomb(x, a1, d1, a2, d2) %#ok<INUSL>
if nargin == 3
d = a1*d1;
elseif nargin == 5
d = a1*d1 + a2*d2;
else
error('Bad use of spherecomplex.lincomb.');
end
end
|
github
|
skovnats/madmm-master
|
spherefactory.m
|
.m
|
madmm-master/compressed_modes/manopt/manopt/manifolds/sphere/spherefactory.m
| 3,447 |
utf_8
|
1b575cecaef843bcda1574bc09b4760c
|
function M = spherefactory(n, m)
% Returns a manifold struct to optimize over unit-norm vectors or matrices.
%
% function M = spherefactory(n)
% function M = spherefactory(n, m)
%
% Manifold of n-by-m real matrices of unit Frobenius norm.
% By default, m = 1, which corresponds to the unit sphere in R^n. The
% metric is such that the sphere is a Riemannian submanifold of the space
% of nxm matrices with the usual trace inner product, i.e., the usual
% metric.
%
% See also: obliquefactory spherecomplexfactory
% This file is part of Manopt: www.manopt.org.
% Original author: Nicolas Boumal, Dec. 30, 2012.
% Contributors:
% Change log:
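%
% Illustrative usage sketch (Manopt assumed on the path):
%
%   M = spherefactory(5);
%   x = M.rand();              % unit-norm vector in R^5
%   d = M.randvec(x);          % tangent vector: x'*d = 0, unit norm
%   y = M.exp(x, d, 0.1);      % geodesic step, still unit norm
%   M.dist(x, y)               % equals 0.1 up to numerical error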
if ~exist('m', 'var')
m = 1;
end
if m == 1
M.name = @() sprintf('Sphere S^%d', n-1);
else
M.name = @() sprintf('Unit F-norm %dx%d matrices', n, m);
end
M.dim = @() n*m-1;
M.inner = @(x, d1, d2) d1(:).'*d2(:);
M.norm = @(x, d) norm(d, 'fro');
M.dist = @(x, y) real(acos(x(:).'*y(:)));
M.typicaldist = @() pi;
M.proj = @(x, d) d - x*(x(:).'*d(:));
M.tangent = M.proj;
% For Riemannian submanifolds, converting a Euclidean gradient into a
% Riemannian gradient amounts to an orthogonal projection.
M.egrad2rgrad = M.proj;
M.ehess2rhess = @ehess2rhess;
function rhess = ehess2rhess(x, egrad, ehess, u)
rhess = M.proj(x, ehess) - (x(:)'*egrad(:))*u;
end
M.exp = @exponential;
M.retr = @retraction;
M.log = @logarithm;
function v = logarithm(x1, x2)
v = M.proj(x1, x2 - x1);
di = M.dist(x1, x2);
nv = norm(v, 'fro');
v = v * (di / nv);
end
M.hash = @(x) ['z' hashmd5(x(:))];
M.rand = @() random(n, m);
M.randvec = @(x) randomvec(n, m, x);
M.lincomb = @lincomb;
M.zerovec = @(x) zeros(n, m);
M.transp = @(x1, x2, d) M.proj(x2, d);
M.pairmean = @pairmean;
function y = pairmean(x1, x2)
y = x1+x2;
y = y / norm(y, 'fro');
end
M.vec = @(x, u_mat) u_mat(:);
M.mat = @(x, u_vec) reshape(u_vec, [n, m]);
M.vecmatareisometries = @() true;
end
% Exponential on the sphere
function y = exponential(x, d, t)
if nargin == 2
t = 1;
end
td = t*d;
nrm_td = norm(td, 'fro');
if nrm_td > 1e-6
y = x*cos(nrm_td) + td*(sin(nrm_td)/nrm_td);
else
% if the step is too small, to avoid dividing by nrm_td, we choose
% to approximate with this retraction-like step.
y = x + td;
y = y / norm(y, 'fro');
end
end
% Retraction on the sphere
function y = retraction(x, d, t)
if nargin == 2
t = 1;
end
y = x + t*d;
y = y / norm(y, 'fro');
end
% Uniform random sampling on the sphere.
function x = random(n, m)
x = randn(n, m);
x = x/norm(x, 'fro');
end
% Random normalized tangent vector at x.
function d = randomvec(n, m, x)
d = randn(n, m);
d = d - x*(x(:).'*d(:));
d = d / norm(d, 'fro');
end
% Linear combination of tangent vectors
function d = lincomb(x, a1, d1, a2, d2) %#ok<INUSL>
if nargin == 3
d = a1*d1;
elseif nargin == 5
d = a1*d1 + a2*d2;
else
error('Bad use of sphere.lincomb.');
end
end
| github | skovnats/madmm-master | trustregions.m | .m | madmm-master/compressed_modes/manopt/manopt/solvers/trustregions/trustregions.m | 27,503 | utf_8 | 16c81a00a44c928fd6ca503399b04111 |
function [x, cost, info, options] = trustregions(problem, x, options)
% Riemannian trust-regions solver for optimization on manifolds.
%
% function [x, cost, info, options] = trustregions(problem)
% function [x, cost, info, options] = trustregions(problem, x0)
% function [x, cost, info, options] = trustregions(problem, x0, options)
% function [x, cost, info, options] = trustregions(problem, [], options)
%
% This is the Riemannian Trust-Region solver (with tCG inner solve), named
% RTR. This solver will attempt to minimize the cost function described in
% the problem structure. It requires the availability of the cost function
% and of its gradient. It will issue calls for the Hessian. If no Hessian
% nor approximate Hessian is provided, a standard approximation of the
% Hessian based on the gradient will be computed. If a preconditioner for
% the Hessian is provided, it will be used.
%
% For a description of the algorithm and theorems offering convergence
% guarantees, see the references below. Documentation for this solver is
% available online at:
%
% http://www.manopt.org/solver_documentation_trustregions.html
%
%
% The initial iterate is x0 if it is provided. Otherwise, a random point on
% the manifold is picked. To specify options whilst not specifying an
% initial iterate, give x0 as [] (the empty matrix).
%
% The two outputs 'x' and 'cost' are the last reached point on the manifold
% and its cost. Notice that x is not necessarily the best reached point,
% because this solver is not forced to be a descent method. In particular,
% very close to convergence, it is sometimes preferable to accept very
% slight increases in the cost value (on the order of the machine epsilon)
% in the process of reaching fine convergence. In practice, this is not a
% limiting factor, as normally one does not need fine enough convergence
% that this becomes an issue.
%
% The output 'info' is a struct-array which contains information about the
% iterations:
% iter (integer)
% The (outer) iteration number, or number of steps considered
% (whether accepted or rejected). The initial guess is 0.
% cost (double)
% The corresponding cost value.
% gradnorm (double)
% The (Riemannian) norm of the gradient.
% numinner (integer)
% The number of inner iterations executed to compute this iterate.
% Inner iterations are truncated-CG steps. Each one requires a
% Hessian (or approximate Hessian) evaluation.
% time (double)
% The total elapsed time in seconds to reach the corresponding cost.
% rho (double)
% The performance ratio for the iterate.
% rhonum, rhoden (double)
% Regularized numerator and denominator of the performance ratio:
% rho = rhonum/rhoden. See options.rho_regularization.
% accepted (boolean)
% Whether the proposed iterate was accepted or not.
% stepsize (double)
% The (Riemannian) norm of the vector returned by the inner solver
% tCG and which is retracted to obtain the proposed next iterate. If
% accepted = true for the corresponding iterate, this is the size of
% the step from the previous to the new iterate. If accepted is
% false, the step was not executed and this is the size of the
% rejected step.
% Delta (double)
% The trust-region radius at the outer iteration.
% cauchy (boolean)
% Whether the Cauchy point was used or not (if useRand is true).
% And possibly additional information logged by options.statsfun.
% For example, type [info.gradnorm] to obtain a vector of the successive
% gradient norms reached at each (outer) iteration.
%
% The options structure is used to overwrite the default values. All
% options have a default value and are hence optional. To force an option
% value, pass an options structure with a field options.optionname, where
% optionname is one of the following and the default value is indicated
% between parentheses:
%
% tolgradnorm (1e-6)
% The algorithm terminates if the norm of the gradient drops below
% this. For well-scaled problems, a rule of thumb is that you can
% expect to reduce the gradient norm by 8 orders of magnitude
% (sqrt(eps)) compared to the gradient norm at a "typical" point (a
% rough initial iterate for example). Further decrease is sometimes
% possible, but inexact floating point arithmetic will eventually
% limit the final accuracy. If tolgradnorm is set too low, the
% algorithm may end up iterating forever (or at least until another
% stopping criterion triggers).
% maxiter (1000)
% The algorithm terminates if maxiter (outer) iterations were executed.
% maxtime (Inf)
% The algorithm terminates if maxtime seconds elapsed.
% miniter (3)
% Minimum number of outer iterations (used only if useRand is true).
% mininner (1)
% Minimum number of inner iterations (for tCG).
% maxinner (problem.M.dim() : the manifold's dimension)
% Maximum number of inner iterations (for tCG).
% Delta_bar (problem.M.typicaldist() or sqrt(problem.M.dim()))
% Maximum trust-region radius. If you specify this parameter but not
% Delta0, then Delta0 will be set to 1/8 times this parameter.
% Delta0 (Delta_bar/8)
% Initial trust-region radius. If you observe a long plateau at the
% beginning of the convergence plot (gradient norm VS iteration), it
% may pay off to try to tune this parameter to shorten the plateau.
% You should not set this parameter without setting Delta_bar.
% useRand (false)
% Set to true if the trust-region solve is to be initiated with a
% random tangent vector. If set to true, no preconditioner will be
% used. This option is set to true in some scenarios to escape saddle
% points, but is otherwise seldom activated.
% kappa (0.1)
% Inner kappa convergence tolerance.
% theta (1.0)
% Inner theta convergence tolerance.
% rho_prime (0.1)
% Accept/reject ratio : if rho is at least rho_prime, the outer
% iteration is accepted. Otherwise, it is rejected. In case it is
% rejected, the trust-region radius will have been decreased.
% To ensure this, rho_prime must be strictly smaller than 1/4.
% rho_regularization (1e3)
% Close to convergence, evaluating the performance ratio rho is
% numerically challenging. Meanwhile, close to convergence, the
% quadratic model should be a good fit and the steps should be
% accepted. Regularization lets rho go to 1 as the model decrease and
% the actual decrease go to zero. Set this option to zero to disable
% regularization (not recommended). See in-code for the specifics.
% statsfun (none)
% Function handle to a function that will be called after each
% iteration to provide the opportunity to log additional statistics.
% They will be returned in the info struct. See the generic Manopt
% documentation about solvers for further information. statsfun is
% called with the point x that was reached last, after the
% accept/reject decision. See comment below.
% stopfun (none)
% Function handle to a function that will be called at each iteration
% to provide the opportunity to specify additional stopping criteria.
% See the generic Manopt documentation about solvers for further
% information.
% verbosity (2)
% Integer number used to tune the amount of output the algorithm
% generates during execution (mostly as text in the command window).
% The higher, the more output. 0 means silent. 3 and above includes a
% display of the options structure at the beginning of the execution.
% debug (false)
% Set to true to allow the algorithm to perform additional
% computations for debugging purposes. If a debugging test fails, you
% will be informed of it, usually via the command window. Be aware
% that these additional computations appear in the algorithm timings
% too.
% storedepth (20)
% Maximum number of different points x of the manifold for which a
% store structure will be kept in memory in the storedb. If the
% caching features of Manopt are not used, this is irrelevant. If
% memory usage is an issue, you may try to lower this number.
% Profiling may then help to investigate if a performance hit was
% incurred as a result.
%
% Notice that statsfun is called with the point x that was reached last,
% after the accept/reject decision. Hence: if the step was accepted, we get
% that new x, with a store which only saw the call for the cost and for the
% gradient. If the step was rejected, we get the same x as previously, with
% the store structure containing everything that was computed at that point
% (possibly including previous rejects at that same point). Hence, statsfun
% should not be used in conjunction with the store to count operations for
% example. Instead, you could use a global variable and increment that
% variable directly from the cost related functions. It is however possible
% to use statsfun with the store to compute, for example, alternate merit
% functions on the point x.
%
% See also: steepestdescent conjugategradient manopt/examples
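%
% Example (illustrative sketch, in the spirit of the Manopt examples; the
% matrix A below is an arbitrary stand-in, not part of this solver):
%   n = 100; A = randn(n); A = .5*(A + A');
%   problem.M = spherefactory(n);
%   problem.cost = @(x) -x'*A*x;
%   problem.grad = @(x) problem.M.egrad2rgrad(x, -2*A*x);
%   x = trustregions(problem); % dominant eigenvector of A (up to sign)
% Since no Hessian is supplied here, the solver warns and falls back on
% the gradient-based approximation described above.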
% This file is part of Manopt: www.manopt.org.
% This code is an adaptation to Manopt of the original GenRTR code:
% RTR - Riemannian Trust-Region
% (c) 2004-2007, P.-A. Absil, C. G. Baker, K. A. Gallivan
% Florida State University
% School of Computational Science
% (http://www.math.fsu.edu/~cbaker/GenRTR/?page=download)
% See accompanying license file.
% The adaptation was executed by Nicolas Boumal.
%
% Change log:
%
% NB April 3, 2013:
% tCG now returns the Hessian along the returned direction eta, so
% that we do not compute that Hessian redundantly: some savings at
% each iteration. Similarly, if the useRand flag is on, we spare an
% extra Hessian computation at each outer iteration too, owing to
% some modifications in the Cauchy point section of the code specific
% to useRand = true.
%
% NB Aug. 22, 2013:
% This function is now Octave compatible. The transition called for
% two changes which would otherwise not be advisable. (1) tic/toc is
% now used as is, as opposed to the safer way:
% t = tic(); elapsed = toc(t);
% And (2), the (formerly inner) function savestats was moved outside
% the main function to not be nested anymore. This is arguably less
% elegant, but Octave does not (and likely will not) support nested
% functions.
%
% NB Dec. 2, 2013:
% The in-code documentation was largely revised and expanded.
%
% NB Dec. 2, 2013:
% The former heuristic which triggered when rhonum was very small and
% forced rho = 1 has been replaced by a smoother heuristic which
% consists in regularizing rhonum and rhoden before computing their
% ratio. It is tunable via options.rho_regularization. Furthermore,
% the solver now detects if tCG did not obtain a model decrease
% (which is theoretically impossible but may happen because of
% numerical errors and/or because of a nonlinear/nonsymmetric Hessian
% operator, which is the case for finite difference approximations).
% When such an anomaly is detected, the step is rejected and the
% trust region radius is decreased.
%
% NB Dec. 3, 2013:
% The stepsize is now registered at each iteration, at a small
% additional cost. The defaults for Delta_bar and Delta0 are better
% defined. Setting Delta_bar in the options will automatically set
% Delta0 accordingly. In Manopt 1.0.4, the defaults for these options
% were not treated appropriately because of an incorrect use of the
% isfield() built-in function.
% Verify that the problem description is sufficient for the solver.
if ~canGetCost(problem)
warning('manopt:getCost', ...
'No cost provided. The algorithm will likely abort.');
end
if ~canGetGradient(problem)
warning('manopt:getGradient', ...
'No gradient provided. The algorithm will likely abort.');
end
if ~canGetHessian(problem)
warning('manopt:getHessian:approx', ...
'No Hessian provided. Using an approximation instead.');
end
% Define some strings for display
tcg_stop_reason = {'negative curvature',...
'exceeded trust region',...
'reached target residual-kappa',...
'reached target residual-theta',...
'dimension exceeded',...
'model increased'};
% Set local defaults here
localdefaults.verbosity = 2;
localdefaults.maxtime = inf;
localdefaults.miniter = 3;
localdefaults.maxiter = 1000;
localdefaults.mininner = 1;
localdefaults.maxinner = problem.M.dim();
localdefaults.tolgradnorm = 1e-6;
localdefaults.kappa = 0.1;
localdefaults.theta = 1.0;
localdefaults.rho_prime = 0.1;
localdefaults.useRand = false;
localdefaults.rho_regularization = 1e3;
% Merge global and local defaults, then merge w/ user options, if any.
localdefaults = mergeOptions(getGlobalDefaults(), localdefaults);
if ~exist('options', 'var') || isempty(options)
options = struct();
end
options = mergeOptions(localdefaults, options);
% Set default Delta_bar and Delta0 separately to deal with additional
% logic: if Delta_bar is provided but not Delta0, let Delta0 automatically
% be some fraction of the provided Delta_bar.
if ~isfield(options, 'Delta_bar')
if isfield(problem.M, 'typicaldist')
options.Delta_bar = problem.M.typicaldist();
else
options.Delta_bar = sqrt(problem.M.dim());
end
end
if ~isfield(options,'Delta0')
options.Delta0 = options.Delta_bar / 8;
end
% Check some option values
assert(options.rho_prime < 1/4, ...
'options.rho_prime must be strictly smaller than 1/4.');
assert(options.Delta_bar > 0, ...
'options.Delta_bar must be positive.');
assert(options.Delta0 > 0 && options.Delta0 < options.Delta_bar, ...
'options.Delta0 must be positive and smaller than Delta_bar.');
% It is sometimes useful to check what the actual option values are.
if options.verbosity >= 3
disp(options);
end
% Create a store database
storedb = struct();
tic();
% If no initial point x is given by the user, generate one at random.
if ~exist('x', 'var') || isempty(x)
x = problem.M.rand();
end
%% Initializations
% k counts the outer (TR) iterations. The semantic is that k counts the
% number of iterations fully executed so far.
k = 0;
% initialize solution and companion measures: f(x), fgrad(x)
[fx fgradx storedb] = getCostGrad(problem, x, storedb);
norm_grad = problem.M.norm(x, fgradx);
% initialize trust-region radius
Delta = options.Delta0;
% Save stats in a struct array info, and preallocate
% (see http://people.csail.mit.edu/jskelly/blog/?x=entry:entry091030-033941)
if ~exist('used_cauchy', 'var')
used_cauchy = [];
end
stats = savestats(problem, x, storedb, options, k, fx, norm_grad, Delta);
info(1) = stats;
info(min(10000, options.maxiter+1)).iter = [];
% ** Display:
if options.verbosity == 2
fprintf(['%3s %3s %5s %5s ',...
'f: %e |grad|: %e\n'],...
' ',' ',' ',' ', fx, norm_grad);
elseif options.verbosity > 2
fprintf('************************************************************************\n');
fprintf('%3s %3s k: %5s num_inner: %5s %s\n',...
'','','______','______','');
fprintf(' f(x) : %e |grad| : %e\n', fx, norm_grad);
fprintf(' Delta : %f\n', Delta);
end
% **********************
% ** Start of TR loop **
% **********************
while true
% Start clock for this outer iteration
tic();
% Run standard stopping criterion checks
[stop reason] = stoppingcriterion(problem, x, options, info, k+1);
% If the stopping criterion that triggered is the tolerance on the
% gradient norm but we are using randomization, make sure we make at
% least miniter iterations to give randomization a chance at escaping
% saddle points.
if stop == 2 && options.useRand && k < options.miniter
stop = 0;
end
if stop
if options.verbosity >= 1
fprintf([reason '\n']);
end
break;
end
if options.verbosity > 2 || options.debug > 0
fprintf('************************************************************************\n');
end
% *************************
% ** Begin TR Subproblem **
% *************************
% Determine eta0
if ~options.useRand
% Pick the zero vector
eta = problem.M.zerovec(x);
else
% Random vector in T_x M (this has to be very small)
eta = problem.M.lincomb(x, 1e-6, problem.M.randvec(x));
% Must be inside trust-region
while problem.M.norm(x, eta) > Delta
eta = problem.M.lincomb(x, sqrt(sqrt(eps)), eta);
end
end
% solve TR subproblem
[eta Heta numit stop_inner storedb] = ...
tCG(problem, x, fgradx, eta, Delta, options, storedb);
srstr = tcg_stop_reason{stop_inner};
% This is only computed for logging purposes, because it may be useful
% for some user-defined stopping criteria. If this is not cheap for
% specific application (compared to evaluating the cost), we should
% reconsider this.
norm_eta = problem.M.norm(x, eta);
if options.debug > 0
testangle = problem.M.inner(x, eta, fgradx) / (norm_eta*norm_grad);
end
% If using randomized approach, compare result with the Cauchy point.
% Convergence proofs assume that we achieve at least the reduction of
% the Cauchy point. After this if-block, either all eta-related
% quantities have been changed consistently, or none of them have
% changed.
if options.useRand
used_cauchy = false;
% Check the curvature,
[Hg storedb] = getHessian(problem, x, fgradx, storedb);
g_Hg = problem.M.inner(x, fgradx, Hg);
if g_Hg <= 0
tau_c = 1;
else
tau_c = min( norm_grad^3/(Delta*g_Hg) , 1);
end
% and generate the Cauchy point.
eta_c = problem.M.lincomb(x, -tau_c * Delta / norm_grad, fgradx);
Heta_c = problem.M.lincomb(x, -tau_c * Delta / norm_grad, Hg);
% Now that we have computed the Cauchy point in addition to the
% returned eta, we might as well keep the best of them.
mdle = fx + problem.M.inner(x, fgradx, eta) ...
+ .5*problem.M.inner(x, Heta, eta);
mdlec = fx + problem.M.inner(x, fgradx, eta_c) ...
+ .5*problem.M.inner(x, Heta_c, eta_c);
if mdle > mdlec
eta = eta_c;
Heta = Heta_c; % added April 11, 2012
used_cauchy = true;
end
end
% Compute the retraction of the proposal
x_prop = problem.M.retr(x, eta);
% Compute the function value of the proposal
[fx_prop storedb] = getCost(problem, x_prop, storedb);
% Will we accept the proposed solution or not?
% Check the performance of the quadratic model against the actual cost.
rhonum = fx - fx_prop;
rhoden = -problem.M.inner(x, fgradx, eta) ...
-.5*problem.M.inner(x, eta, Heta);
% Heuristic -- added Dec. 2, 2013 (NB) to replace the former heuristic.
% This heuristic is documented in the book by Conn Gould and Toint on
% trust-region methods, section 17.4.2.
% rhonum measures the difference between two numbers. Close to
% convergence, these two numbers are very close to each other, so
% that computing their difference is numerically challenging: there may
% be a significant loss in accuracy. Since the acceptance or rejection
% of the step is conditioned on the ratio between rhonum and rhoden,
% large errors in rhonum result in a large error in rho, hence in
% erratic acceptance / rejection. Meanwhile, close to convergence,
% steps are usually trustworthy and we should transition to a Newton-
% like method, with rho=1 consistently. The heuristic thus shifts both
% rhonum and rhoden by a small amount such that far from convergence,
% the shift is irrelevant and close to convergence, the ratio rho goes
% to 1, effectively promoting acceptance of the step.
% The rationale is that close to convergence, both rhonum and rhoden
% are quadratic in the distance between x and x_prop. Thus, when this
% distance is on the order of sqrt(eps), the value of rhonum and rhoden
% is on the order of eps, which is indistinguishable from the numerical
% error, resulting in badly estimated rho's.
% For abs(fx) < 1, this heuristic is invariant under offsets of f but
% not under scaling of f. For abs(fx) > 1, the opposite holds. This
% should not alarm us, as this heuristic only triggers at the very last
% iterations if very fine convergence is demanded.
rho_reg = max(1, abs(fx)) * eps * options.rho_regularization;
rhonum = rhonum + rho_reg;
rhoden = rhoden + rho_reg;
if options.debug > 0
fprintf('DBG: rhonum : %e\n', rhonum);
fprintf('DBG: rhoden : %e\n', rhoden);
end
% This is always true if a linear, symmetric operator is used for the
% Hessian (approximation) and if we had infinite numerical precision.
% In practice, nonlinear approximations of the Hessian such as the
% built-in finite difference approximation and finite numerical
% accuracy can cause the model to increase. In such scenarios, we
% decide to force a rejection of the step and a reduction of the
% trust-region radius. We test the sign of the regularized rhoden since
% the regularization is supposed to capture the accuracy to which
% rhoden is computed: if rhoden were negative before regularization but
% not after, that should not be (and is not) detected as a failure.
model_decreased = (rhoden >= 0);
if ~model_decreased
srstr = [srstr ', model did not decrease']; %#ok<AGROW>
end
rho = rhonum / rhoden;
if options.debug > 0
m = @(x, eta) ...
getCost(problem, x, storedb) + ...
getDirectionalDerivative(problem, x, eta, storedb) + ...
.5*problem.M.inner(x, getHessian(problem, x, eta, storedb), eta);
zerovec = problem.M.zerovec(x);
actrho = (fx - fx_prop) / (m(x, zerovec) - m(x, eta));
fprintf('DBG: new f(x) : %e\n', fx_prop);
fprintf('DBG: actual rho : %e\n', actrho);
fprintf('DBG: used rho : %e\n', rho);
end
% Choose the new TR radius based on the model performance
trstr = ' ';
% If the actual decrease is smaller than 1/4 of the predicted decrease,
% then reduce the TR radius.
if rho < 1/4 || ~model_decreased
trstr = 'TR-';
Delta = Delta/4;
% If the actual decrease is at least 3/4 of the predicted decrease and
% the tCG (inner solve) hit the TR boundary, increase the TR radius.
elseif rho > 3/4 && (stop_inner == 1 || stop_inner == 2)
trstr = 'TR+';
Delta = min(2*Delta, options.Delta_bar);
end
% Otherwise, keep the TR radius constant.
% Choose to accept or reject the proposed step based on the model
% performance.
if model_decreased && rho > options.rho_prime
accept = true;
accstr = 'acc';
x = x_prop;
fx = fx_prop;
[fgradx storedb] = getGradient(problem, x, storedb);
norm_grad = problem.M.norm(x, fgradx);
else
accept = false;
accstr = 'REJ';
end
% Make sure we don't use too much memory for the store database
storedb = purgeStoredb(storedb, options.storedepth);
% k is the number of iterations we have accomplished.
k = k + 1;
% Log statistics for freshly executed iteration.
% Everything after this in the loop is not accounted for in the timing.
stats = savestats(problem, x, storedb, options, k, fx, norm_grad, ...
Delta, info, rho, rhonum, rhoden, accept, numit, ...
norm_eta, used_cauchy);
info(k+1) = stats; %#ok<AGROW>
% ** Display:
if options.verbosity == 2,
fprintf(['%3s %3s k: %5d num_inner: %5d ', ...
'f: %e |grad|: %e %s\n'], ...
accstr,trstr,k,numit,fx,norm_grad,srstr);
elseif options.verbosity > 2,
if options.useRand && used_cauchy,
fprintf('USED CAUCHY POINT\n');
end
fprintf('%3s %3s k: %5d num_inner: %5d %s\n', ...
accstr, trstr, k, numit, srstr);
fprintf(' f(x) : %e |grad| : %e\n',fx,norm_grad);
if options.debug > 0
fprintf(' Delta : %f |eta| : %e\n',Delta,norm_eta);
end
fprintf(' rho : %e\n',rho);
end
if options.debug > 0,
fprintf('DBG: cos ang(eta,gradf): %d\n',testangle);
if rho == 0
fprintf('DBG: rho = 0, this will likely hinder further convergence.\n');
end
end
end % of TR loop (counter: k)
% Restrict info struct-array to useful part
info = info(1:k+1);
if (options.verbosity > 2) || (options.debug > 0),
fprintf('************************************************************************\n');
end
if (options.verbosity > 0) || (options.debug > 0)
fprintf('Total time is %f [s] (excludes statsfun)\n', info(end).time);
end
% Return the best cost reached
cost = fx;
end
% Routine in charge of collecting the current iteration stats
function stats = savestats(problem, x, storedb, options, k, fx, ...
norm_grad, Delta, info, rho, rhonum, ...
rhoden, accept, numit, norm_eta, used_cauchy)
stats.iter = k;
stats.cost = fx;
stats.gradnorm = norm_grad;
stats.Delta = Delta;
if k == 0
stats.time = toc();
stats.rho = inf;
stats.rhonum = NaN;
stats.rhoden = NaN;
stats.accepted = true;
stats.numinner = NaN;
stats.stepsize = NaN;
if options.useRand
stats.cauchy = false;
end
else
stats.time = info(k).time + toc();
stats.rho = rho;
stats.rhonum = rhonum;
stats.rhoden = rhoden;
stats.accepted = accept;
stats.numinner = numit;
stats.stepsize = norm_eta;
if options.useRand,
stats.cauchy = used_cauchy;
end
end
% See comment about statsfun above: the x and store passed to statsfun
% are that of the most recently accepted point after the iteration
% fully executed.
stats = applyStatsfun(problem, x, storedb, options, stats);
end
| github | skovnats/madmm-master | calcVoronoiRegsCircCent.m | .m | madmm-master/compressed_modes/LB/calcVoronoiRegsCircCent.m | 2,497 | utf_8 | b33c6683c5fafa8ead79d9436c30477f |
function [VorRegsVertices] = calcVoronoiRegsCircCent(Tri, Vertices)
%% Preps.:
A1 = Vertices(Tri(:,1), :);
A2 = Vertices(Tri(:,2), :);
A3 = Vertices(Tri(:,3), :);
a = A1 - A2; % Nx3
b = A3 - A2; % Nx3
c = A1 - A3; % Nx3
M1 = 1/2*(A2 + A3); % Nx3
M2 = 1/2*(A1 + A3); % Nx3
M3 = 1/2*(A2 + A1); % Nx3
N = size(A1, 1);
%% Circumcenters calculation
O = zeros(size(A1));
obtuseAngMat = [(dot(a, b, 2) < 0), (dot(-b, c, 2) < 0), (dot(-c, -a, 2) < 0)];
obtuseAngInds = any(obtuseAngMat, 2);
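% (Columns 1, 2, 3 of obtuseAngMat flag an obtuse angle at A2, A3 and A1,
% respectively; for those triangles the circumcenter is replaced below by
% the corresponding edge midpoint M1, M2 or M3.)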
O(obtuseAngInds, :) = ...
M1(obtuseAngInds, :).*(obtuseAngMat(obtuseAngInds, 1)*[1 1 1]) + ...
M2(obtuseAngInds, :).*(obtuseAngMat(obtuseAngInds, 2)*[1 1 1]) + ...
M3(obtuseAngInds, :).*(obtuseAngMat(obtuseAngInds, 3)*[1 1 1]);
OM3 = -repmat(dot(c, a, 2), 1, 3).*b + repmat(dot(b, a, 2), 1, 3).*c;
OM1 = -repmat(dot(c, b, 2), 1, 3).*a + repmat(dot(a, b, 2), 1, 3).*c;
M1M3 = M1 - M3;
tmp = M3 + OM3.*repmat(dot(cross(M1M3, OM1, 2), cross(OM3, OM1, 2), 2), 1, 3)./...
repmat(dot(cross(OM3, OM1, 2), cross(OM3, OM1, 2), 2), 1, 3);
O(not(obtuseAngInds), :) = tmp(not(obtuseAngInds), :);
%% Voronoi Regions calculation (for each vertex in each triangle.
VorRegs = zeros(N, 3);
% For all the triangles (though the calculation is correct for
% non-obtuse triangles only):
VorRegs(:,1) = calcArea(A1, M3, O) + calcArea(A1, M2, O);
VorRegs(:,2) = calcArea(A2, M1, O) + calcArea(A2, M3, O);
VorRegs(:,3) = calcArea(A3, M2, O) + calcArea(A3, M1, O);
% % For obtuse triangles:
% TriA = calcArea(A1, A2, A3);
% VorRegs(obtuseAngInds, :) = (1/4*ones(sum(obtuseAngInds), 3) + ...
% 1/4*obtuseAngMat(obtuseAngInds, :)).*repmat(TriA(obtuseAngInds), [1 3]);
%% Voronoi Regions per Vertex
M = size(Vertices, 1);
% VorRegsVertices = zeros(M, 1);
VorRegsVertices = sparse(M, M);
for k = 1:M
% VorRegsVertices(k) = sum(VorRegs(Tri == k));
% As I understand it, the diagonal holds the areas of the Voronoi cells
VorRegsVertices(k, k) = sum(VorRegs(Tri == k));
%% UPD 12.08.2012 by Artiom
VorRegsVertices(k, k) = max( VorRegsVertices(k, k), 1e-7 );
end
end
%% --------------------------------------------------------------------- %%
function [area_tri] = calcArea(A, B, C)
% Calculate areas of triangles
% Calculate area of each triangle
% area_tri = cross(B - A, C - A, 2);
% area_tri = 1/2*sqrt(sum(area_tri.^2, 2));
area_tri = 1/2*sqrt(sum((B - A).^2, 2).*sum((C - A).^2, 2) - dot(B - A, C - A, 2).^2);
end
| github | skovnats/madmm-master | gencols.m | .m | madmm-master/compressed_modes/LB/gencols.m | 5,738 | utf_8 | 497e10b44a80cff59db8f7c18b5a9608 |
function colors = gencols(n_colors,bg,func)
% DISTINGUISHABLE_COLORS: pick colors that are maximally perceptually distinct
%
% When plotting a set of lines, you may want to distinguish them by color.
% By default, Matlab chooses a small set of colors and cycles among them,
% and so if you have more than a few lines there will be confusion about
% which line is which. To fix this problem, one would want to be able to
% pick a much larger set of distinct colors, where the number of colors
% equals or exceeds the number of lines you want to plot. Because our
% ability to distinguish among colors has limits, one should choose these
% colors to be "maximally perceptually distinguishable."
%
% This function generates a set of colors which are distinguishable
% by reference to the "Lab" color space, which more closely matches
% human color perception than RGB. Given an initial large list of possible
% colors, it iteratively chooses the entry in the list that is farthest (in
% Lab space) from all previously-chosen entries. While this "greedy"
% algorithm does not yield a global maximum, it is simple and efficient.
% Moreover, the sequence of colors is consistent no matter how many you
% request, which facilitates the users' ability to learn the color order
% and avoids major changes in the appearance of plots when adding or
% removing lines.
%
% Syntax:
% colors = distinguishable_colors(n_colors)
% Specify the number of colors you want as a scalar, n_colors. This will
% generate an n_colors-by-3 matrix, each row representing an RGB
% color triple. If you don't precisely know how many you will need in
% advance, there is no harm (other than execution time) in specifying
% slightly more than you think you will need.
%
% colors = distinguishable_colors(n_colors,bg)
% This syntax allows you to specify the background color, to make sure that
% your colors are also distinguishable from the background. Default value
% is white. bg may be specified as an RGB triple or as one of the standard
% "ColorSpec" strings. You can even specify multiple colors:
% bg = {'w','k'}
% or
% bg = [1 1 1; 0 0 0]
% will only produce colors that are distinguishable from both white and
% black.
%
% colors = distinguishable_colors(n_colors,bg,rgb2labfunc)
% By default, distinguishable_colors uses the image processing toolbox's
% color conversion functions makecform and applycform. Alternatively, you
% can supply your own color conversion function.
%
% Example:
% c = distinguishable_colors(25);
% figure
% image(reshape(c,[1 size(c)]))
%
% Example using the file exchange's 'colorspace':
% func = @(x) colorspace('RGB->Lab',x);
% c = distinguishable_colors(25,'w',func);
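%
% Note: in this repository the function is named gencols, so the calls
% above read, e.g., c = gencols(25);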
% Copyright 2010-2011 by Timothy E. Holy
% Parse the inputs
if (nargin < 2)
bg = [1 1 1]; % default white background
else
if iscell(bg)
% User specified a list of colors as a cell array
bgc = bg;
for i = 1:length(bgc)
bgc{i} = parsecolor(bgc{i});
end
bg = cat(1,bgc{:});
else
% User specified a numeric array of colors (n-by-3)
bg = parsecolor(bg);
end
end
% Generate a sizable number of RGB triples. This represents our space of
% possible choices. By starting in RGB space, we ensure that all of the
% colors can be generated by the monitor.
n_grid = 30; % number of grid divisions along each axis in RGB space
x = linspace(0,1,n_grid);
[R,G,B] = ndgrid(x,x,x);
rgb = [R(:) G(:) B(:)];
if (n_colors > size(rgb,1)/3)
error('You can''t readily distinguish that many colors');
end
% Convert to Lab color space, which more closely represents human
% perception
if (nargin > 2)
lab = func(rgb);
bglab = func(bg);
else
C = makecform('srgb2lab');
lab = applycform(rgb,C);
bglab = applycform(bg,C);
end
% If the user specified multiple background colors, compute distances
% from the candidate colors to the background colors
mindist2 = inf(size(rgb,1),1);
for i = 1:size(bglab,1)-1
dX = bsxfun(@minus,lab,bglab(i,:)); % displacement all colors from bg
dist2 = sum(dX.^2,2); % square distance
mindist2 = min(dist2,mindist2); % dist2 to closest previously-chosen color
end
% Iteratively pick the color that maximizes the distance to the nearest
% already-picked color
colors = zeros(n_colors,3);
lastlab = bglab(end,:); % initialize by making the "previous" color equal to background
for i = 1:n_colors
dX = bsxfun(@minus,lab,lastlab); % displacement of last from all colors on list
dist2 = sum(dX.^2,2); % square distance
mindist2 = min(dist2,mindist2); % dist2 to closest previously-chosen color
[~,index] = max(mindist2); % find the entry farthest from all previously-chosen colors
colors(i,:) = rgb(index,:); % save for output
lastlab = lab(index,:); % prepare for next iteration
end
end
function c = parsecolor(s)
if ischar(s)
c = colorstr2rgb(s);
elseif isnumeric(s) && size(s,2) == 3
c = s;
else
error('MATLAB:InvalidColorSpec','Color specification cannot be parsed.');
end
end
function c = colorstr2rgb(c)
% Convert a color string to an RGB value.
% This is cribbed from Matlab's whitebg function.
% Why don't they make this a stand-alone function?
rgbspec = [1 0 0;0 1 0;0 0 1;1 1 1;0 1 1;1 0 1;1 1 0;0 0 0];
cspec = 'rgbwcmyk';
k = find(cspec==c(1));
if isempty(k)
error('MATLAB:InvalidColorString','Unknown color string.');
end
if k~=3 || length(c)==1,
c = rgbspec(k,:);
elseif length(c)>2,
if strcmpi(c(1:3),'bla')
c = [0 0 0];
elseif strcmpi(c(1:3),'blu')
c = [0 0 1];
else
error('MATLAB:UnknownColorString', 'Unknown color string.');
end
end
end
| github | skovnats/madmm-master | calcLB.m | .m | madmm-master/compressed_modes/LB/calcLB.m | 4,269 | utf_8 | 5d1e4c81097a7b2a73eac18edb6af2d1 |
function [M, DiagS] = calcLB(shape)
% The L-B operator matrix is computed by DiagS^-1*M.
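% Example (illustrative sketch; 'shape' is assumed to carry X, Y, Z and
% TRIV fields, as elsewhere in this code base):
%   [M, DiagS] = calcLB(shape);
%   % e.g. first 10 eigenfunctions via the generalized eigenproblem
%   % M*V = DiagS*V*E (equivalent to eigenvectors of DiagS^-1*M):
%   [V, E] = eigs(M, DiagS, 10, 'sm');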
% Calculate the weights matrix M
M = calcCotMatrixM1([shape.X, shape.Y, shape.Z], shape.TRIV);
M = -M;
% Calculate the diagonal of matrix S
DiagS = calcVoronoiRegsCircCent(shape.TRIV, [shape.X, shape.Y, shape.Z]);
%%
DiagS = abs( DiagS );
%%
end
% ----------------------------------------------------------------------- %
function [M] = calcCotMatrixM1(Vertices, Tri)
N = size(Vertices, 1);
M = sparse(N, N);
v1 = Vertices(Tri(:, 2), :) - Vertices(Tri(:, 1), :); %v1 = v1./repmat(normVec(v1), 1, 3);
v2 = Vertices(Tri(:, 3), :) - Vertices(Tri(:, 1), :); %v2 = v2./repmat(normVec(v2), 1, 3);
v3 = Vertices(Tri(:, 3), :) - Vertices(Tri(:, 2), :); %v3 = v3./repmat(normVec(v3), 1, 3);
% cot1 = dot( v1, v2, 2)./normVec(cross( v1, v2, 2)); %cot1(cot1 < 0) = 0;
% cot2 = dot(-v1, v3, 2)./normVec(cross(-v1, v3, 2)); %cot2(cot2 < 0) = 0;
% cot3 = dot(-v2, -v3, 2)./normVec(cross(-v2, -v3, 2)); %cot3(cot3 < 0) = 0;
tmp1 = dot( v1, v2, 2); cot1 = tmp1./sqrt(normVec(v1).^2.*normVec(v2).^2 - (tmp1).^2); clear tmp1;
tmp2 = dot(-v1, v3, 2); cot2 = tmp2./sqrt(normVec(v1).^2.*normVec(v3).^2 - (tmp2).^2); clear tmp2;
tmp3 = dot(-v2, -v3, 2); cot3 = tmp3./sqrt(normVec(v2).^2.*normVec(v3).^2 - (tmp3).^2); clear tmp3;
for k = 1:size(Tri, 1)
M(Tri(k, 1), Tri(k, 2)) = M(Tri(k, 1), Tri(k, 2)) + cot3(k);
M(Tri(k, 1), Tri(k, 3)) = M(Tri(k, 1), Tri(k, 3)) + cot2(k);
M(Tri(k, 2), Tri(k, 3)) = M(Tri(k, 2), Tri(k, 3)) + cot1(k);
end
M = 0.5*(M + M'); % here the normalization (symmetrization) is done (comment - Artiom)
% inds = sub2ind([N, N], [Tri(:, 2); Tri(:, 1); Tri(:, 1)], [Tri(:, 3); Tri(:, 3); Tri(:, 2)]);
% M(inds) = M(inds) + [cot1; cot2; cot3];
% inds = sub2ind([N, N], [Tri(:, 3); Tri(:, 3); Tri(:, 2)], [Tri(:, 2); Tri(:, 1); Tri(:, 1)]);
% M(inds) = M(inds) + [cot1; cot2; cot3];
% M = 0.5*(M + M');
% % M(M < 0) = 0;
M = M - diag(sum(M, 2)); % making it Laplacian
function normV = normVec(vec)
normV = sqrt(sum(vec.^2, 2));
end
% function normalV = normalizeVec(vec)
% normalV = vec./repmat(normVec(vec), 1, 3);
% end
end
% ----------------------------------------------------------------------- %
function [M] = calcCotMatrixM(Vertices, Tri) %#ok<DEFNU>
N = size(Vertices, 1);
[transmat] = calcTransmat(N, Tri);
% Calculate the matrix M, when {M}_ij = (cot(alpha_ij) + cot(beta_ij))/2
% [transrow, transcol] = find(triu(transmat,1) > 0);
[transrow, transcol] = find((triu(transmat,1) > 0) | (triu(transmat',1) > 0));
M = sparse(N, N);
for k = 1:length(transrow)
P = transrow(k);
Q = transcol(k);
S = transmat(P,Q);
R = transmat(Q,P);
%%
% u1 = Vertices(Q, :) - Vertices(R, :); u1 = u1./norm(u1);
% v1 = Vertices(P, :) - Vertices(R, :); v1 = v1./norm(v1);
% u2 = Vertices(P, :) - Vertices(S, :); u2 = u2./norm(u2);
% v2 = Vertices(Q, :) - Vertices(S, :); v2 = v2./norm(v2);
% M(P,Q) = -1/2*(dot(u1, v1)/norm(cross(u1, v1)) + dot(u2, v2)/norm(cross(u2, v2)));
tmp1 = 0;
tmp2 = 0;
if (R ~= 0)
u1 = Vertices(Q, :) - Vertices(R, :); u1 = u1./norm(u1);
v1 = Vertices(P, :) - Vertices(R, :); v1 = v1./norm(v1);
tmp1 = dot(u1, v1)/norm(cross(u1, v1));
end
if (S ~= 0)
u2 = Vertices(P, :) - Vertices(S, :); u2 = u2./norm(u2);
v2 = Vertices(Q, :) - Vertices(S, :); v2 = v2./norm(v2);
tmp2 = dot(u2, v2)/norm(cross(u2, v2));
end
M(P,Q) = -1/2*(tmp1 + tmp2);
%%
end
M = 0.5*(M + M');
M = M - diag(sum(M, 2));
end
% ----------------------------------------------------------------------- %
function [transmat] = calcTransmat(N, Tri)
% Calculation of the map of all the connected vertices: for each i,j,
% transmat(i,j) equals the third vertex of the triangle which connects
% them; if the vertices aren't connected - transmat(i,j) = 0.
transmat = sparse(N, N);
transmat(sub2ind(size(transmat), Tri(:,1), Tri(:,2))) = Tri(:,3);
transmat(sub2ind(size(transmat), Tri(:,2), Tri(:,3))) = Tri(:,1);
transmat(sub2ind(size(transmat), Tri(:,3), Tri(:,1))) = Tri(:,2);
end
| github | AndrewCWalker/rsm_tool_suite-master | gCovMat.m | .m | rsm_tool_suite-master/Automated_RSM/MCMC/gpmsa/matlab/gCovMat.m | 2,683 | utf_8 | c21141d4e605a0a8905593b499eea2cb |
% function Scov = gCovMat(dist,beta,lamz,lams)
% given n x p matrix x of spatial coords, and dependence parameters
% beta p x 1, this function returns a matrix built from the
% correlation function
% Scov_ij = exp{- sum_k=1:p beta(k)*(x(i,k)-x(j,k))^2 } ./lamz
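% As a point of reference only (not what the code below does), the same
% matrix could be built densely from coordinates x (n x p) roughly as
%   Scov(i,j) = exp(-sum(beta'.*(x(i,:)-x(j,:)).^2))/lamz, i ~= j,
% with 1/lamz (+ 1/lams, if given) on the diagonal. The implementation
% below instead reuses the precomputed distance structure dist, where
% dist.d appears to hold the squared coordinate differences of the
% relevant index pairs.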
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Author: James R. Gattiker, Los Alamos National Laboratory
%
% This file was distributed as part of the GPM/SA software package
% Los Alamos Computer Code release LA-CC-06-079, C-06,114
%
% Copyright 2008. Los Alamos National Security, LLC. This material
% was produced under U.S. Government contract DE-AC52-06NA25396 for
% Los Alamos National Laboratory (LANL), which is operated by Los Alamos
% National Security, LLC for the U.S. Department of Energy. The U.S.
% Government has rights to use, reproduce, and distribute this software.
% NEITHER THE GOVERNMENT NOR LOS ALAMOS NATIONAL SECURITY, LLC MAKES ANY
% WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF
% THIS SOFTWARE. If software is modified to produce derivative works,
% such modified software should be clearly marked, so as not to confuse
% it with the version available from LANL.
% Additionally, this program is free software; you can redistribute it
% and/or modify it under the terms of the GNU General Public License as
% published by the Free Software Foundation; version 2.0 of the License.
% Accordingly, this program is distributed in the hope that it will be
% useful, but WITHOUT ANY WARRANTY; without even the implied warranty
% of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
% General Public License for more details.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function Scov = gCovMat(dist,beta,lamz,lams)
% check for case of a null dataset
%if isempty(dist.d); Scov=[]; return; end
switch dist.type
case 1
n=dist.n;
Scov=zeros(n);
if n>0 % if it's not a null dataset, do the distances
% Scov(dist.indm)=exp(-(dist.d*beta))./lamz; %%% this is faster:
t=exp(-(dist.d*beta))./lamz;
Scov(dist.indm)=t;
Scov=Scov+Scov';
diagInds = 1:(n+1):(n*n);
Scov(diagInds)=1/lamz;
if exist('lams','var')
Scov(diagInds)=Scov(diagInds) + 1/lams;
end
end
case 2
n=dist.n; m=dist.m;
Scov=zeros(n,m);
if n*m >0 % if it's not a null dataset, do the distances
%Scov(dist.indm)=exp(-(dist.d*beta))./lamz; %%% this is faster:
t=exp(-(dist.d*beta))./lamz;
Scov(dist.indm)=t;
end
otherwise
error('invalid distance matrix type in gaspcov');
end
| github | AndrewCWalker/rsm_tool_suite-master | qEst.m | .m | rsm_tool_suite-master/Automated_RSM/MCMC/gpmsa/matlab/qEst.m | 11,985 | utf_8 | b02a0efa556c1750551ac034a08a8883 |
function th=qEst(pout,pvec,thProb, densFun, varargin)
% function th=qEst(pout,pvec,thProb, densFun, varargin)
% collect response MLpost into sets H, M, L, based on the
% vl and vh estimates. Estimate response from M set, given integrated
% density from L and H.
% Operations are generally defined on the native scale, requiring the
% simData.orig.xmin and simData.orig.xrange structures to be in place
% Arguments:
% pvec - candidates for random draws of parameter sets
% thProb - threshold probability
% densFun - density function handle, to get probability of draw from X.
% The density function computes a density of each row vector in the
% matrix it is called with. It operates on the native scale space.
% Optional arguments:
% poolSamp - the number of samples in the vl estimation pool; def. 1e6
% drawNewPoints - default 1; if false load previously saved vl est. pool
% savePool - default 0; if true, saved vl est. pool to 'estPoolDraws'
% numDraws - the number of threshold draws to perform; default 10
% rlzSamp - the sample size of realizations; default 500
% intMin, intMax - the boundaries of integration. Default is [0,1] in
% each dimension. Scale is native space.
% GPest - default false; true indicates that the samples from the M set
% should be modeled by a GP and oversampled from the mean
% GPestSamp - number of samples from M to generate per realization
% (default 1e5)
% doPlot, doPlot2 - default false, perform diagnostic plots (see code)
% saveFile - name of samples datafile, default 'estPoolDraws'
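% Example call (illustrative only; pout and pvec come from a GPM/SA
% analysis and are not defined here, and the uniform density below is a
% hypothetical placeholder for a real prior/posterior density):
%   densFun = @(xnative) ones(size(xnative,1),1);
%   th = qEst(pout, 1:10:length(pout.pvals), 0.95, densFun, 'numDraws', 5);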
numDraws=10;
rlzSamp=500;
poolSamp=1e6;
numVars=pout.model.p;
cubeMin=zeros(1,numVars);
cubeMax=ones(1,numVars);
drawNewPoints=1;
savePool=0;
GPest=0; GPestSamp=1e5;
doPlot1=0; doPlot2=0;
saveFile='estPoolDraws';
k1=2;
k2=3;
parseAssignVarargs({'numDraws','poolSamp','rlzSamp','cubeMin','cubeMax', ...
'drawNewPoints','savePool','GPest','GPestSamp',...
'doPlot1','doPlot2','saveFile','k1','k2'});
pvals=pout.pvals(pvec);
% set up the sampling region [offset range]
sampCube.range=cubeMax(:)'-cubeMin(:)';
sampCube.offset=cubeMin(:)';
% mapping from the sampling cube to the scaled x prediction area
% d starts in [0,1], projects to sampCube in native scale, then back to
% the prediction scaling.
% native = d*scrange+scoffset
% scaled = (native-pcoffset)/pcrange
% = (d*scrange+scoffset - pcoffset)/pcrange
% = d*scrange/pcrange + (scoffset-pcoffset)/pcrange
predMap.range=sampCube.range ./ pout.simData.orig.xrange;
predMap.offset=(sampCube.offset - pout.simData.orig.xmin) ...
./ pout.simData.orig.xrange;
% find the vl and vh estimates regions
if drawNewPoints
% get new points, and put them in the specified interval in scaled
% space.
d=rand(poolSamp,numVars) .* ...
repmat(predMap.range,poolSamp,1) + ...
repmat(predMap.offset,poolSamp,1);
% prob function is in native scale
p=densFun(d.*repmat(pout.simData.orig.xrange,poolSamp,1) + ...
repmat(pout.simData.orig.xmin,poolSamp,1) );
% pred is, of course, in scaled space
[r rs]=predictPointwise(pout,d,'mode','MAP');
% Normalize probability - outmoded
% p=p*prod(pout.simData.orig.xrange.*sampCube.range);
[v sri ]=calcRespThresh(p,r,thProb);
[vl sril]=calcRespThresh(p,r-k1*rs,thProb);
[vh srih]=calcRespThresh(p,r+k1*rs,thProb);
fprintf('MAP thresh=%f\n',v);
% break into 3 sets.
LFlag= (r+k2*rs)<vl;
UFlag= (r-k2*rs)>vh;
Ld=d(LFlag,:); Ud=d(UFlag,:); Md=d(~(LFlag|UFlag),:);
Lr=r(LFlag); Ur=r(UFlag); Mr=r(~(LFlag|UFlag));
Lp=p(LFlag); Up=p(UFlag); Mp=p(~(LFlag|UFlag));
Lprob=sum(Lp)/sum(p);
Uprob=sum(Up)/sum(p);
if savePool
fprintf('Saving new ML mean draws\n');
save(saveFile,'d','p','r','rs','v','sri','vl','sril','vh','srih',...
'thProb','poolSamp','LFlag','UFlag','Ld','Ud','Md',...
'Lr','Ur','Mr','Lp','Up','Mp','Lprob','Uprob') ;
end
else
fprintf('Loading saved ML mean draws\n');
load(saveFile);
fprintf('Loaded %d draws and threshold %f calculated sets \n',poolSamp,thProb);
end
fprintf('p(r<vl=%6.3f): %d elements with %f prob\n',vl,sum(LFlag),Lprob);
fprintf('p(r>vh=%6.3f): %d elements with %f prob\n',vh,sum(UFlag),Uprob);
if doPlot1
clf;
plot(cumsum(p(sri))/sum(p),r(sri), 'b'); hold on;
plot(cumsum(p(sril))/sum(p),r(sril)-k1*rs(sril), 'r');
plot(cumsum(p(srih))/sum(p),r(srih)+k1*rs(srih), 'r');
a=axis;
plot(thProb*[1 1], a([3 4]), 'k:');
plot([0 1], vl*[1 1], 'k:');
plot([0 1], vh*[1 1], 'k:');
plot([0 1], v*[1 1], 'k:');
end
if doPlot2
clf;
% Bill changed plotxy(Ld(ilinspace(1,end,10000),:),'.'); (3 calls)
plotxy(Ld(round(linspace(1,end,10000)),:),'.');
hold on;
plotxy(Md(round(linspace(1,end,1000)),:),'g.');
plotxy(Ud(round(linspace(1,end,2000)),:),'r.')
end
% Purge M set of the draws that are irrelevant due to no prob. mass
% we could mess up in the case of uniform distributions, throwing away
% everything by mistake, so make sure we have a lot of values before
% starting this
if length(unique(Mp))/length(Mp) > 0.1
Ms=sort(Mp,1,'descend');
pThresh = Ms(find(cumsum(Ms)/sum(Ms) > 0.9999,1));
Md=Md(Mp>pThresh,:);
Mr=Mr(Mp>pThresh);
Mp=Mp(Mp>pThresh);
end
% sample threshold with draws from the M-relevant set
%counter('stime',l,numDraws,10,0);
if size(Md,1)<rlzSamp
error('\n not enough samples for realization sample size \n');
end
if numDraws; th(numDraws).th=[]; end
for ii=1:numDraws
%counter(ii);
tic;
samp=gSample(size(Md,1),rlzSamp);
pred=gPredict(Md(samp, :),pvals(ceil(rand*end)), ...
pout.model,pout.data,'addResidVar',1,'returnMuSigma',1);
if GPest
% set up a new model that includes the new predictions in M
% (which was a realization in M)
fprintf('Drawing from GP mean ...')
pouts=pout;
pouts.model.m = pouts.model.m+rlzSamp;
pouts.model.w =[pouts.model.w;pred.Myhat'];
pouts.data.w=pouts.model.w;
pouts.data.zt=[pouts.data.zt; Md(samp,:)];
pouts.model.ztDist=genDist(pouts.data.zt);
% predict a whole lot of points, and keep whatever fraction is in
% M -- should it loop until a minimum number are found?
Mresp=[]; Mprob=[]; Mdes=[];
while length(Mresp)<GPestSamp
GPd=rand(GPestSamp,numVars) .* ...
repmat(predMap.range,GPestSamp,1) + ...
repmat(predMap.offset,GPestSamp,1);
[GPr GPrs]=predictPointwise(pouts,GPd,'toNative',0);
GPLFlag= (GPr+3*GPrs)<vl;
GPUFlag= (GPr-3*GPrs)>vh;
Mresp=[Mresp; GPr(~(GPLFlag|GPUFlag))];
GPMd=GPd(~(GPLFlag|GPUFlag),:);
GPMdNum=size(GPMd,1);
Mprob=[Mprob;
densFun(GPMd.*repmat(pout.simData.orig.xrange,GPMdNum,1) + ...
repmat(pout.simData.orig.xmin,GPMdNum,1) ) ];
Mdes=[Mdes;GPMd];
end
probs=[Lprob; (1-Lprob-Uprob)* Mprob/sum(Mprob); Uprob];
yhat=Mresp*pout.simData.orig.ysd + pout.simData.orig.ymean;
hats=[-Inf; yhat; Inf];
else
ypr=squeeze(pred.w);
yhat=ypr*pout.simData.orig.ysd + pout.simData.orig.ymean;
probs=[Lprob; (1-Lprob-Uprob)* Mp(samp)/sum(Mp(samp)); Uprob];
hats=[-Inf; yhat; Inf];
end
th(ii).th=calcRespThresh(probs,hats,thProb);
th(ii).probs=probs;
th(ii).yhat=hats;
if GPest
th(ii).Mdes=Mdes;
end
fprintf('Draw %2d threshold %f; took %4.1fs\n',ii,th(ii).th,toc);
end
%counter ( 'end' )
end
function [th sri]=calcRespThresh(p,r,thProb)
% do the magic
[sr sri]=sort(r);
syp=p(sri);
sypc=cumsum(syp)/sum(syp);
pthi=find( (sypc-thProb)>0 , 1);
% response cutoff between sypc(pthi-1) and sypc(pthi)
% linear interp
th=interp1(sypc([pthi-1 pthi]),sr([pthi-1 pthi]),thProb,'linear');
end
function [r rs]=predictPointwise(pout,xp,varargin)
mode='MAP';
pvec=1:length(pout.pvals);
toNative=1;
verbose=0;
parseAssignVarargs({'mode','pvec','toNative','verbose'});
switch(mode)
case 'MAP'
% get the most likely model
pvals=pout.pvals;
lp=[pvals.logPost]; [lpm lpi]=max(lp);
pvalM=pvals(lpi);
case 'Mean'
% get the mean model
vars={'betaU','lamUz','lamWs','lamWOs'};
for ii=1:length(vars)
pvalM.(vars{ii})=mean([pout.pvals(pvec).(vars{ii})],2);
end
end
% get the response from that model
chunk=50;
numSamp=size(xp,1);
if (numSamp/chunk)~=round(numSamp/chunk)
error('samples not a multiple of chunk (=%d)\n',chunk);
end
xp(xp<0)=0; xp(xp>1)=1;
r=zeros(numSamp,1);
rs=r;
SigDataInv=computeSigDataInv(pout,pvalM);
if verbose; fprintf('predictModML: predicting\n'); end
if verbose; counter('stime',1,numSamp,10,6); end
for ii=1:numSamp/chunk;
if verbose; counter(ii*chunk); end
x=xp((ii-1)*chunk +1:ii*chunk, :);
pred=gPredLocal(x,pout,pvalM,SigDataInv);
yhs=squeeze(pred.Myhat);
shs=sqrt(diag(pred.Syhat));
if toNative
r((ii-1)*chunk +1:ii*chunk)=...
yhs*pout.simData.orig.ysd+pout.simData.orig.ymean;
rs((ii-1)*chunk +1:ii*chunk)=shs*pout.simData.orig.ysd;
else
r((ii-1)*chunk +1:ii*chunk)=yhs;
rs((ii-1)*chunk +1:ii*chunk)=shs;
end
end
if verbose; counter('end'); end
end
function SigDataInv=computeSigDataInv(pout,pvals)
model=pout.model;
m=model.m;p=model.p;q=model.q;pu=model.pu;
betaU=reshape(pvals.betaU,p+q,pu);
lamUz=pvals.lamUz;
lamWs=pvals.lamWs; lamWOs=pvals.lamWOs;
diags1=diagInds(m*pu);
SigData=zeros(m*pu);
for jj=1:pu
bStart=(jj-1)*m+1; bEnd=bStart+m-1;
SigData(bStart:bEnd,bStart:bEnd)=...
gCovMat(model.ztDist,betaU(:,jj),lamUz(jj));
end
SigData(diags1)=SigData(diags1)+ ...
kron(1./(model.LamSim*lamWOs)',ones(1,m)) + ...
kron(1./(lamWs)',ones(1,m)) ;
SigDataInv=inv(SigData);
end
function pred=gPredLocal(xpred,pout,pvals,SigDataInv)
data=pout.data; model=pout.model;
m=model.m;p=model.p;q=model.q;pu=model.pu;
npred=size(xpred,1);
diags2=diagInds(npred*pu);
betaU=reshape(pvals.betaU,p+q,pu);
lamUz=pvals.lamUz;
lamWs=pvals.lamWs; lamWOs=pvals.lamWOs;
xpredDist=genDist(xpred);
zxpredDist=genDist2(data.zt,xpred);
SigPred=zeros(npred*pu);
for jj=1:pu
bStart=(jj-1)*npred+1; bEnd=bStart+npred-1;
SigPred(bStart:bEnd,bStart:bEnd)= ...
gCovMat(xpredDist,betaU(:,jj),lamUz(jj));
end
SigPred(diags2)=SigPred(diags2)+ ...
kron(1./(model.LamSim*lamWOs)',ones(1,npred)) + ... % resid var
kron(1./(lamWs)',ones(1,npred)) ;
SigCross=zeros(m*pu,npred*pu);
for jj=1:pu
bStartI=(jj-1)*m+1; bEndI=bStartI+m-1;
bStartJ=(jj-1) *npred+1; bEndJ=bStartJ+npred-1;
SigCross(bStartI:bEndI,bStartJ:bEndJ)=...
gCovMat(zxpredDist,betaU(:,jj),lamUz(jj));
end
% Get the stats for the prediction stuff.
W=(SigCross')*SigDataInv;
pred.Myhat=W*(data.w(:));
pred.Syhat=SigPred-W*SigCross;
end
| github | AndrewCWalker/rsm_tool_suite-master | gBoxPlot.m | .m | rsm_tool_suite-master/Automated_RSM/MCMC/gpmsa/matlab/gBoxPlot.m | 3,489 | utf_8 | c6c0b1747e1e919db397f161884e099c |
% function gBoxPlot(x,varargin)
% substitute for stats toolbox boxplot function, by Gatt
% shows a boxplot-like summary for each column of x
% lines of the box are at the lower quartile, median, and upper quartile
% whiskers extend to the most extreme values with 1.5 times the
% inter-quartile range,
% extreme values outside that are plotted as 'x'
% the only option currently implemented is 'labels' as cell array, i.e.:
% gBoxPlot(rand(10,2),'labels',{'varname1','varname2'});
% May also request no outlier labels, with 'noOutliers' optional argument=1
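% e.g. gBoxPlot(randn(100,3),'labels',{'x1','x2','x3'},'noOutliers',1);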
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Author: James R. Gattiker, Los Alamos National Laboratory
%
% This file was distributed as part of the GPM/SA software package
% Los Alamos Computer Code release LA-CC-06-079, C-06,114
%
% Copyright 2008. Los Alamos National Security, LLC. This material
% was produced under U.S. Government contract DE-AC52-06NA25396 for
% Los Alamos National Laboratory (LANL), which is operated by Los Alamos
% National Security, LLC for the U.S. Department of Energy. The U.S.
% Government has rights to use, reproduce, and distribute this software.
% NEITHER THE GOVERNMENT NOR LOS ALAMOS NATIONAL SECURITY, LLC MAKES ANY
% WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF
% THIS SOFTWARE. If software is modified to produce derivative works,
% such modified software should be clearly marked, so as not to confuse
% it with the version available from LANL.
% Additionally, this program is free software; you can redistribute it
% and/or modify it under the terms of the GNU General Public License as
% published by the Free Software Foundation; version 2.0 of the License.
% Accordingly, this program is distributed in the hope that it will be
% useful, but WITHOUT ANY WARRANTY; without even the implied warranty
% of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
% General Public License for more details.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function gBoxPlot(x,varargin)
labels=[];
noOutliers=0;
parseAssignVarargs({'labels','noOutliers'});
if exist('boxplot')==2 && ~noOutliers; % use the builtin if available
if ~isempty(labels)
boxplot(x,'labels',labels);
else
boxplot(x);
end
return
end
cla; hold on;
if min(size(x))==1; x=x(:); end
[m n]=size(x);
boxsize=[-1 1] * (0.05 + 0.2*(1-exp(-(n-1))));
for jj=1:size(x,2);
col=x(:,jj);
qs=gQuantile(col,[0.25 0.5 0.75]);
plot(jj+boxsize,[1 1]*qs(1));
plot(jj+boxsize,[1 1]*qs(2),'r');
plot(jj+boxsize,[1 1]*qs(3));
plot([1 1]*boxsize(1)+jj,qs([1 3]));
plot([1 1]*boxsize(2)+jj,qs([1 3]));
% establish whisker low and high limits
xlr=(qs(1)-1.5*(qs(3)-qs(1)));
xlh=(qs(3)+1.5*(qs(3)-qs(1)));
%get the whisker levels & plot (nearest within limit)
wlow=min(col(col>xlr));
whig=max(col(col<xlh));
plot(0.5*boxsize+jj,[1 1]*wlow,'k');
plot([1 1]*jj,[wlow qs(1)],'k--');
plot(0.5*boxsize+jj,[1 1]*whig,'k');
plot([1 1]*jj,[whig qs(3)],'k--');
if ~noOutliers
%plot the remaining extremes
xlow=col(col<xlr);
xhig=col(col>xlh);
for ii=[xlow; xhig]';
plot(jj,ii,'r+');
end
end
end
xmin=min(x(:)); xmax=max(x(:)); xrange=xmax-xmin;
a=axis; axis([0.5 jj+0.5 xmin-0.05*xrange xmax+0.05*xrange]);
set(gca,'xtick',1:n);
if ~isempty(labels)
set(gca,'xticklabel',labels);
else
xlabel('Column number');
end
ylabel('Values');
| github | AndrewCWalker/rsm_tool_suite-master | gLogBetaPrior.m | .m | rsm_tool_suite-master/Automated_RSM/MCMC/gpmsa/matlab/gLogBetaPrior.m | 1,867 | utf_8 | 78659b95a74e58bce6b3dc05d90f580a |
%function model = gLogBetaPrior(x,parms)
%
% Computes unscaled log beta pdf,
% sum of 1D distributions for each (x,parms) in the input vectors
% for use in prior likelihood calculation
% parms = [a-parameter-vector b-parameter-vector]
%
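% For example, with x = [0.2; 0.5] and parms = [2 2; 3 1] this returns
%   (2-1)*log(0.2) + (2-1)*log(0.8) + (3-1)*log(0.5) + (1-1)*log(0.5),
% i.e. the sum of the unscaled log Beta(a,b) densities evaluated at each x.
%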
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Author: James R. Gattiker, Los Alamos National Laboratory
%
% This file was distributed as part of the GPM/SA software package
% Los Alamos Computer Code release LA-CC-06-079, C-06,114
%
% Copyright 2008. Los Alamos National Security, LLC. This material
% was produced under U.S. Government contract DE-AC52-06NA25396 for
% Los Alamos National Laboratory (LANL), which is operated by Los Alamos
% National Security, LLC for the U.S. Department of Energy. The U.S.
% Government has rights to use, reproduce, and distribute this software.
% NEITHER THE GOVERNMENT NOR LOS ALAMOS NATIONAL SECURITY, LLC MAKES ANY
% WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF
% THIS SOFTWARE. If software is modified to produce derivative works,
% such modified software should be clearly marked, so as not to confuse
% it with the version available from LANL.
% Additionally, this program is free software; you can redistribute it
% and/or modify it under the terms of the GNU General Public License as
% published by the Free Software Foundation; version 2.0 of the License.
% Accordingly, this program is distributed in the hope that it will be
% useful, but WITHOUT ANY WARRANTY; without even the implied warranty
% of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
% General Public License for more details.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function p = gLogBetaPrior(x,parms)
a=parms(:,1); b=parms(:,2);
x=x(:);
p=sum( (a-1).*log(x) + (b-1).*log(1-x) );
| github | AndrewCWalker/rsm_tool_suite-master | gPlotMatrix.m | .m | rsm_tool_suite-master/Automated_RSM/MCMC/gpmsa/matlab/gPlotMatrix.m | 9,865 | utf_8 | 05634a43bd2c67952be44a7ae7aa6ea5 |
function [h bigAx]=gPlotMatrix(data,varargin)
% function [h bigAx]=gPlotMatrix(data,varargin)
% data - contains vectors for scatterplots
% each row is an vector, as expected for plotmatrix
% varargs include
% 'Pcontours' are the percentile levels for the contour plot
% 'ngrid' is axis grid size (symmetric) (a good guess is 25, default=10)
% 'labels', a cell array of variable names [optional]
% 'ttl', an overall plot title [optional]
% 'axRange', a 2-vector of axis scalings, default [0,1] or data range
% 'ksd', the sd of the contour smoothing kernel (default=0.05)
% 'Pcontours', probability contours, default [0.5 0.9]
% 'ustyle', 'lstyle' is the type of the off-diagonal plots
% 'scatter' is xy scatterplots [default]
% 'imcont' is a smoothed image (2d est. pdf) with contours
% 'shade' causes the scatterplots to shade from blue to red over
% the input sequence of points
% 'marksize' is the MarkerSize argument to plot for scatterplots
% 'XTickDes' and 'YTickDes', if specified, are double cell arrays, containing
% pairs of designators. Designator {[0.5 0.75], {'1','blue'}} puts the
% labels '1' and 'blue' at 0.5 and 0.75 on the pane, resp. The
% outer cell array is length the number of axes.
% 'oneCellOnly' indicates that only one cell will be picked out, the cell
% designated, e.g., [1 2]
% 'plotPoints' is a matrix of points to over-plot on scatterplots of images,
% it has the same variables as the matrix being plotted
% 'plotPointsDes' is a plot designator for plotpoints, it's a cell array,
% for example {'r*'} or {'r*','markersize',10}
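%
% Example call (a hedged sketch; the data and labels below are made up for
% illustration and are not part of this package):
%   X = rand(500,3);
%   gPlotMatrix(X,'labels',{'x1','x2','x3'},'ngrid',25, ...
%               'ustyle','imcont','lstyle','scatter','shade',1);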
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Author: James R. Gattiker, Los Alamos National Laboratory
%
% This file was distributed as part of the GPM/SA software package
% Los Alamos Computer Code release LA-CC-06-079, C-06,114
%
% Copyright 2008. Los Alamos National Security, LLC. This material
% was produced under U.S. Government contract DE-AC52-06NA25396 for
% Los Alamos National Laboratory (LANL), which is operated by Los Alamos
% National Security, LLC for the U.S. Department of Energy. The U.S.
% Government has rights to use, reproduce, and distribute this software.
% NEITHER THE GOVERNMENT NOR LOS ALAMOS NATIONAL SECURITY, LLC MAKES ANY
% WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF
% THIS SOFTWARE. If software is modified to produce derivative works,
% such modified software should be clearly marked, so as not to confuse
% it with the version available from LANL.
% Additionally, this program is free software; you can redistribute it
% and/or modify it under the terms of the GNU General Public License as
% published by the Free Software Foundation; version 2.0 of the License.
% Accordingly, this program is distributed in the hope that it will be
% useful, but WITHOUT ANY WARRANTY; without even the implied warranty
% of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
% General Public License for more details.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
[n p]=size(data);
% defaults for varargs
labels=[];
if any(data(:)<0) || any(data(:)>1)
axRange=[min(data)' max(data)'];
else
axRange=repmat([0 1],p,1);
end
ksd=0.05; ngrid=10; Pcontours=[0.5;0.9];
ustyle='scatter'; lstyle='scatter'; shade=0; ttl=[]; marksize=6;
XTickDes=[]; YTickDes=[]; oneCellOnly=0;
plotPoints=[]; plotPointsDes={'r*'};
ldata=[];
parseAssignVarargs({'labels','axRange','ngrid','ksd','Pcontours', ...
'ustyle','lstyle','shade','ttl','marksize', ...
'XTickDes','YTickDes','oneCellOnly', ...
'plotPoints','plotPointsDes','ldata'});
histldata=1;
if isempty(ldata); ldata=data; histldata=0; end
Pcontours=Pcontours(:);
ncont=length(Pcontours);
% if shading is enabled, set up the shade structs.
if shade
shgroups=min(n,100); % need to set up groups if n is large
sls=linspace(1,n,shgroups+1)'; slc=linspace(0,1,shgroups);
for shi=1:shgroups; % define a range and a color for each group
sh(shi).ix=ceil(sls(shi)):floor(sls(shi+1));
sh(shi).color=(1-slc(shi))*[0 0 1] + slc(shi)*[1 0 0];
end
else
shgroups=1; sh.ix=1:n; sh.color=[0 0 1];
end
% Put the data into the specified range
% (scale data to [0 1], where the axes will be set below)
data=(data-repmat(axRange(:,1)',n,1)) ./ ...
repmat((axRange(:,2)-axRange(:,1))',n,1);
if ~isempty(plotPoints)
ppn=size(plotPoints,1);
plotPoints=(plotPoints - repmat(axRange(:,1)',ppn,1)) ./ ...
repmat((axRange(:,2)-axRange(:,1))',ppn,1);
end
% Generate a grid and supporting data structures
gridvals = linspace(0,1,ngrid);
[g1 g2] = meshgrid(gridvals,gridvals);
g1v = g1(:); g2v = g2(:);
gvlen = length(g1v);
dens = zeros(gvlen,1);
% begin
clf;
% establish the subplots
for ii=1:p; for jj=1:p;
h(ii,jj)=gPackSubplot(p,p,ii,jj);
end; end
% Put in the histograms
for ii=1:p
axes(h(ii,ii));
if ~histldata % single hist on diag
hist(data(:,ii));
else % two datasets; overlay kernel smooths
for kk=1:length(gridvals)
hdens(kk)=sum(calcNormpdf(data(:,ii),gridvals(kk),ksd));
end
plot(gridvals,hdens);
hold on;
for kk=1:length(gridvals)
hdens(kk)=sum(calcNormpdf(ldata(:,ii),gridvals(kk),ksd));
end
plot(gridvals,hdens,'r');
end
%axisNorm(h(ii,ii),'x',[0 1]);
end
% Go through the 2D plots
for ii=1:p-1; for jj=ii+1:p
% compute the smooth and contours, if it's called for either triangle
if any(strcmp({ustyle,lstyle},'imcont'))
% compute the smooth response
for i=1:gvlen
f = calcNormpdf(data(:,jj),g1v(i),ksd) ...
.*calcNormpdf(data(:,ii),g2v(i),ksd);
dens(i) = sum(f);
end
% normalize dens
dens = dens/sum(dens);
% get the contours
for j=1:ncont
hlevels(j) = fzero(@(x) getp(x,dens)-Pcontours(j),[0 max(dens)]);
end
% precompute for data in lower triangle
% compute the smooth response
for i=1:gvlen
f = calcNormpdf(ldata(:,jj),g1v(i),ksd) ...
.*calcNormpdf(ldata(:,ii),g2v(i),ksd);
ldens(i) = sum(f);
end
% normalize dens
ldens = ldens/sum(ldens);
% get the contours
for j=1:ncont
lhlevels(j) = fzero(@(x) getp(x,ldens)-Pcontours(j),[0 max(ldens)]);
end
end
% Do the upper triangle plots
axes(h(ii,jj));
switch ustyle
case 'scatter'
for shi=1:shgroups
plot(data(sh(shi).ix,jj),data(sh(shi).ix,ii),'.', ...
'MarkerSize',marksize,'Color',sh(shi).color);
hold on;
end
case 'imcont'
imagesc(g1v,g2v,reshape(dens,ngrid,ngrid)); axis xy; hold on;
colormap(repmat([.9:-.02:.3]',[1 3]));
contour(g1,g2,reshape(dens,ngrid,ngrid),hlevels,'LineWidth',1.0,'Color','b');
otherwise
error('bad specification for ustyle');
end
if ~isempty(plotPoints)
plot(plotPoints(:,jj),plotPoints(:,ii),plotPointsDes{:});
end
axis([0 1 0 1]);
% Do the lower triangle plots
axes(h(jj,ii));
switch lstyle
case 'scatter'
for shi=1:shgroups
plot(ldata(sh(shi).ix,ii),ldata(sh(shi).ix,jj),'.', ...
'MarkerSize',marksize,'Color',sh(shi).color);
hold on;
end
hold on;
case 'imcont'
imagesc(g1v,g2v,reshape(ldens,ngrid,ngrid)'); axis xy; hold on;
colormap(repmat([.9:-.02:.3]',[1 3]));
contour(g1,g2,reshape(ldens,ngrid,ngrid)',lhlevels,'LineWidth',1.0,'Color','b');
otherwise
error('bad specification for lstyle');
end
if ~isempty(plotPoints)
plot(plotPoints(:,ii),plotPoints(:,jj),plotPointsDes{:});
end
axis([0 1 0 1]);
end; end
% Ticks and Tick labels, by default they're not there
set(h,'XTick',[],'YTick',[]);
% but put them on if specified.
if ~isempty(XTickDes)
for ii=1:size(h,2)
set(h(end,ii),'XTick',XTickDes{ii}{1});
set(h(end,ii),'XTickLabel',XTickDes{ii}{2});
end
end
if ~isempty(YTickDes)
for ii=1:size(h,1)
set(h(ii,1),'YTick',YTickDes{ii}{1});
set(h(ii,1),'YTickLabel',YTickDes{ii}{2});
end
end
% labels
if ~isempty(labels)
for ii=1:p
%title(h(1,ii),labels{ii});
ylabel(h(ii,1),labels{ii});
xlabel(h(end,ii),labels{ii});
end
end
% if a title was supplied, put it up relative to an invisible axes
if ~isempty(ttl)
bigAx=axes('position',[0.1 0.1 0.8 0.8],'visible','off'); hold on;
text(0.5,1.05,ttl,'horizontalalignment','center','fontsize',14);
end
if oneCellOnly
set(h(oneCellOnly(1),oneCellOnly(2)), ...
'position',[0.075 0.075 0.85 0.85]);
end
end
% function to get probability of a given level h
function pout = getp(h,d);
iabove = (d >= h);
pout = sum(d(iabove));
end
function y=calcNormpdf(x,m,s)
%calculate univariate normal pdf values, elementwise over x, m, s
nf=1./( sqrt(2*pi) .* s );
up=exp(-0.5* ((x-m)./s).^2);
y=nf.*up;
end
|
github
|
AndrewCWalker/rsm_tool_suite-master
|
setupModel.m
|
.m
|
rsm_tool_suite-master/Automated_RSM/MCMC/gpmsa/matlab/setupModel.m
| 14,699 |
utf_8
|
91a9639a208d5021b3a83b639c968ea8
|
% function params=setupModel(obsData,simData,optParms)
% Sets up a gpmsa runnable struct from raw data.
% Please refer to associated documentation
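% Sketch of a typical call (hedged; the field shapes noted here are inferred
% from the code below and are illustrative only):
%   simData.x    - m x (p+q) matrix of simulation inputs
%   simData.yStd - standardized simulation outputs, one column per run
%   simData.Ksim - principal-component basis for the simulation outputs
%   obsData(k).x, .yStd, .Dobs, .Kobs - observed inputs, standardized data,
%                  discrepancy basis and PC basis for experiment k=1..n
%   params = setupModel(obsData, simData);    % optParms is optional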
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Author: James R. Gattiker, Los Alamos National Laboratory
%
% This file was distributed as part of the GPM/SA software package
% Los Alamos Computer Code release LA-CC-06-079, C-06,114
%
% Copyright 2008. Los Alamos National Security, LLC. This material
% was produced under U.S. Government contract DE-AC52-06NA25396 for
% Los Alamos National Laboratory (LANL), which is operated by Los Alamos
% National Security, LLC for the U.S. Department of Energy. The U.S.
% Government has rights to use, reproduce, and distribute this software.
% NEITHER THE GOVERNMENT NOR LOS ALAMOS NATIONAL SECURITY, LLC MAKES ANY
% WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF
% THIS SOFTWARE. If software is modified to produce derivative works,
% such modified software should be clearly marked, so as not to confuse
% it with the version available from LANL.
% Additionally, this program is free software; you can redistribute it
% and/or modify it under the terms of the GNU General Public License as
% published by the Free Software Foundation; version 2.0 of the License.
% Accordingly, this program is distributed in the hope that it will be
% useful, but WITHOUT ANY WARRANTY; without even the implied warranty
% of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
% General Public License for more details.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function params=setupModel(obsData,simData,optParms,varargin)
verbose=1;
parseAssignVarargs({'verbose'});
% a shortcut version
if isfield(obsData,'obsData') && isfield(obsData,'simData') && ...
~exist('optParms')
simData=obsData.simData; obsData=obsData.obsData;
end
% params is optional
if ~exist('optParms'); optParms=[]; end
% check for scalar output
if isfield(optParms,'scalarOutput'); scOut=optParms.scalarOutput;
else scOut=0; end
model.scOut=scOut;
% grab some parms to local (short named) vars
n=length(obsData);
if n==0; % eta-only model
% dummy up some empty fields
obsData(1).x=[];
obsData(1).Dobs=[];
obsData(1).yStd=[];
% all vars are x, otherwise some are x some are theta
if iscell(simData.x)
% separable design
p=0;
for ii=1:length(simData.x); p=p+size(simData.x{ii},2); end
else
p=size(simData.x,2);
end
q=0;
else
p=length(obsData(1).x);
if iscell(simData.x)
% separable design
q=-p;
for ii=1:length(simData.x); q=q+size(simData.x{ii},2); end
else
q=size(simData.x,2)-p;
end
end
m=size(simData.yStd,2);
pu=size(simData.Ksim,2);
pv=size(obsData(1).Dobs,2);
if verbose
fprintf('SetupModel: Determined data sizes as follows: \n')
if n==0
fprintf('SetupModel: This is a simulator (eta) -only model\n');
fprintf('SetupModel: m=%3d (number of simulated data)\n',m);
fprintf('SetupModel: p=%3d (number of inputs)\n',p);
fprintf('SetupModel: pu=%3d (transformed response dimension)\n',pu);
else
fprintf('SetupModel: n=%3d (number of observed data)\n',n);
fprintf('SetupModel: m=%3d (number of simulated data)\n',m);
fprintf('SetupModel: p=%3d (number of parameters known for observations)\n',p);
fprintf('SetupModel: q=%3d (number of additional simulation inputs (to calibrate))\n',q);
fprintf('SetupModel: pu=%3d (response dimension (transformed))\n',pu);
fprintf('SetupModel: pv=%3d (discrepancy dimension (transformed))\n',pv);
end
if iscell(simData.x)
fprintf('SetupModel: Kronecker separable design specified\n');
end
fprintf('\n');
end
% check for and process lamVzGroups
if isfield(optParms,'lamVzGroup'); lamVzGroup=optParms.lamVzGroup;
else lamVzGroup=ones(pv,1); end
lamVzGnum=length(unique(lamVzGroup));
if ~isempty(setxor(unique(lamVzGroup),1:lamVzGnum))
error('invalid lamVzGroup specification in setupModel');
end
% put in a Sigy param if not supplied (backward compatibility)
if ~isfield(obsData,'Sigy')
for k=1:n; obsData(k).Sigy=eye(size(obsData(k).Kobs,1)); end
end
% make a local copy of Lamy for use in this routine (do inv() only once)
for k=1:n; obs(k).Lamy=inv(obsData(k).Sigy); end
% Construct the transformed obs
if scOut
data.x=[]; data.u=[];
for k=1:n;
data.x(k,:)=obsData(k).x;
data.u(k)=obsData(k).yStd;
end;
else
% ridge to be used for stabilization
DKridge=eye(pu+pv)*1e-6;
data.x=[]; data.v=[]; data.u=[];
for k=1:n;
if (p>0); data.x(k,:)=obsData(k).x; end
% Transform the obs data
DK=[obsData(k).Dobs obsData(k).Kobs];
vu=inv( DK'*obs(k).Lamy*DK + DKridge )* ...
DK'*obs(k).Lamy*obsData(k).yStd;
data.v(k,:)=vu(1:pv);
data.u(k,:)=vu(pv+1:end)';
end;
end
if iscell(simData.x) % add a composed zt to the struct
data.ztSep=simData.x;
tdes=simData.x{end}; if size(tdes,1)==1; tdes=tdes'; end
for ii=length(simData.x)-1:-1:1
ndes=simData.x{ii}; if size(ndes,1)==1; ndes=ndes'; end
[r1 r2]=meshgrid(1:size(simData.x{ii},1),1:size(tdes,1));
tdes=[ndes(r1(:),:) tdes(r2(:),:)];
end
data.zt=tdes;
else
data.zt=simData.x;
data.ztSep=[];
end
data.w=(simData.Ksim\simData.yStd)'; % Construct the transformed sim
% Set initial parameter values
model.theta=0.5*ones(1,q); % Estimated calibration variable
model.betaV=ones(p,lamVzGnum)*0.1; % Spatial dependence for V discrep
model.lamVz=ones(lamVzGnum,1)*20; % Marginal discrepancy precision
model.betaU=ones(p+q,pu)*0.1; % Sim PC surface spatial dependence
model.lamUz=ones(pu,1)*1; % Marginal precision
model.lamWs=ones(pu,1)*1000; % Simulator data precision
% Set up partial results to be stored and passed around;
% Sizes, for reference:
model.n=n; model.m=m; model.p=p; model.q=q;
model.pu=pu; model.pv=pv;
model.lamVzGnum=lamVzGnum; model.lamVzGroup=lamVzGroup;
% Precomputable data forms and covariograms.
model.x0Dist=genDist(data.x);
model.ztDist=genDist(data.zt);
if iscell(data.ztSep) % then compute components of separable design
for ii=1:length(data.ztSep)
model.ztSepDist{ii}=genDist(data.ztSep{ii});
end
end
model.w=data.w(:);
if scOut
model.uw=[data.u(:);data.w(:)];
model.u=data.u(:);
else
model.vuw=[data.v(:);data.u(:);data.w(:)];
model.vu=[data.v(:);data.u(:)];
end
% compute the PC loadings corrections
model.LamSim=diag(simData.Ksim'*simData.Ksim);
% initialize the acceptance record field
model.acc=1;
% compute LamObs, the u/v spatial correlation
if scOut
LO = zeros(n*pu);
for kk=1:n
ivals = (1:pu)+(kk-1)*pu;
LO(ivals,ivals) = obs(kk).Lamy;
end
rankLO = rank(LO);
else
LO = zeros(n*(pv+pu));
for kk=1:n
DK = [obsData(kk).Dobs obsData(kk).Kobs];
ivals = (1:pv+pu)+(kk-1)*(pv+pu);
LO(ivals,ivals) = DK'*obs(kk).Lamy*DK;
end
rankLO = rank(LO);
for kk=1:n
ivals = (1:pv+pu)+(kk-1)*(pv+pu);
LO(ivals,ivals) = LO(ivals,ivals) + DKridge;
end
% now reindex LamObs so that it has the v's first and the
% u's 2nd. LamObs is n*(pu+pv) in size and indexed in
% the order v1 u1 v2 u2 ... vn un. We want to arrange the
% order to be v1 v2 ... vn u1 u2 ... un.
inew = [];
for kk=1:pv
inew = [inew; (kk:(pu+pv):n*(pu+pv))'];
end
for kk=1:pu
inew = [inew; ((pv+kk):(pu+pv):n*(pu+pv))'];
end
LO = LO(inew,inew);
end
% compute the inverse of LO, with a small ridge added for numerical stability
model.SigObs=inv(LO)+1e-8*eye(size(LO,1));
% Set prior distribution types and parameters
priors.lamVz.fname ='gLogGammaPrior';
priors.lamVz.params=repmat([1 0.0010],lamVzGnum,1);
priors.lamUz.fname ='gLogGammaPrior';
priors.lamUz.params=repmat([5 5],pu,1);
priors.lamWOs.fname='gLogGammaPrior';
priors.lamWOs.params=[5 0.005];
priors.lamWs.fname ='gLogGammaPrior';
priors.lamWs.params=repmat([3 0.003],pu,1);
priors.lamOs.fname ='gLogGammaPrior';
priors.lamOs.params=[1 0.001];
priors.rhoU.fname ='gLogBetaPrior';
priors.rhoU.params=repmat([1 0.1],pu*(p+q),1);
priors.rhoV.fname ='gLogBetaPrior';
priors.rhoV.params=repmat([1 0.1],p*lamVzGnum,1);
priors.theta.fname ='gLogNormalPrior';
priors.theta.params=repmat([0.5 10],q,1);
% Modification of lamOs and lamWOs prior distributions
if isfield(optParms,'priors')
if isfield(optParms.priors,'lamWOs')
priors.lamWOs.params=optParms.priors.lamWOs.params;
end
if isfield(optParms.priors,'lamOs')
priors.lamOs.params=optParms.priors.lamOs.params;
end
end
% Prior correction for lamOs and lamWOs prior values (due to D,K basis xform)
%for lamOs, need DK basis correction
totElements=0;
for ii=1:length(obsData);
totElements=totElements+length(obsData(ii).yStd);
end
aCorr=0.5*(totElements-rankLO);
bCorr=0;
if ~scOut
for ii=1:n
DKii = [obsData(ii).Dobs obsData(ii).Kobs];
vuii = [data.v(ii,:)'; data.u(ii,:)'];
resid=obsData(ii).yStd(:) - DKii*vuii;
bCorr=bCorr+0.5*sum(resid'*obs(ii).Lamy*resid);
end
end
priors.lamOs.params(:,1)=priors.lamOs.params(:,1)+aCorr;
priors.lamOs.params(:,2)=priors.lamOs.params(:,2)+bCorr;
%for lamWOs, need K basis correction
aCorr=0.5*(size(simData.yStd,1)-pu)*m;
ysimStdHat = simData.Ksim*data.w';
bCorr=0.5*sum(sum((simData.yStd-ysimStdHat).^2));
priors.lamWOs.params(:,1)=priors.lamWOs.params(:,1)+aCorr;
priors.lamWOs.params(:,2)=priors.lamWOs.params(:,2)+bCorr;
% Set the initial values of lamOs and lamWOs based on the priors.
model.lamWOs=max(100,priors.lamWOs.params(:,1)/priors.lamWOs.params(:,2));
model.lamOs=max(20, priors.lamOs.params(:,1)/priors.lamOs.params(:,2));
% Set prior bounds
priors.lamVz.bLower=0; priors.lamVz.bUpper=Inf;
priors.lamUz.bLower=0.3; priors.lamUz.bUpper=Inf;
priors.lamWs.bLower=60; priors.lamWs.bUpper=1e5;
priors.lamWOs.bLower=60; priors.lamWOs.bUpper=1e5;
priors.lamOs.bLower=0; priors.lamOs.bUpper=Inf;
priors.betaU.bLower=0; priors.betaU.bUpper=Inf;
priors.betaV.bLower=0; priors.betaV.bUpper=Inf;
priors.theta.bLower=0; priors.theta.bUpper=1;
% if thetaConstraintFunction supplied, use that, otherwise
% use a dummy constraint function
if isfield(optParms,'thetaConstraints')
priors.theta.constraints=optParms.thetaConstraints;
% update with the supplied initial theta
model.theta=optParms.thetaInit;
%ii=0;
%while (ii<1e6) && ~tryConstraints(priors.theta.constraints,model.theta)
% model.theta=rand(size(model.theta));
% ii=ii+1;
%end
%if ii==1e6; error('unable to draw theta within constraints'); end
else
priors.theta.constraints={};
end
function constraintsOK=tryConstraints(constraints,theta)
constraintsOK=1;
for const=constraints
constraintsOK=constraintsOK & eval(const{1});
end
end
% Set mcmc step interval values
mcmc.thetawidth=0.2 * ones(1,numel(model.theta));
mcmc.rhoUwidth=0.1* ones(1,numel(model.betaU));
mcmc.rhoVwidth=0.1* ones(1,numel(model.betaV));
mcmc.lamVzwidth=10* ones(1,numel(model.lamVz));
mcmc.lamUzwidth=5* ones(1,numel(model.lamUz));
mcmc.lamWswidth=100* ones(1,numel(model.lamWs));
mcmc.lamWOswidth=100* ones(1,numel(model.lamWOs));
mcmc.lamOswidth=model.lamOs/2* ones(size(model.lamOs));
% set up control var lists for sampling and logging
% pvars is the list of variables from model struct to log
% svars is the list of variables to sample (and compute prior on)
% svarSize is the length of each svar variable
% wvars is the list of corresponding mcmc width names
if n>0 % if there's obsData, do the full deal.
if pv>0
mcmc.pvars={'theta','betaV','betaU','lamVz','lamUz','lamWs', ...
'lamWOs','lamOs','logLik','logPrior','logPost'};
mcmc.svars={'theta','betaV','betaU','lamVz', ...
'lamUz','lamWs','lamWOs','lamOs'};
mcmc.svarSize=[q % theta
p*lamVzGnum % betaV
pu*(p+q) % betaU
lamVzGnum % lamVz
pu % lamUz
pu % lamWs
1 % lamWOs
1]'; % lamOs
mcmc.wvars={'thetawidth','rhoVwidth','rhoUwidth','lamVzwidth', ...
'lamUzwidth','lamWswidth','lamWOswidth','lamOswidth'};
else %this is a no-discrepancy model with observations
mcmc.pvars={'theta','betaU','lamUz','lamWs', ...
'lamWOs','logLik','logPrior','logPost'};
mcmc.svars={'theta','betaU','lamUz','lamWs','lamWOs'};
mcmc.svarSize=[q %theta
pu*(p+q) % betaU
pu % lamUz
pu % lamWs
1]'; % lamWOs
mcmc.wvars={'thetawidth','rhoUwidth', ...
'lamUzwidth','lamWswidth','lamWOswidth'};
end
else % we're doing just an eta model, so a subset of the params.
mcmc.pvars={'betaU','lamUz','lamWs', ...
'lamWOs','logLik','logPrior','logPost'};
mcmc.svars={'betaU','lamUz','lamWs','lamWOs'};
mcmc.svarSize=[pu*(p+q) % betaU
pu % lamUz
pu % lamWs
1]'; % lamWOs
mcmc.wvars={'rhoUwidth', ...
'lamUzwidth','lamWswidth','lamWOswidth'};
end
% Over and out
params.data=data;
params.model=model;
params.priors=priors;
params.mcmc=mcmc;
params.obsData=obsData;
params.simData =simData;
params.optParms=optParms;
params.pvals=[]; % initialize the struct
end
|
github
|
AndrewCWalker/rsm_tool_suite-master
|
diagInds.m
|
.m
|
rsm_tool_suite-master/Automated_RSM/MCMC/gpmsa/matlab/diagInds.m
| 1,688 |
utf_8
|
f59b568d366c4b91f80524bae479d267
|
% function inds=diagInds(n)
%
% Return the 1-D indices of the diagonal of an nxn matrix
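% Example: diagInds(3) returns [1 5 9], the linear indices of the diagonal
% of a 3x3 matrix.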
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Author: James R. Gattiker, Los Alamos National Laboratory
%
% This file was distributed as part of the GPM/SA software package
% Los Alamos Computer Code release LA-CC-06-079, C-06,114
%
% Copyright 2008. Los Alamos National Security, LLC. This material
% was produced under U.S. Government contract DE-AC52-06NA25396 for
% Los Alamos National Laboratory (LANL), which is operated by Los Alamos
% National Security, LLC for the U.S. Department of Energy. The U.S.
% Government has rights to use, reproduce, and distribute this software.
% NEITHER THE GOVERNMENT NOR LOS ALAMOS NATIONAL SECURITY, LLC MAKES ANY
% WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF
% THIS SOFTWARE. If software is modified to produce derivative works,
% such modified software should be clearly marked, so as not to confuse
% it with the version available from LANL.
% Additionally, this program is free software; you can redistribute it
% and/or modify it under the terms of the GNU General Public License as
% published by the Free Software Foundation; version 2.0 of the License.
% Accordingly, this program is distributed in the hope that it will be
% useful, but WITHOUT ANY WARRANTY; without even the implied warranty
% of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
% General Public License for more details.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function inds=diagInds(n)
inds = 1:(n+1):(n*n);
|
github
|
AndrewCWalker/rsm_tool_suite-master
|
gPredict.m
|
.m
|
rsm_tool_suite-master/Automated_RSM/MCMC/gpmsa/matlab/gPredict.m
| 12,969 |
utf_8
|
0faae2364a770f2bd0a51297aae3cb08
|
%function pred=gPredict(xpred,pvals,model,data,varargs)
% Predict using a gpmsa constructed model.
% result is a 3-dimensional prediction matrix:
% #pvals by model-dims by length-xpred
% model-dims is the simulation basis size for a w-prediction (a model
% with no observation data) or the v (discrepancy basis loadings) and u
% (simulation basis loadings) for uv-prediction
% argument pair keywords are
% mode - 'wpred' or 'uvpred'
% theta - the calibration variable value(s) to use, either one value or
% one for each xpred. Default is the calibrated param in pvals
% addResidVar - default 0, whether to add the residual variability
% returnRealization - default 1, whether to return realizations of the
% process specified. values will be in a .w field for wpred, or
% .v and .u fields for uvpred.
% returnMuSigma - default 0, whether to return the mean and covariance of
% the process specified. results will be in a matrix .Myhat field
% and a cell array .Syhat field.
% returnCC - default 0, return the cross-covariance of the data and
% predictors.
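% Example (a hedged sketch; params and pvals are assumed to come from
% setupModel and an MCMC run, and xpred is a hypothetical prediction grid):
%   pred = gPredict(xpred, pvals, params.model, params.data, ...
%                   'addResidVar', 1, 'returnMuSigma', 1);
%   % realizations are returned in pred.u/pred.v (or pred.w for eta-only)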
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Author: James R. Gattiker, Los Alamos National Laboratory
%
% This file was distributed as part of the GPM/SA software package
% Los Alamos Computer Code release LA-CC-06-079, C-06,114
%
% Copyright 2008. Los Alamos National Security, LLC. This material
% was produced under U.S. Government contract DE-AC52-06NA25396 for
% Los Alamos National Laboratory (LANL), which is operated by Los Alamos
% National Security, LLC for the U.S. Department of Energy. The U.S.
% Government has rights to use, reproduce, and distribute this software.
% NEITHER THE GOVERNMENT NOR LOS ALAMOS NATIONAL SECURITY, LLC MAKES ANY
% WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF
% THIS SOFTWARE. If software is modified to produce derivative works,
% such modified software should be clearly marked, so as not to confuse
% it with the version available from LANL.
% Additionally, this program is free software; you can redistribute it
% and/or modify it under the terms of the GNU General Public License as
% published by the Free Software Foundation; version 2.0 of the License.
% Accordingly, this program is distributed in the hope that it will be
% useful, but WITHOUT ANY WARRANTY; without even the implied warranty
% of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
% General Public License for more details.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function pred=gPredict(xpred,pvals,model,data,varargin)
if model.n==0
mode='wpred';
else
mode='uvpred';
end
theta=[];
addResidVar=0;
returnRealization=1;
returnMuSigma=0;
returnCC=0;
parseAssignVarargs({'mode','theta','addResidVar', ...
'returnRealization','returnMuSigma', ...
'returnCC'});
switch mode
case 'uvpred'
pred=uvPred(xpred,pvals,model,data,theta,addResidVar, ...
returnRealization,returnMuSigma,returnCC);
case 'wpred'
pred=wPred(xpred,pvals,model,data,theta,addResidVar, ...
returnRealization,returnMuSigma,returnCC);
otherwise
error('invalid mode in gPredict');
end
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function pred=wPred(xpred,pvals,model,data,thetapred,addResidVar,retRlz,retMS,retC)
n=model.n; m=model.m;p=model.p;q=model.q;pu=model.pu;
npred=size(xpred,1);
diags1=diagInds(m*pu);
diags2=diagInds(npred*pu);
nreal=length(pvals);
tpred=zeros([nreal,npred*pu]);
for ii=1:length(pvals)
if n>0
theta=pvals(ii).theta';
end
betaU=reshape(pvals(ii).betaU,p+q,pu);
lamUz=pvals(ii).lamUz;
lamWs=pvals(ii).lamWs; lamWOs=pvals(ii).lamWOs;
if n>0
if isempty(thetapred)
xpredt=[xpred repmat(theta,npred,1)];
else
xpredt=[xpred thetapred];
end
else
xpredt=xpred;
end
xpredDist=genDist(xpredt);
zxpredDist=genDist2(data.zt,xpredt);
SigW=zeros(m*pu);
for jj=1:pu
bStart=(jj-1)*m+1; bEnd=bStart+m-1;
SigW(bStart:bEnd,bStart:bEnd)=...
gCovMat(model.ztDist,betaU(:,jj),lamUz(jj));
end
SigW(diags1)=SigW(diags1)+ ...
kron(1./(model.LamSim*lamWOs)',ones(1,m)) + ...
kron(1./(lamWs)',ones(1,m)) ;
SigWp=zeros(npred*pu);
for jj=1:pu
bStart=(jj-1)*npred+1; bEnd=bStart+npred-1;
SigWp(bStart:bEnd,bStart:bEnd)= ...
gCovMat(xpredDist,betaU(:,jj),lamUz(jj));
end
SigWp(diags2)=SigWp(diags2)+ ...
kron(1./(lamWs)',ones(1,npred)) ;
if addResidVar
SigWp(diags2)=SigWp(diags2)+ ...
kron(1./(model.LamSim*lamWOs)',ones(1,npred));
end
SigWWp=zeros(m*pu,npred*pu);
for jj=1:pu
bStartI=(jj-1)*m+1; bEndI=bStartI+m-1;
bStartJ=(jj-1)*npred+1; bEndJ=bStartJ+npred-1;
SigWWp(bStartI:bEndI,bStartJ:bEndJ)=...
gCovMat(zxpredDist,betaU(:,jj),lamUz(jj));
end
SigData=SigW;
SigPred=SigWp;
SigCross=SigWWp;
% Get the stats for the prediction stuff.
%W=(SigCross')/SigData;
W=linsolve(SigData,SigCross,struct('SYM',true,'POSDEF',true))';
Myhat=W*(data.w(:));
Syhat=SigPred-W*SigCross;
if retRlz
% And do a realization
tpred(ii,:)=rmultnormsvd(1,Myhat,Syhat')';
end
if retMS
% add the distribution params
pred.Myhat(ii,:)=Myhat;
pred.Syhat{ii}=Syhat;
end
if retC
pred.CC{ii}=SigCross;
end
end
if retRlz
% Reshape the pred matrix to 3D:
% first dim - (number of realizations [pvals])
% second dim - (number of principal components)
% third dim - (number of points [x,theta]s)
pred.w=zeros(length(pvals),pu,npred);
for ii=1:pu
pred.w(:,ii,:)=tpred(:,(ii-1)*npred+1:ii*npred);
end
end
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function pred=uvPred(xpred,pvals,model,data,thetapred,addResidVar,retRlz,retMS,retC)
n=model.n;m=model.m;p=model.p;q=model.q;pu=model.pu;pv=model.pv;
lamVzGnum=model.lamVzGnum; lamVzGroup=model.lamVzGroup;
npred=size(xpred,1);
diags0=diagInds(n*pu);
diags1=diagInds(m*pu);
diags2=diagInds(npred*pu);
x0Dist=genDist(data.x);
xpred0Dist=genDist(xpred);
xxpred0Dist=genDist2(data.x,xpred);
nreal=length(pvals);
tpred=zeros([nreal,npred*(pv+pu)]);
for ii=1:length(pvals)
theta=pvals(ii).theta';
betaV=reshape(pvals(ii).betaV,p,lamVzGnum);
betaU=reshape(pvals(ii).betaU,p+q,pu);
lamVz=pvals(ii).lamVz; lamUz=pvals(ii).lamUz; lamWOs=pvals(ii).lamWOs;
lamWs=pvals(ii).lamWs; lamOs=pvals(ii).lamOs;
if isempty(thetapred)
xpredt=[xpred repmat(theta,npred,1)];
else
xpredt=[xpred thetapred];
end
xDist=genDist([data.x repmat(theta,n,1)]);
ztDist=genDist(data.zt);
xzDist=genDist2([data.x repmat(theta,n,1)],data.zt);
xpredDist=genDist(xpredt);
xxpredDist=genDist2([data.x repmat(theta,n,1)],xpredt);
zxpredDist=genDist2(data.zt,xpredt);
% Generate the part of the matrix related to the data
% Four parts to compute: Sig_v, Sig_u, Sig_w, and the Sig_uw crossterm
SigV=zeros(n*pv);
for jj=1:lamVzGnum;
vCov(jj).mat=gCovMat(x0Dist, betaV(:,jj), lamVz(jj));
end
for jj=1:pv
bStart=(jj-1)*n+1; bEnd=bStart+n-1;
SigV(bStart:bEnd,bStart:bEnd)=vCov(lamVzGroup(jj)).mat;
end
SigU=zeros(n*pu);
for jj=1:pu
bStart=(jj-1)*n+1; bEnd=bStart+n-1;
SigU(bStart:bEnd,bStart:bEnd)= ...
gCovMat(xDist,betaU(:,jj),lamUz(jj));
end
SigU(diags0)=SigU(diags0)+...
kron(1./(lamWs)',ones(1,n)) ;
SigW=zeros(m*pu);
for jj=1:pu
bStart=(jj-1)*m+1; bEnd=bStart+m-1;
SigW(bStart:bEnd,bStart:bEnd)=...
gCovMat(ztDist,betaU(:,jj),lamUz(jj));
end
SigW(diags1)=SigW(diags1)+ ...
kron(1./(model.LamSim*lamWOs)',ones(1,m)) + ...
kron(1./(lamWs)',ones(1,m)) ;
SigUW=zeros(n*pu,m*pu);
for jj=1:pu
bStartI=(jj-1)*n+1; bEndI=bStartI+n-1;
bStartJ=(jj-1)*m+1; bEndJ=bStartJ+m-1;
SigUW(bStartI:bEndI,bStartJ:bEndJ)=...
gCovMat(xzDist,betaU(:,jj),lamUz(jj));
end
if model.scOut
SigData=[ SigU+SigV SigUW; ...
SigUW' SigW ];
SigData(1:n*pu,1:n*pu) = ...
SigData(1:n*pu,1:n*pu) + model.SigObs*1/lamOs;
else
SigData=[SigV zeros(n*pv,(n+m)*pu); ...
zeros((n+m)*pu,n*pv) [ SigU SigUW; ...
SigUW' SigW ] ];
SigData(1:n*(pv+pu),1:n*(pv+pu)) = ...
SigData(1:n*(pv+pu),1:n*(pv+pu)) + model.SigObs*1/lamOs;
end
% Generate the part of the matrix related to the predictors
% Parts to compute: Sig_vpred, Sig_upred
SigVp=zeros(npred*pv);
for jj=1:lamVzGnum;
vpCov(jj).mat=gCovMat(xpred0Dist, betaV(:,jj), lamVz(jj));
end
for jj=1:pv
bStart=(jj-1)*npred+1; bEnd=bStart+npred-1;
SigVp(bStart:bEnd,bStart:bEnd)=vpCov(lamVzGroup(jj)).mat;
end
%SigVp(diagInds(npred*pv))=SigVp(diagInds(npred*pv))+1;
SigUp=zeros(npred*pu);
for jj=1:pu
bStart=(jj-1)*npred+1; bEnd=bStart+npred-1;
SigUp(bStart:bEnd,bStart:bEnd)= ...
gCovMat(xpredDist,betaU(:,jj),lamUz(jj));
end
SigUp(diags2)=SigUp(diags2)+...
kron(1./(lamWs)',ones(1,npred)) ;
if addResidVar
SigUp(diags2)=SigUp(diags2)+ ...
kron(1./(model.LamSim*lamWOs)',ones(1,npred)) ;
end
SigPred=[SigVp zeros(npred*pv,npred*pu); ...
zeros(npred*pu,npred*pv) SigUp ];
% Now the cross-terms.
SigVVx=zeros(n*pv,npred*pv);
for jj=1:lamVzGnum;
vvCov(jj).mat=gCovMat(xxpred0Dist, betaV(:,jj), lamVz(jj));
end
for jj=1:pv
bStartI=(jj-1)*n+1; bEndI=bStartI+n-1;
bStartJ=(jj-1)*npred+1; bEndJ=bStartJ+npred-1;
SigVVx(bStartI:bEndI,bStartJ:bEndJ)=vvCov(lamVzGroup(jj)).mat;
end
SigUUx=zeros(n*pu,npred*pu);
for jj=1:pu
bStartI=(jj-1)*n+1; bEndI=bStartI+n-1;
bStartJ=(jj-1)*npred+1; bEndJ=bStartJ+npred-1;
SigUUx(bStartI:bEndI,bStartJ:bEndJ)=...
gCovMat(xxpredDist,betaU(:,jj),lamUz(jj));
end
SigWUx=zeros(m*pu,npred*pu);
for jj=1:pu
bStartI=(jj-1)*m+1; bEndI=bStartI+m-1;
bStartJ=(jj-1)*npred+1; bEndJ=bStartJ+npred-1;
SigWUx(bStartI:bEndI,bStartJ:bEndJ)=...
gCovMat(zxpredDist,betaU(:,jj),lamUz(jj));
end
if model.scOut
SigCross=[SigVVx SigUUx; ...
zeros(m*pu,npred*pv) SigWUx];
else
SigCross=[SigVVx zeros(n*pv,npred*pu); ...
zeros(n*pu,npred*pv) SigUUx; ...
zeros(m*pu,npred*pv) SigWUx];
end
if 0
figure(3)
subplot(2,2,1); imagesc(gScale(SigData,'sqrt'))
subplot(2,2,2); imagesc(gScale(SigCross,'sqrt'))
subplot(2,2,3); imagesc(gScale(SigCross','sqrt'))
subplot(2,2,4); imagesc(gScale(SigPred,'sqrt'))
keyboard
end
% Get the stats for the prediction stuff.
%W=(SigCross')/SigData;
W=linsolve(SigData,SigCross,struct('SYM',true,'POSDEF',true))';
if model.scOut, Myhat=W*model.uw; else Myhat=W*model.vuw; end
Syhat=SigPred-W*SigCross;
if retRlz
% And do a realization
tpred(ii,:)=rmultnormsvd(1,Myhat,Syhat')';
end
if retMS
% log the distribution params
pred.Myhat(ii,:)=Myhat;
pred.Syhat{ii}=Syhat;
end
if retC
pred.CC{ii}=SigCross;
end
end
if retRlz
% Reshape the pred matrix to 3D, for each component:
% first dim - (number of realizations [pvals])
% second dim - (number of principal components)
% third dim - (number of points [x,theta]s)
pred.v=zeros(length(pvals),pv,npred);
pred.u=zeros(length(pvals),pu,npred);
for ii=1:pv
pred.v(:,ii,:)=tpred(:,(ii-1)*npred+1:ii*npred);
end
for ii=1:pu
pred.u(:,ii,:)=tpred(:,pv*npred+((ii-1)*npred+1:ii*npred) );
end
end
end
%
% Helper function rmultnormSVD computes multivariate normal realizations
function rnorm = rmultnormsvd(n,mu,cov)
[U S] = svd(cov);
rnorm = repmat(mu,1,n) + U*sqrt(S) * randn(size(mu,1),n);
end
|
github
|
AndrewCWalker/rsm_tool_suite-master
|
computeLogPrior.m
|
.m
|
rsm_tool_suite-master/Automated_RSM/MCMC/gpmsa/matlab/computeLogPrior.m
| 2,422 |
utf_8
|
df71fa1621e6def310ce84163c42cc8d
|
%function model = computeLogPrior(priors,mcmc,model)
%
% Builds the prior likelihood
%
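% Example (a hedged sketch; params is assumed to be the struct produced by
% setupModel):
%   params.model = computeLogPrior(params.priors, params.mcmc, params.model);
%   params.model.logPrior      % summed log prior over the sampled variables
%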
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Author: James R. Gattiker, Los Alamos National Laboratory
%
% This file was distributed as part of the GPM/SA software package
% Los Alamos Computer Code release LA-CC-06-079, C-06,114
%
% Copyright 2008. Los Alamos National Security, LLC. This material
% was produced under U.S. Government contract DE-AC52-06NA25396 for
% Los Alamos National Laboratory (LANL), which is operated by Los Alamos
% National Security, LLC for the U.S. Department of Energy. The U.S.
% Government has rights to use, reproduce, and distribute this software.
% NEITHER THE GOVERNMENT NOR LOS ALAMOS NATIONAL SECURITY, LLC MAKES ANY
% WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF
% THIS SOFTWARE. If software is modified to produce derivative works,
% such modified software should be clearly marked, so as not to confuse
% it with the version available from LANL.
% Additionally, this program is free software; you can redistribute it
% and/or modify it under the terms of the GNU General Public License as
% published by the Free Software Foundation; version 2.0 of the License.
% Accordingly, this program is distributed in the hope that it will be
% useful, but WITHOUT ANY WARRANTY; without even the implied warranty
% of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
% General Public License for more details.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function model = computeLogPrior(priors,mcmc,model)
f=mcmc.svars;
logprior=0;
lastp=0;
for ii=1:length(f);
curf=f{ii};
switch(curf)
% betaU and betaV have to be handled with rho/beta transformation
case 'betaU'
rhoU= exp(-model.betaU.*(0.5^2));
rhoU(rhoU>0.999)=0.999;
logprior=logprior + feval(priors.rhoU.fname,rhoU,priors.rhoU.params);
case 'betaV'
rhoV= exp(-model.betaV.*(0.5^2));
rhoV(rhoV>0.999)=0.999;
logprior=logprior + feval(priors.rhoV.fname,rhoV,priors.rhoV.params);
otherwise % it's general case for the others
logprior=logprior + ...
feval(priors.(curf).fname,model.(curf),priors.(curf).params);
end
%fprintf('%10s %f\n',curf,logprior-lastp); lastp=logprior;
%fprintf('%10s %f\n',curf,logprior);
end
model.logPrior=logprior;
|
github
|
AndrewCWalker/rsm_tool_suite-master
|
showPvals.m
|
.m
|
rsm_tool_suite-master/Automated_RSM/MCMC/gpmsa/matlab/showPvals.m
| 2,687 |
utf_8
|
7afb8f157187182f1163280278447048
|
% function showPvals(pvals, skip)
% skip = the beginning index to display; optional
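% Example (illustrative; pvals is assumed to be the struct array of MCMC
% draws, e.g. params.pvals):
%   showPvals(pvals, 500);   % trace plots and summaries from draw 500 onward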
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Author: James R. Gattiker, Los Alamos National Laboratory
%
% This file was distributed as part of the GPM/SA software package
% Los Alamos Computer Code release LA-CC-06-079, C-06,114
%
% Copyright 2008. Los Alamos National Security, LLC. This material
% was produced under U.S. Government contract DE-AC52-06NA25396 for
% Los Alamos National Laboratory (LANL), which is operated by Los Alamos
% National Security, LLC for the U.S. Department of Energy. The U.S.
% Government has rights to use, reproduce, and distribute this software.
% NEITHER THE GOVERNMENT NOR LOS ALAMOS NATIONAL SECURITY, LLC MAKES ANY
% WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF
% THIS SOFTWARE. If software is modified to produce derivative works,
% such modified software should be clearly marked, so as not to confuse
% it with the version available from LANL.
% Additionally, this program is free software; you can redistribute it
% and/or modify it under the terms of the GNU General Public License as
% published by the Free Software Foundation; version 2.0 of the License.
% Accordingly, this program is distributed in the hope that it will be
% useful, but WITHOUT ANY WARRANTY; without even the implied warranty
% of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
% General Public License for more details.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function showPvals(pvals, skip)
if ~exist('skip'); skip = 1; end;
fprintf('Processing pval struct from index %d to %d\n',skip,length(pvals));
f=fieldnames(pvals); fdel=false(length(f),1);
for ii=1:length(f); if ~isempty(strfind(f{ii},'Acc')); fdel(ii)=1; end; end
f=f(~fdel);
flen=length(f);
x=skip:length(pvals);
pvals=pvals(skip:end);
cla
for ii=1:flen
y=[pvals.(f{ii})];
h(ii)=pvalSubplot(flen,ii);
if length(x)==length(y)
plot(x,y);
else
plot(y);
end
ylabel(f{ii});
fprintf('%10s: mean s.d. \n',f{ii})
for jj=1:size(y,1)
fprintf(' %3d: %12.4g %12.4g \n', ...
jj,mean(y(jj,:)),std(y(jj,:)) );
end
end
set(h(1:end-1),'XTick',[]);
end
% An internal function is needed to get the subplots to use more of the
% available figure space
function h=pvalSubplot(n,i)
sep=0.25;
left =0.1;
sizelr=0.8;
bottom=(1-(i/n))*0.8+0.1;
sizetb=(1/n)*0.8*(1-sep/2);
h=subplot('position',[left bottom sizelr sizetb]);
end
|
github
|
AndrewCWalker/rsm_tool_suite-master
|
counter.m
|
.m
|
rsm_tool_suite-master/Automated_RSM/MCMC/gpmsa/matlab/counter.m
| 3,295 |
utf_8
|
c028bb3c870f6796e476f67360b9a36c
|
% function counter('start',first_value,last_value,skip_counts,feed)
% function counter('stime',first_value,last_value,skip_seconds,feed)
% Setup mode
% first_value=first value in counter
% last_value=last value in counter (for time computation)
% feed = count of display events for computing time remaining (and linefeed)
% skip_counts = count to skip any printout
% skip_seconds = time delay for progress display
% function counter(index)
% Run Mode
% index = the current loop index
% function counter('end')
% print the final time
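% Example loop (an illustrative sketch):
%   counter('start',1,1000,10,20);
%   for ii=1:1000
%     % ... work ...
%     counter(ii);
%   end
%   counter('end');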
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Author: James R. Gattiker, Los Alamos National Laboratory
%
% This file was distributed as part of the GPM/SA software package
% Los Alamos Computer Code release LA-CC-06-079, C-06,114
%
% Copyright 2008. Los Alamos National Security, LLC. This material
% was produced under U.S. Government contract DE-AC52-06NA25396 for
% Los Alamos National Laboratory (LANL), which is operated by Los Alamos
% National Security, LLC for the U.S. Department of Energy. The U.S.
% Government has rights to use, reproduce, and distribute this software.
% NEITHER THE GOVERNMENT NOR LOS ALAMOS NATIONAL SECURITY, LLC MAKES ANY
% WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF
% THIS SOFTWARE. If software is modified to produce derivative works,
% such modified software should be clearly marked, so as not to confuse
% it with the version available from LANL.
% Additionally, this program is free software; you can redistribute it
% and/or modify it under the terms of the GNU General Public License as
% published by the Free Software Foundation; version 2.0 of the License.
% Accordingly, this program is distributed in the hope that it will be
% useful, but WITHOUT ANY WARRANTY; without even the implied warranty
% of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
% General Public License for more details.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function out=counter(arg1,arg2,arg3,arg4,arg5)
persistent first last feed lval skip mode dcount;
out=0;
if strcmp(arg1,'start')
tic;
mode=0;
first=arg2; last=arg3; skip=arg4; feed=arg5;
lval=first-skip;
dcount=0;
fprintf('Started value counter, vals %d -> %d\n',first,last);
fprintf(' ');
return;
elseif strcmp(arg1,'stime')
tic;
mode=1;
first=arg2; last=arg3;
skip=arg4; feed=arg5;
lval=0;
dcount=0;
fprintf('Started timed counter, vals %d -> %d\n',first,last);
fprintf(' ');
return;
elseif strcmp(arg1,'end')
etime=toc;
if etime>60; fprintf('%dmin:',floor(etime/60)); end;
fprintf('%5.2fsec \n',mod(etime,60));
return;
end
val=arg1;
switch(mode)
case 0
i=val;
case 1
i=toc;
%fprintf('%f..\n',i);
end
if i>(lval+skip-1)
out=1;
lval=i;
fprintf('%d..',val);
dcount=dcount+1;
if (dcount>=feed);
if (last>first)
fprintf('%5.1f min, %5.1f min remain\n ',toc/60,toc/60*(last-val+1)/(val-first));
end
dcount=0;
out=2;
end;
end
|
github
|
AndrewCWalker/rsm_tool_suite-master
|
genDist2.m
|
.m
|
rsm_tool_suite-master/Automated_RSM/MCMC/gpmsa/matlab/genDist2.m
| 2,533 |
utf_8
|
82783b882e1fc9d1b4096cb0d3db2da6
|
% function d = genDist2(data1,data2,dataDesc);
% generates the nxmxp distance array values and supporting
% information, given the nxp matrix data1 and mxp data2
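% Example (illustrative): with data1 = rand(5,2) and data2 = rand(3,2),
%   d = genDist2(data1,data2);
% returns d.d as a 15x2 matrix of componentwise squared differences, with
% d.n=5, d.m=3, d.p=2.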
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Author: James R. Gattiker, Los Alamos National Laboratory
%
% This file was distributed as part of the GPM/SA software package
% Los Alamos Computer Code release LA-CC-06-079, C-06,114
%
% Copyright 2008. Los Alamos National Security, LLC. This material
% was produced under U.S. Government contract DE-AC52-06NA25396 for
% Los Alamos National Laboratory (LANL), which is operated by Los Alamos
% National Security, LLC for the U.S. Department of Energy. The U.S.
% Government has rights to use, reproduce, and distribute this software.
% NEITHER THE GOVERNMENT NOR LOS ALAMOS NATIONAL SECURITY, LLC MAKES ANY
% WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF
% THIS SOFTWARE. If software is modified to produce derivative works,
% such modified software should be clearly marked, so as not to confuse
% it with the version available from LANL.
% Additionally, this program is free software; you can redistribute it
% and/or modify it under the terms of the GNU General Public License as
% published by the Free Software Foundation; version 2.0 of the License.
% Accordingly, this program is distributed in the hope that it will be
% useful, but WITHOUT ANY WARRANTY; without even the implied warranty
% of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
% General Public License for more details.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function d = genDist2(data1,data2,dataDesc)
d.type=2;
[n p1] = size(data1);
[m p2] = size(data2);
p=max(p1,p2);
%generate & store the list of n*m distance indices
inds=n*m;
indi=repmat(1:n,1,m);
indj=repmat(1:m,n,1); indj=indj(:)';
d.n=n; d.m=m; d.p=p;
d.indi=indi; d.indj=indj;
d.indm=indi + n*(indj-1);
if any([p1 p2]==0); d.d=[]; return; end % if either dataset is empty
d.d=(data1(indi,:)-data2(indj,:)).^2;
%if ~exist('dataDesc','var'); cat=any([data1;data2]<0);
% else cat=[dataDesc.typeCategorical];
%end
%cat0=find(~cat); cat1=find(cat);
%
%if isempty(cat1);
% d.d=(data1(indi,:)-data2(indj,:)).^2;
%else
% d.d=zeros(inds,p);
% d.d(:,cat0)=(data1(indi,cat0)-data2(indj,cat0)).^2;
% d.d(:,cat1)=(data1(indi,cat1)~=data2(indj,cat1))*0.5;
%end
|
github
|
AndrewCWalker/rsm_tool_suite-master
|
gLogGammaPrior.m
|
.m
|
rsm_tool_suite-master/Automated_RSM/MCMC/gpmsa/matlab/gLogGammaPrior.m
| 1,859 |
utf_8
|
d710e649ff2ed42a891da063011c5753
|
%function p = gLogGammaPrior(x,parms)
%
% Computes unscaled log gamma pdf,
% sum of 1D distributions for each (x,parms) in the input vectors
% for use in prior likelihood calculation
% parms = [a-parameter-vector b-parameter-vector]
%
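% Example (a hedged sketch mirroring its use for precision parameters):
%   lam   = [1; 1];
%   parms = repmat([5 5], numel(lam), 1);   % one [a b] row per element
%   lp    = gLogGammaPrior(lam, parms);     % scalar unscaled log prior
%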
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Author: James R. Gattiker, Los Alamos National Laboratory
%
% This file was distributed as part of the GPM/SA software package
% Los Alamos Computer Code release LA-CC-06-079, C-06,114
%
% Copyright 2008. Los Alamos National Security, LLC. This material
% was produced under U.S. Government contract DE-AC52-06NA25396 for
% Los Alamos National Laboratory (LANL), which is operated by Los Alamos
% National Security, LLC for the U.S. Department of Energy. The U.S.
% Government has rights to use, reproduce, and distribute this software.
% NEITHER THE GOVERNMENT NOR LOS ALAMOS NATIONAL SECURITY, LLC MAKES ANY
% WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF
% THIS SOFTWARE. If software is modified to produce derivative works,
% such modified software should be clearly marked, so as not to confuse
% it with the version available from LANL.
% Additionally, this program is free software; you can redistribute it
% and/or modify it under the terms of the GNU General Public License as
% published by the Free Software Foundation; version 2.0 of the License.
% Accordingly, this program is distributed in the hope that it will be
% useful, but WITHOUT ANY WARRANTY; without even the implied warranty
% of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
% General Public License for more details.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function p = gLogGammaPrior(x,parms)
a=parms(:,1); b=parms(:,2);
x=x(:);
p=sum( (a-1).*log(x) - b.*x );
|
github
|
AndrewCWalker/rsm_tool_suite-master
|
parseAssignVarargs.m
|
.m
|
rsm_tool_suite-master/Automated_RSM/MCMC/gpmsa/matlab/parseAssignVarargs.m
| 2,382 |
utf_8
|
41948e793af8c4b330571f7498b71208
|
% function parseAssignVarargs(validVars)
% assigns specified caller varargs to the corresponding variable name
% in the calling workspace. vars not specified are not assigned.
% validVars is a cell array of strings that represents possible
% arg names, and the variable name in the workspace (identical)
% varargs is the varargin cell array to parse
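% Example (illustrative): inside a function declared as
%   function myFun(x,varargin)            % myFun is a hypothetical caller
% set defaults first, then overwrite any that were passed by name:
%   verbose=0; labels=[];
%   parseAssignVarargs({'verbose','labels'});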
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Author: James R. Gattiker, Los Alamos National Laboratory
%
% This file was distributed as part of the GPM/SA software package
% Los Alamos Computer Code release LA-CC-06-079, C-06,114
%
% Copyright 2008. Los Alamos National Security, LLC. This material
% was produced under U.S. Government contract DE-AC52-06NA25396 for
% Los Alamos National Laboratory (LANL), which is operated by Los Alamos
% National Security, LLC for the U.S. Department of Energy. The U.S.
% Government has rights to use, reproduce, and distribute this software.
% NEITHER THE GOVERNMENT NOR LOS ALAMOS NATIONAL SECURITY, LLC MAKES ANY
% WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF
% THIS SOFTWARE. If software is modified to produce derivative works,
% such modified software should be clearly marked, so as not to confuse
% it with the version available from LANL.
% Additionally, this program is free software; you can redistribute it
% and/or modify it under the terms of the GNU General Public License as
% published by the Free Software Foundation; version 2.0 of the License.
% Accordingly, this program is distributed in the hope that it will be
% useful, but WITHOUT ANY WARRANTY; without even the implied warranty
% of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
% General Public License for more details.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function parseAssignVarargs(validVars)
pr=0;
bargs=evalin('caller','varargin');
for ii=1:2:length(bargs)
varID=find(strcmp(validVars,bargs{ii}));
if length(varID)~=1;
error('bad argument detected by parseAssignVarargs');
end
if pr; fprintf('Assigning: %s\n', validVars{varID}); bargs{ii+1}, end
if evalin('caller',['exist(''' validVars{varID} ''');'])
assignin('caller',validVars{varID},bargs{ii+1});
else
error('Variable (argument) %s not initialized\n',validVars{varID});
end
end
|
github
|
AndrewCWalker/rsm_tool_suite-master
|
gAnalyzePCA.m
|
.m
|
rsm_tool_suite-master/Automated_RSM/MCMC/gpmsa/matlab/gAnalyzePCA.m
| 2,848 |
utf_8
|
b90e5f67f23f598aa3fbb2d3c866aa60
|
%function [a K]=gAnalyzePCA(y,y1)
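% Example (a hedged sketch): for a matrix ysimStd of standardized simulation
% outputs, one column per run,
%   [a K] = gAnalyzePCA(ysimStd);
% plots cumulative variance explained and reconstruction diagnostics, and
% returns the scaled principal-component basis K.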
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Author: James R. Gattiker, Los Alamos National Laboratory
%
% This file was distributed as part of the GPM/SA software package
% Los Alamos Computer Code release LA-CC-06-079, C-06,114
%
% Copyright 2008. Los Alamos National Security, LLC. This material
% was produced under U.S. Government contract DE-AC52-06NA25396 for
% Los Alamos National Laboratory (LANL), which is operated by Los Alamos
% National Security, LLC for the U.S. Department of Energy. The U.S.
% Government has rights to use, reproduce, and distribute this software.
% NEITHER THE GOVERNMENT NOR LOS ALAMOS NATIONAL SECURITY, LLC MAKES ANY
% WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF
% THIS SOFTWARE. If software is modified to produce derivative works,
% such modified software should be clearly marked, so as not to confuse
% it with the version available from LANL.
% Additionally, this program is free software; you can redistribute it
% and/or modify it under the terms of the GNU General Public License as
% published by the Free Software Foundation; version 2.0 of the License.
% Accordingly, this program is distributed in the hope that it will be
% useful, but WITHOUT ANY WARRANTY; without even the implied warranty
% of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
% General Public License for more details.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function [a K]=gAnalyzePCA(y,y1)
[U S V]=svd(y,0);
K = (U*S)/sqrt(size(y,2));
a=cumsum(diag(S).^2); a=a/a(end);
figure(10); clf;
subplot(1,2,1); hold on; plot(a); axis([1 length(a) 0 1])
subplot(1,2,2); hold on; plot(a(1:min(end,10))); axis([1 10 0 1])
% add the mean absolute deviation of the simulators
if exist('y1')
for ii=1:size(K,1);
y1pcv(ii)=sum(abs(y1-K(:,1:ii)*(K(:,1:ii)\y1)));
ypcv(ii)=sum(sum(abs(y-K(:,1:ii)*(K(:,1:ii)\y))));
end
y1pcv=1-y1pcv/sum(abs(y1));
ypcv=1-ypcv/sum(abs(y(:)));
subplot(1,2,1); plot(ypcv,'g'); plot(y1pcv,'r');
legend({'variance explained','sim abs resid explained','obs abs resid explained'},'location','Best');
subplot(1,2,2); plot(ypcv(1:min(end,10)),'g'); plot(y1pcv(1:min(end,10)),'r');
legend({'variance explained','sim abs resid explained','obs abs resid explained'},'location','Best');
title('zoom on first 10 PCs')
end
figure(11); clf
PC=U*S;
for ii=1:5;
h(ii)=subplot(5,1,ii);
plot(PC(:,ii));
title(['PC ' num2str(ii)]);
end
axisNorm(h,'xymax');
figure(12); clf;
for ii=1:10;
h(ii)=subplot(10,1,ii);
K=U(:,1:ii)*S(1:ii,1:ii);
pc=K\y;
yhat=K*pc;
plot(yhat-y);
title(['reconstruction error with ' num2str(ii) ' PC']);
end;
axisNorm(h,'ymax')
|
github
|
AndrewCWalker/rsm_tool_suite-master
|
gLogNormalPrior.m
|
.m
|
rsm_tool_suite-master/Automated_RSM/MCMC/gpmsa/matlab/gLogNormalPrior.m
| 1,868 |
utf_8
|
34a389d335f9f51af952c938fcfcde8a
|
%function p = gLogNormalPrior(x,parms)
%
% Computes unscaled log normal pdf,
% sum of 1D distributions for each (x,parms) in the input vectors
% for use in prior likelihood calculation
% parms = [mean-vector standard-deviation-vector]
%
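% Example (a hedged sketch mirroring its use for the theta prior):
%   theta = [0.4 0.6];
%   parms = repmat([0.5 10], numel(theta), 1);   % [mean sd] per element
%   lp    = gLogNormalPrior(theta, parms);       % scalar unscaled log prior
%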
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Author: James R. Gattiker, Los Alamos National Laboratory
%
% This file was distributed as part of the GPM/SA software package
% Los Alamos Computer Code release LA-CC-06-079, C-06,114
%
% Copyright 2008. Los Alamos National Security, LLC. This material
% was produced under U.S. Government contract DE-AC52-06NA25396 for
% Los Alamos National Laboratory (LANL), which is operated by Los Alamos
% National Security, LLC for the U.S. Department of Energy. The U.S.
% Government has rights to use, reproduce, and distribute this software.
% NEITHER THE GOVERNMENT NOR LOS ALAMOS NATIONAL SECURITY, LLC MAKES ANY
% WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF
% THIS SOFTWARE. If software is modified to produce derivative works,
% such modified software should be clearly marked, so as not to confuse
% it with the version available from LANL.
% Additionally, this program is free software; you can redistribute it
% and/or modify it under the terms of the GNU General Public License as
% published by the Free Software Foundation; version 2.0 of the License.
% Accordingly, this program is distributed in the hope that it will be
% useful, but WITHOUT ANY WARRANTY; without even the implied warranty
% of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
% General Public License for more details.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function p = gLogNormalPrior(x,parms)
mu=parms(:,1); std=parms(:,2);
x=x(:);
p = - .5 * sum( ((x-mu)./std).^2 );
|
github
|
AndrewCWalker/rsm_tool_suite-master
|
axisNorm.m
|
.m
|
rsm_tool_suite-master/Automated_RSM/MCMC/gpmsa/matlab/axisNorm.m
| 3,170 |
utf_8
|
0db8d59bce06058f104cfb06a9cafea6
|
% function axisNorm(handles, mode, axisVals)
% Tool to set 2D plot axes to the same values.
% handles is a list of handles to the plots in question
% mode is combinations of 'x', 'y', and 'z', optionally followed by 'max'
% indicating which axes are to be set, and whether they are to be
% autoscaled to the outer bounds of all, or to the given values
% in axisVals.
% For example
% 'xmax' scales the x axis in all handles to the max bounds;
% 'xyzmax' scales all axes to their max enclosures
% 'xy' scales the x and y axes to values in axisVals
% axisVals, if supplied, has dummy values in unspecified positions
% 'imrange' mode normalizes the range of the images (that is,
% the CLim axis properties)
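% Example (illustrative): with h a vector of subplot handles,
%   axisNorm(h,'xymax');            % common x and y limits across subplots
%   axisNorm(h,'y',[0 0 -1 1]);     % set only the y limits from axisVals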
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Author: James R. Gattiker, Los Alamos National Laboratory
%
% This file was distributed as part of the GPM/SA software package
% Los Alamos Computer Code release LA-CC-06-079, C-06,114
%
% Copyright 2008. Los Alamos National Security, LLC. This material
% was produced under U.S. Government contract DE-AC52-06NA25396 for
% Los Alamos National Laboratory (LANL), which is operated by Los Alamos
% National Security, LLC for the U.S. Department of Energy. The U.S.
% Government has rights to use, reproduce, and distribute this software.
% NEITHER THE GOVERNMENT NOR LOS ALAMOS NATIONAL SECURITY, LLC MAKES ANY
% WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF
% THIS SOFTWARE. If software is modified to produce derivative works,
% such modified software should be clearly marked, so as not to confuse
% it with the version available from LANL.
% Additionally, this program is free software; you can redistribute it
% and/or modify it under the terms of the GNU General Public License as
% published by the Free Software Foundation; version 2.0 of the License.
% Accordingly, this program is distributed in the hope that it will be
% useful, but WITHOUT ANY WARRANTY; without even the implied warranty
% of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
% General Public License for more details.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function axisNorm(h, mode, ax)
h=h(:);
if strcmp(mode,'imrange')
clim=[inf -inf];
for ii=1:length(h);
cl=get(h(ii),'CLim');
clim(1)=min(clim(1),cl(1)); clim(2)=max(clim(2),cl(2));
end
set(h,'CLim',clim);
return
end
maxMode=0; xParm=0; yParm=0; zParm=0;
if regexp(mode,'max');
maxMode=1;
mode=mode(1:regexp(mode,'max')-1);
end
if regexp(mode,'x'); xParm=1; end
if regexp(mode,'y'); yParm=1; end
if regexp(mode,'z'); zParm=1; end
if maxMode % then determine the enclosing axes
axNum=length(axis(h(1)));
axMult=repmat([-1 1],1,axNum/2);
ax=-Inf*ones(1,axNum);
for ii=1:length(h)
ax=max([ax; axis(h(ii)).*axMult]);
end
ax=ax.*axMult;
end
for ii=1:length(h)
a=axis(h(ii));
if xParm
a([1 2])=ax([1 2]);
end
if yParm
a([3 4])=ax([3 4]);
end
if zParm
a([5 6])=ax([5 6]);
end
axis(h(ii),a);
end
|
github
|
AndrewCWalker/rsm_tool_suite-master
|
gGMICDF.m
|
.m
|
rsm_tool_suite-master/Automated_RSM/MCMC/gpmsa/matlab/gGMICDF.m
| 1,058 |
utf_8
|
a8ecc3f457ad1127cf36f1cc5da6f0f5
|
function icVals = gGMICDF(means,vars,cVals)
% function icVals = gGMICDF(means,vars,Cvals)
% compute inverse CDF of a gaussian mixture(s)
% each row of means and vars defines a mixture
% output icVals is (rows of means&vars) by (length of cVals)
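% Example (illustrative; the mixture parameters are made up): the 5th,
% 50th and 95th percentiles of an equal-weight mixture of N(0,1) and
% N(3,0.25):
%   icVals = gGMICDF([0 3],[1 0.25],[0.05 0.5 0.95]);
% Each mixture is evaluated on a 1e4-point grid spanning +/-4 sd of its
% components and the inverse CDF is read off the empirical CDF by linear
% interpolation.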
icVals=zeros(size(means,1),length(cVals));
sds=sqrt(vars);
for ii=1:size(means,1)
mingrid=min(means(ii,:)-4*sds(ii,:));
maxgrid=max(means(ii,:)+4*sds(ii,:));
grid=linspace(mingrid,maxgrid,1e4);
fullMix=zeros(size(means,2),length(grid));
for jj=1:size(means,2);
fullMix(jj,:)=gNormpdf(grid,means(ii,jj),sds(ii,jj));
end
mm=sum(fullMix);
icVals(ii,:)=empiricalICDF(grid,mm,cVals);
end
end
function icdf=empiricalICDF(grid,updf,cdfVals)
ecdf=cumsum(updf)/sum(updf);
icdf=zeros(size(cdfVals));
for ii=1:length(cdfVals)
cLoc=find(ecdf>cdfVals(ii),1);
if isempty(cLoc)
icdf(ii)=grid(end);
elseif cLoc==1
icdf(ii)=grid(1);
else
icdf(ii)=interp1(ecdf([cLoc-1 cLoc]),grid([cLoc-1,cLoc]),cdfVals(ii));
end
end
end
|
github
|
AndrewCWalker/rsm_tool_suite-master
|
computeLogLik.m
|
.m
|
rsm_tool_suite-master/Automated_RSM/MCMC/gpmsa/matlab/computeLogLik.m
| 7,999 |
utf_8
|
cab080bb792a424e4b553052e849bdc1
|
% function model = computeLogLik(model,data,C)
%
% Builds the log likelihood of the data given the model parameters.
%
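% The argument C names the parameter that changed (C.var, with C.index
% where relevant) so that only the covariance pieces that depend on it
% are recomputed. For example (illustrative):
%   C.var='all';              model=computeLogLik(model,data,C); % full build
%   C.var='lamUz'; C.index=2; model=computeLogLik(model,data,C); % partial update
% The result is stored in model.logLik.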
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Author: James R. Gattiker, Los Alamos National Laboratory
%
% This file was distributed as part of the GPM/SA software package
% Los Alamos Computer Code release LA-CC-06-079, C-06,114
%
% Copyright 2008. Los Alamos National Security, LLC. This material
% was produced under U.S. Government contract DE-AC52-06NA25396 for
% Los Alamos National Laboratory (LANL), which is operated by Los Alamos
% National Security, LLC for the U.S. Department of Energy. The U.S.
% Government has rights to use, reproduce, and distribute this software.
% NEITHER THE GOVERNMENT NOR LOS ALAMOS NATIONAL SECURITY, LLC MAKES ANY
% WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF
% THIS SOFTWARE. If software is modified to produce derivative works,
% such modified software should be clearly marked, so as not to confuse
% it with the version available from LANL.
% Additionally, this program is free software; you can redistribute it
% and/or modify it under the terms of the GNU General Public License as
% published by the Free Software Foundation; version 2.0 of the License.
% Accordingly, this program is distributed in the hope that it will be
% useful, but WITHOUT ANY WARRANTY; without even the implied warranty
% of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
% General Public License for more details.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function model = computeLogLik(model,data,C)
n=model.n; m=model.m;
pu=model.pu; pv=model.pv;
p=model.p; q=model.q;
lamVzGnum=model.lamVzGnum; lamVzGroup=model.lamVzGroup;
% validate and process the changed field
do.theta=0;do.betaV=0;do.lamVz=0;do.betaU=0;do.lamUz=0;
do.lamWs=0;do.lamWOs=0;
if strcmp(C.var,'theta') || strcmp(C.var,'all') % update distances
model.xDist=genDist([data.x repmat(model.theta,n,1)]);
model.xzDist=genDist2([data.x repmat(model.theta,n,1)],data.zt);
end
switch C.var
case 'all'
do.theta=1; do.betaV=1; do.lamVz=1; do.betaU=1;
do.lamUz=1; do.lamWs=1; do.lamWOs=1; do.lamOs=1;
model.SigWl=zeros(pu,1);
case 'theta'; do.theta=1;
case 'betaV'; do.betaV=1;
case 'lamVz'; do.lamVz=1;
case 'betaU'; do.betaU=1;
case 'lamUz'; do.lamUz=1;
case 'lamWs'; do.lamWs=1;
case 'lamWOs'; do.lamWOs=1;
case 'lamOs'; do.lamOs=1;
otherwise
%error('Invalid Subtype in computeLogLik');
end
betaV=model.betaV; lamVz=model.lamVz;
betaU=model.betaU; lamUz=model.lamUz;
lamWs=model.lamWs; lamWOs=model.lamWOs; lamOs=model.lamOs;
% Four parts to compute: Sig_v, Sig_u, Sig_w, and the Sig_uw crossterm
if (do.theta || do.betaV || do.lamVz)
SigV=[];
for jj=1:lamVzGnum
SigV(jj).mat=gCovMat(model.x0Dist, betaV(:,jj), lamVz(jj));
end
model.SigV=SigV;
else
SigV=model.SigV;
end
if (do.theta || do.betaU || do.lamUz || do.lamWs)
SigU(pu).mat=[];
diags1=diagInds(n);
for jj=1:pu
SigU(jj).mat=gCovMat(model.xDist,betaU(:,jj),lamUz(jj));
SigU(jj).mat(diags1)=SigU(jj).mat(diags1)+1/lamWs(jj);
end
model.SigU=SigU;
else
SigU=model.SigU;
end
if (do.betaU || do.lamUz || do.lamWs || do.lamWOs)
diags2=diagInds(m);
switch C.var
case 'all'
jinds=1:pu;
case 'betaU'
jinds=ceil( C.index/(p+q) );
case {'lamUz','lamWs'}
jinds=C.index;
case 'lamWOs'
jinds=1:pu;
end
for jj=jinds
if isempty(data.ztSep)
cg=gCovMat(model.ztDist,betaU(:,jj),lamUz(jj));
cg(diags2)=cg(diags2)+1/(model.LamSim(jj)*lamWOs) + 1/lamWs(jj);
% calculate the SigW likelihood for each block
model.SigWl(jj)=doLogLik(cg,model.w((jj-1)*m+1:jj*m));
% calculate the SigW inverse for each block
model.SigWi(jj).mat=inv(cg);
else
% there is a separable design, so compute these as kron'ed blocks
segVarStart=1;
for ii=1:length(data.ztSep)
segVars=segVarStart:(segVarStart + model.ztSepDist{ii}.p-1);
segVarStart=segVarStart+ model.ztSepDist{ii}.p;
cg{ii}=gCovMat(model.ztSepDist{ii},betaU(segVars,jj),lamUz(jj));
end
cgNugget=1/(model.LamSim(jj)*lamWOs) + 1/lamWs(jj);
[model.SigWl(jj) model.SigWi(jj).mat]= ...
doLogLikSep(cg,cgNugget,model.w((jj-1)*m+1:jj*m));
end
end
end
if (do.theta || do.betaU || do.lamUz)
SigUW(pu).mat=[];
for jj=1:pu
SigUW(jj).mat=gCovMat(model.xzDist,betaU(:,jj),lamUz(jj));
end
model.SigUW=SigUW;
else
SigUW=model.SigUW;
end
% The computation is decomposed into the likelihood of W,
% and the likelihood of VU|W.
% Compute the likelihood of the W part (already done the blocks)
LogLikW=sum(model.SigWl);
% Compute the likelihood of the VU|W
% This requires using the gaussian model estimation stuff.
% It is complicated because of shortcuts allowed by lack of correlation
% between W and V
% do these ops, on the block diagonal blocks:
% W=SigUW*model.SigWi;
% SigUgW=SigU-W*SigUW';
W(pu).mat=[];
SigUgW(pu).mat=[];
for ii=1:pu
W(ii).mat=SigUW(ii).mat*model.SigWi(ii).mat;
SigUgW(ii).mat=SigU(ii).mat-W(ii).mat*SigUW(ii).mat';
end
%for scalar output: SigVUgW=[SigV+SigUgW] ...
% + model.SigObs/lamOs;
%otherwise: SigVUgW=[SigV zeros(n*pv,n*pu); ...
% zeros(n*pu,n*pv) SigUgW ] ...
% + model.SigObs/lamOs;
SigVUgW=model.SigObs/lamOs;
for ii=1:pv
blkRange=(ii-1)*n+1:ii*n;
SigVUgW(blkRange,blkRange)=SigVUgW(blkRange,blkRange)+ ...
SigV(lamVzGroup(ii)).mat;
end
if model.scOut
for ii=1:pu
blkRange=(ii-1)*n+1:ii*n;
SigVUgW(blkRange,blkRange)=SigVUgW(blkRange,blkRange)+SigUgW(ii).mat;
end
else
for ii=1:pu
blkRange=n*pv+[(ii-1)*n+1:ii*n];
SigVUgW(blkRange,blkRange)=SigVUgW(blkRange,blkRange)+SigUgW(ii).mat;
end
end
% do this op: MuVUgW =W*model.w;
MuVUgW=zeros(n*pu,1);
for ii=1:pu
blkRange1=(ii-1)*n+1:ii*n;
blkRange2=(ii-1)*m+1:ii*m;
MuVUgW(blkRange1)=W(ii).mat*model.w(blkRange2);
end
% for scalar output: MuDiff= [u] - [MuVUgW]
% otherwise: MuDiff= [v;u] - [0;MuVUgW]
if model.scOut
MuDiff=model.u; MuDiff=MuDiff-MuVUgW;
else
MuDiff=model.vu; MuDiff(pv*n+1:end)=MuDiff(pv*n+1:end)-MuVUgW;
end
% Now we can get the LL of VU|W
LogLikVUgW=doLogLik(SigVUgW,MuDiff);
% Final Answer, LL(VU) = LL(VU|W) + LL(W)
model.logLik=LogLikVUgW+LogLikW;
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function [L chCov]=doLogLik(Sigma,data)
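% Multivariate normal log likelihood, up to the constant -0.5*n*log(2*pi),
% computed via a Cholesky factorization:
%   L = -log|Sigma|^(1/2) - 0.5*data'*inv(Sigma)*data
% with the quadratic form obtained from a triangular solve rather than an
% explicit inverse.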
chCov=chol(Sigma);
logDet=sum(log(diag(chCov))); % actually, the log sqrt(det)
p1=(chCov')\data;
L=-logDet-0.5*(p1'*p1);
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function [L Sinv]=doLogLikSep(Sigma,nugget,data)
% get the eigenDecomp for the blocks
for ii=1:length(Sigma)
[V{ii} D{ii}]=eig(Sigma{ii});
end
% compute the determinant from the eigenvalues and the nugget
dkron=diag(D{end});
for ii=(length(Sigma)-1):-1:1
dkron=kron(diag(D{ii}),dkron);
end
logDet=log(prod(dkron+nugget));
% compute the composite inverse, including the nugget
vkron=V{end};
for ii=(length(Sigma)-1):-1:1
vkron=kron(V{ii},vkron);
end
Sinv=vkron * diag(1./(dkron+nugget)) * vkron';
% compute the log likelihood
L=-0.5*logDet-0.5*data'*Sinv*data;
end
|
github
|
AndrewCWalker/rsm_tool_suite-master
|
gpmmcmc.m
|
.m
|
rsm_tool_suite-master/Automated_RSM/MCMC/gpmsa/matlab/gpmmcmc.m
| 21,190 |
utf_8
|
0c98b80fe8dff8fc32c33dc786814458
|
% function [params hierParams] = gpmmcmc(params,nmcmc,varargin)
% params - a parameters struct or array of structs
% nmcmc - number of full draws to perform (overridden for stepInit mode)
% varargs are in string/value pairs
% 'noCounter' - default 0, 1 ==> do not output a counter of iterations
% 'step' - default 0, 1 ==> specified step size mode
% 'initOnly' - only do & return the precomputaton of partial results
% 'noInit' - initialization is not necessary, skip precomputation
% 'clist' - for multiple models, the description of common thetas
% each row is for one linked variable (theta). A row is a
% list of indicators the same length as the number of
% models (the params array). A zero indicates the
% corresponding theta is not in the corresponding model, a
% nonzero entry indicates the index of that theta in that
% model.
% 'hierParams'- parameter structure for hierarchical model linking of
% theta parameters in joint models.
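% Example (illustrative; params is assumed to come from the package's
% model setup routine):
%   params = gpmmcmc(params,500); % 500 draws with adaptive lambda steps
%   [params,hierParams] = gpmmcmc(params,500,'hierParams',hierParams);
% Draws are appended to params(i).pvals, and per-variable acceptance
% indicators are recorded as fields such as pvals.thetaAcc.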
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Author: James R. Gattiker, Los Alamos National Laboratory
% Brian Williams, Los Alamos National Laboratory
%
% This file was distributed as part of the GPM/SA software package
% Los Alamos Computer Code release LA-CC-06-079, C-06,114
%
% Copyright 2008. Los Alamos National Security, LLC. This material
% was produced under U.S. Government contract DE-AC52-06NA25396 for
% Los Alamos National Laboratory (LANL), which is operated by Los Alamos
% National Security, LLC for the U.S. Department of Energy. The U.S.
% Government has rights to use, reproduce, and distribute this software.
% NEITHER THE GOVERNMENT NOR LOS ALAMOS NATIONAL SECURITY, LLC MAKES ANY
% WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF
% THIS SOFTWARE. If software is modified to produce derivative works,
% such modified software should be clearly marked, so as not to confuse
% it with the version available from LANL.
% Additionally, this program is free software; you can redistribute it
% and/or modify it under the terms of the GNU General Public License as
% published by the Free Software Foundation; version 2.0 of the License.
% Accordingly, this program is distributed in the hope that it will be
% useful, but WITHOUT ANY WARRANTY; without even the implied warranty
% of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
% General Public License for more details.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function [params hierParams] = gpmmcmc(params,nmcmc,varargin)
% Grab to local variable
numMods=length(params);
% Process input arguments
noCounter=0; clist=zeros(0,numMods);
step=0; initOnly=0; noInit=0;
hierParams=[];
parseAssignVarargs({'noCounter', 'clist','hierParams', ...
'step','initOnly','noInit'});
% Backwards compatibility
for modi=1:numMods
% for zt as a single field
if ~isfield(params(modi).data,'zt');
params(modi).data.zt=[params(modi).data.z params(modi).data.t];
end
% for separable design field (indicator)
if ~isfield(params(modi).data,'ztSep'); params(modi).data.ztSep=[]; end
end
% if there is a hierarchical model, seed the models' priors.
params=copyHPriors(params,hierParams);
% Initialize the models
if ~noInit
for modi=1:numMods
% computing the likelihood sets up partial results inside the model structure
C.var='all';
params(modi).model=computeLogLik(params(modi).model,params(modi).data,C);
params(modi).model=computeLogPrior(params(modi).priors,params(modi).mcmc,...
params(modi).model);
params(modi).model.logPost=params(modi).model.logPrior+params(modi).model.logLik;
end
end
if initOnly; return; end
% initialize the structure that will record draw info
for modi=1:numMods
if numel(params(modi).pvals);
pvals=params(modi).pvals; poff=length(pvals);
else
for var=params(modi).mcmc.pvars; params(modi).pvals(1).(var{1})=0; end
for var=params(modi).mcmc.svars; params(modi).pvals(1).([var{1} 'Acc'])=0; end;
poff=0;
end
params(modi).pvals(poff+nmcmc)=params(modi).pvals(1);
end
% pull out the minimal subset data structure to pass around
for modi=1:numMods;
subParams(modi).model=params(modi).model;
subParams(modi).data=params(modi).data;
subParams(modi).priors=params(modi).priors;
subParams(modi).mcmc=params(modi).mcmc;
end
% Counter will be used and displayed if we are not in linked model mode
if ~noCounter; counter('stime',1,nmcmc,10,10); end
% Do mcmc draws
for iter=1:nmcmc
if ~noCounter; counter(iter); end
for modi=1:numMods
% Get some local vars picked out
svars=subParams(modi).mcmc.svars; svarSize=subParams(modi).mcmc.svarSize;
wvars=subParams(modi).mcmc.wvars;
for varNum=1:length(svars)
C.var=svars{varNum};
C.aCorr=1; % default is no step correction.
switch(C.var)
case {'theta'}
for k=1:svarSize(varNum)
C.index=k;C.val=subParams(modi).model.(C.var)(k) + ...
(rand(1)-.5)*subParams(modi).mcmc.(wvars{varNum})(k);
subParams=mcmcEval(subParams,modi,C,clist);
acc.(C.var)(k)=subParams(modi).model.acc;
end
case {'betaV','betaU'}
for k=1:svarSize(varNum)
cand = exp(-subParams(modi).model.(svars{varNum})(k).*(.5^2))+ ...
(rand(1)-.5)*subParams(modi).mcmc.(wvars{varNum})(k);
C.index=k;C.val=-log(cand)/(0.5^2);
subParams=mcmcEval(subParams,modi,C,clist);
acc.(C.var)(k)=subParams(modi).model.acc;
end
case {'lamVz','lamUz','lamWs','lamWOs','lamOs'}
for k=1:svarSize(varNum)
if ~step
C.index=k;
[C.val C.aCorr]=chooseVal(subParams(modi).model.(C.var)(k));
subParams(modi).model.acc=0; %might not call eval
if C.aCorr;
subParams=mcmcEval(subParams,modi,C,clist);
end
else
C.index=k;C.val=subParams(modi).model.(C.var)(k) + ...
(rand(1)-.5)*subParams(modi).mcmc.(wvars{varNum})(k);
subParams=mcmcEval(subParams,modi,C,clist);
end
acc.(C.var)(k)=subParams(modi).model.acc;
end
otherwise
error('Unknown sample variable in gpmmcmc mcmcStep')
end
end
% Save the designated fields
for varName=subParams(modi).mcmc.pvars
params(modi).pvals(poff+iter).(varName{1})=...
subParams(modi).model.(varName{1})(:);
end
for varName=subParams(modi).mcmc.svars
params(modi).pvals(poff+iter).([varName{1} 'Acc'])=acc.(varName{1})(:);
end
end % going through the models
% if there is a hierarchical models structure, perform sampling
if ~isempty(hierParams)
[subParams hierParams] = mcmcHier(subParams,hierParams,step,poff+iter);
for hi=1:length(hierParams);
for vi=1:length(hierParams(hi).vars)
modi=hierParams(hi).vars(vi).modNum;
varNum=hierParams(hi).vars(vi).varNum;
if hierParams(hi).pvals(poff+iter).acc(3)
params(modi).pvals(poff+iter).theta(varNum)=...
subParams(modi).model.theta(varNum);
end
end
end
end
end % going through the iterations
if ~noCounter; counter('end'); end % end the counter
% recover the main data structure
for modi=1:numMods;
params(modi).model= subParams(modi).model;
params(modi).data= subParams(modi).data;
params(modi).priors=subParams(modi).priors;
end
% And that's it for the main function ....
end %main function gaspmcmc
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function [params hP] = mcmcHier(params,hP,step,logi)
pr=0;
oldPriorStyle=0;
% if there is a hierarchical model on thetas, this is where
% the hyperparameters are sampled and updated
% go through all hierarchical models specified
for hi=1:length(hP)
reject=zeros(7,1);
% First, sample the hyperparameters
% generate a candidate draw and evaluate for mean
cand=hP(hi).model.mean + (rand(1)-.5)*hP(hi).mcmc.meanWidth;
if cand<hP(hi).priors.mean.bLower || cand>hP(hi).priors.mean.bUpper
if pr; fprintf(' Reject, out of bounds 1\n'); end
reject(1)=1;
end
if ~reject(1)
constraintsOK=1;
for jj=1:length(hP(hi).vars)
modind=hP(hi).vars(jj).modNum; varNum=hP(hi).vars(jj).varNum;
theta=params(modind).priors.theta.params(:,1); theta(varNum)=cand;
for const=params(modind).priors.theta.constraints
constraintsOK=constraintsOK & eval(const{1});
end
end
if ~constraintsOK
if pr
fprintf(' Reject, from hierarchical mean constraint set 1\n');
end
reject(1)=1;
end
end
if ~reject(1)
[params hP reject(2)]=evalHierDraw(cand,hP(hi).model.lam,1,params,hP,hi);
end
% generate a candidate draw and evaluate for lam
aCorr=1;
if ~step
[cand aCorr]=chooseVal(hP(hi).model.lam);
else
cand=hP(hi).model.lam+(rand(1)-.5)*hP(hi).mcmc.lamWidth;
end
if cand<hP(hi).priors.lam.bLower || cand>hP(hi).priors.lam.bUpper
if pr; fprintf(' Reject, out of bounds 3\n'); end
reject(3)=1;
else
[params hP reject(4)]=evalHierDraw(hP(hi).model.mean,cand,aCorr,params,hP,hi);
end
% Second, try a lockstep update of the hierarchical and individual
% theta means (to avoid shrinkage overstability problems) This moves
% all of the points and the h.model mean by the same shift, so the
% only changes are the models' likelihoods and the hier mean prior
candDelta=(rand(1)-.5)*hP(hi).mcmc.lockstepMeanWidth;
% candidate for the hierarchical mean
newHmean=hP(hi).model.mean+ candDelta;
if pr
fprintf('candDelta=%f, old=%f new=%f; ', ...
candDelta, hP(hi).model.mean,newHmean);
end
% Check bounds
if newHmean < hP(hi).priors.mean.bLower || ...
newHmean > hP(hi).priors.mean.bUpper
if pr; fprintf(' Reject, out of bounds 5\n'); end
reject(5)=1;
end
if ~reject(5)
constraintsOK=1;
for jj=1:length(hP(hi).vars)
modind=hP(hi).vars(jj).modNum; varNum=hP(hi).vars(jj).varNum;
theta=params(modind).priors.theta.params(:,1);
theta(varNum)=newHmean;
for const=params(modind).priors.theta.constraints
constraintsOK=constraintsOK & eval(const{1});
end
end
if ~constraintsOK
if pr
fprintf(' Reject, from hierarchical mean constraint set 5\n');
end
reject(5)=1;
end
end
if ~reject(5)
% compute the updated Normal prior for the hier. model mean
if oldPriorStyle
curHPrior=-0.5*( (hP(hi).model.mean-hP(hi).priors.mean.mean)./ ...
(hP(hi).priors.mean.std) ).^2;
newHPrior=-0.5*( (newHmean-hP(hi).priors.mean.mean)./ ...
(hP(hi).priors.mean.std) ).^2;
else
curHPrior=gLogNormalPrior(hP(hi).model.mean, ...
[hP(hi).priors.mean.mean hP(hi).priors.mean.std]);
newHPrior=gLogNormalPrior(newHmean, ...
[hP(hi).priors.mean.mean hP(hi).priors.mean.std]);
end
if pr
fprintf('prior from %f to %f; ',curHPrior,newHPrior);
end
% Compute the updated likelihood for the associated models
C.var='theta';
for jj=1:length(hP(hi).vars)
modind=hP(hi).vars(jj).modNum; varNum=hP(hi).vars(jj).varNum;
modelT(jj)=params(modind).model;
curLik(jj)=modelT(jj).logLik;
modelT(jj).theta(varNum)=modelT(jj).theta(varNum)+candDelta;
% check bounds for the model
if modelT(jj).theta(varNum) < params(modind).priors.theta.bLower || ...
modelT(jj).theta(varNum) > params(modind).priors.theta.bUpper
if pr; fprintf(' Reject, out of bounds 6\n'); end
reject(6)=1;
end
theta=modelT(jj).theta';
constraintsOK=1;
for const=params(modind).priors.theta.constraints
constraintsOK=constraintsOK & eval(const{1});
end
if ~constraintsOK
if pr; fprintf(' Reject, from theta constraint set 6\n'); end
reject(6)=1;
end
if ~reject(6)
% set the var and compute the new likelihood
modelT(jj)=computeLogLik(modelT(jj),params(modind).data,C);
% extract the new likelihood value
newLik(jj)=modelT(jj).logLik;
end
end
end
if ~any(reject(5:6))
% Add up the priors and likelihoods
oldPost=sum(curLik)+curHPrior;
newPost=sum(newLik)+newHPrior;
if pr
fprintf('lik from %f to %f; ',sum(curLik),sum(newLik));
end
% Test acceptance and update
if log(rand)<(newPost-oldPost)
% record the new hier model mean
hP(hi).model.mean=newHmean;
for jj=1:length(hP(hi).vars)
modind=hP(hi).vars(jj).modNum; varNum=hP(hi).vars(jj).varNum;
% Update models, after adding up the correct posterior lik
modelT(jj).logPost=modelT(jj).logLik+modelT(jj).logPrior;
params(modind).model=modelT(jj);
% Update model prior mean as the new hyper param mean
params(modind).priors.theta.params(varNum,1)= newHmean;
end
else
reject(7)=1;
end
end
if pr; if ~any(reject); fprintf('accept \n');
else fprintf('reject %d\n',find(reject)); end
end
hP(hi).pvals(logi).mean=hP(hi).model.mean;
hP(hi).pvals(logi).lam=hP(hi).model.lam;
hP(hi).pvals(logi).acc=[~any(reject(1:2)) ~any(reject(3:4)) ...
~any(reject(5:7))]';
end
end % function mcmcHier
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% evaluate whether to accept a change to hier priors.
function [params hP reject]=evalHierDraw(mean,lam,aCorr,params,hP,hi)
oldPriorStyle=0;
% go through each linked var, find out what the updated prior is
for vari=1:length(hP(hi).vars)
modind=hP(hi).vars(vari).modNum; varNum=hP(hi).vars(vari).varNum;
curPrior(vari)=params(modind).model.logPrior;
priorsT(vari)=params(modind).priors;
priorsT(vari).theta.params(varNum,1)=mean;
priorsT(vari).theta.params(varNum,2)=sqrt(1/lam);
modelT=computeLogPrior(priorsT(vari),...
params(hP(hi).vars(vari).modNum).mcmc, ...
params(hP(hi).vars(vari).modNum).model);
newPrior(vari)=modelT.logPrior;
end
% compute the hierarchical prior parameter likelihoods
% mean is a normal prior
% lambda is a gamma prior
if oldPriorStyle
curHPrior=0.5* ( (hP(hi).model.mean-hP(hi).priors.mean.mean)./ ...
(hP(hi).priors.mean.std) ).^2;
newHPrior=0.5* ( (mean-hP(hi).priors.mean.mean)./ ...
(hP(hi).priors.mean.std) ).^2;
curHPrior=curHPrior+(hP(hi).priors.lam.a-1).*log(hP(hi).model.lam)- ...
hP(hi).priors.lam.b*hP(hi).model.lam;
newHPrior=newHPrior+(hP(hi).priors.lam.a-1).*log(lam)- ...
hP(hi).priors.lam.b*lam;
else
curHPrior=gLogNormalPrior(hP(hi).model.mean, ...
[hP(hi).priors.mean.mean hP(hi).priors.mean.std]);
newHPrior=gLogNormalPrior(mean, ...
[hP(hi).priors.mean.mean hP(hi).priors.mean.std]);
curHPrior=curHPrior+gLogGammaPrior(hP(hi).model.lam, ...
[hP(hi).priors.lam.a hP(hi).priors.lam.b]);
newHPrior=newHPrior+gLogGammaPrior(lam, ...
[hP(hi).priors.lam.a hP(hi).priors.lam.b]);
end
% sum up the priors
oldLogPrior=sum(curPrior) + curHPrior;
newLogPrior=sum(newPrior) + newHPrior;
% check for acceptance
if ( log(rand)<(newLogPrior-oldLogPrior + log(aCorr)) )
reject=0;
% accept! record the current vals
hP(hi).model.mean=mean; hP(hi).model.lam=lam;
% put stuff back into the submodel prior structs, update the
% calculated prior and posterior lik.
for vari=1:length(hP(hi).vars)
modind=hP(hi).vars(vari).modNum;
params(modind).priors=priorsT(vari);
params(modind).model.logPrior=newPrior(vari);
params(modind).model.logPost=newPrior(vari)+params(modind).model.logLik;
end
else
reject=1;
end
end % function EvalHierDraw
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function params=mcmcEval(params,modi,C,clist)
params(modi).model.acc=0; % default status is to not accept
model=params(modi).model; data=params(modi).data;
priors=params(modi).priors; mcmc=params(modi).mcmc;
pr=0; % print diagnostics
if pr; fprintf('%s %2d %.2f ',C.var,C.index,C.val); end
if pr; fprintf(':: %.2f %.2f ',model.logLik,model.logPrior); end
% If var is a theta, must check whether it is linked to other models
thisVarLinks=[];
if strcmp(C.var,'theta')
thisVarLinks=clist(clist(:,modi)==C.index,:);
if any(thisVarLinks) & ~all(thisVarLinks(1:modi-1)==0)
% only the first in round robin samples the linked variable
if pr; fprintf(' Skipping linked var, index %d\n',C.index); end
return
end
end
% Check hard parameter bounds.
if (C.val<priors.(C.var).bLower || ...
priors.(C.var).bUpper<C.val || ...
~isreal(C.val));
if pr; fprintf(' Reject, out of bounds\n'); end
return
end
modelT=model;
modelT.(C.var)(C.index)=C.val;
modelT=computeLogPrior(priors,mcmc,modelT);
modelT=computeLogLik(modelT,data,C);
modelT.logPost=modelT.logPrior+modelT.logLik;
% if theta, consult the constraint function
if strcmp(C.var,'theta')
theta=modelT.theta';
constraintsOK=1;
for const=priors.theta.constraints
constraintsOK=constraintsOK & eval(const{1});
end
if ~constraintsOK
if pr; fprintf(' Reject, from theta constraint set\n'); end
return
end
end
% If we are in a linked model, compute the likelihood correction
lOldLik=[]; lNewLik=[];
linkInds=find(thisVarLinks~=0); %all the links
if linkInds; linkInds(find(linkInds==modi))=[]; end %this mod's doesn't count as a link
for link=1:length(linkInds)
lModelT(link)=params(linkInds(link)).model;
lOldLik(link)=lModelT(link).logLik;
lModelT(link).theta(thisVarLinks(linkInds(link)))=modelT.theta(C.index);
D.var='theta';
lModelT(link)=computeLogLik(lModelT(link),params(linkInds(link)).data,D);
lNewLik(link)=lModelT(link).logLik;
if pr;
fprintf('\n Linked vars; LL of model %d var %d from %8.5f to %8.5f', ...
linkInds(link),thisVarLinks(linkInds(link)),lOldLik(link),lNewLik(link));
end
end
if pr && ~isempty(linkInds); fprintf('\n'); end
oldLogPost=model. logPost + sum(lOldLik);
newLogPost=modelT.logPost + sum(lNewLik);
if pr; fprintf(':: %.2f %.2f ',modelT.logLik,modelT.logPrior); end
if pr && ~isempty(linkInds);
   fprintf('\n links change LL as old %f to new %f',oldLogPost,newLogPost);
end
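  % Metropolis-Hastings accept/reject: the candidate is kept with
  % probability min(1, exp(newLogPost-oldLogPost)*C.aCorr), where C.aCorr
  % is the proposal correction returned by chooseVal (1 for symmetric
  % proposals).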
if ( log(rand)<(newLogPost-oldLogPost + log(C.aCorr)) )
if pr; fprintf(' Accept \n'); end
model=modelT;
model.acc=1;
% if we are in a linked model, update the linked theta vals & mods
for link=1:length(linkInds)
lModelT(link)=computeLogPrior(params(linkInds(link)).priors,...
params(linkInds(link)).mcmc,lModelT(link));
lModelT(link).logPost=lModelT(link).logPrior+lModelT(link).logLik;
params(linkInds(link)).model=lModelT(link);
if pr; fprintf('updated linked model %d\n',linkInds(link)); end
end
else
if pr; fprintf(' Reject \n'); end
end
params(modi).model=model;
end % function mcmcEval
%%%%%%%%%%%%%%%%%%%%%%%%%%
function [dval,acorr]=chooseVal(cval)
% select the interval, and draw a new value.
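  % The candidate is uniform on an interval of half-width w=max(1,cval/3)
  % centered at the current value, so the proposal widens as the parameter
  % grows. Because the reverse move uses the width implied by the candidate,
  % the Hastings correction is the ratio of the two widths, acorr=w/w1
  % (or 0 when the reverse move could not have produced cval).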
w=max(1,cval/3);
dval=cval + (rand*2-1)*w;
% do a correction, which depends on the old and new interval
w1=max(1,dval/3);
if cval > (dval+w1)
acorr=0;
else
acorr=w/w1;
end
% fprintf('cval=%10.4f, dval=%10.4f, acorr=%10.4f\n',cval,dval,acorr)
end
%%%%%%%%%%%%%%%%%%%%%%%%%%
function params=copyHPriors(params,hP)
% if there is a hierarchical model, sync the models' priors.
for ii=1:length(hP)
for jj=1:length(hP(ii).vars)
modind=hP(ii).vars(jj).modNum; varNum=hP(ii).vars(jj).varNum;
params(modind).priors.theta.params(varNum,1)=hP(ii).model.mean;
params(modind).priors.theta.params(varNum,2) =sqrt(1/hP(ii).model.lam);
end
end
end
|
github
|
AndrewCWalker/rsm_tool_suite-master
|
gPred.m
|
.m
|
rsm_tool_suite-master/Automated_RSM/MCMC/gpmsa/matlab/gPred.m
| 2,011 |
utf_8
|
c55122a8c3a1f97dc4ace2271cd35a75
|
%function pred=gPred(xpred,pvals,model,data,mode,theta)
% Predict using a gpmsa constructed model.
% this is an interface to the new gPredict for backward compatibility
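% Example (illustrative; pout is assumed to be the structure returned by
% the MCMC run):
%   pred = gPred(0.5, pout.pvals(100:500), pout.model, pout.data, 'uvpred');
% which forwards to gPredict with 'returnMuSigma' enabled.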
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Author: James R. Gattiker, Los Alamos National Laboratory
%
% This file was distributed as part of the GPM/SA software package
% Los Alamos Computer Code release LA-CC-06-079, C-06,114
%
% Copyright 2008. Los Alamos National Security, LLC. This material
% was produced under U.S. Government contract DE-AC52-06NA25396 for
% Los Alamos National Laboratory (LANL), which is operated by Los Alamos
% National Security, LLC for the U.S. Department of Energy. The U.S.
% Government has rights to use, reproduce, and distribute this software.
% NEITHER THE GOVERNMENT NOR LOS ALAMOS NATIONAL SECURITY, LLC MAKES ANY
% WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF
% THIS SOFTWARE. If software is modified to produce derivative works,
% such modified software should be clearly marked, so as not to confuse
% it with the version available from LANL.
% Additionally, this program is free software; you can redistribute it
% and/or modify it under the terms of the GNU General Public License as
% published by the Free Software Foundation; version 2.0 of the License.
% Accordingly, this program is distributed in the hope that it will be
% useful, but WITHOUT ANY WARRANTY; without even the implied warranty
% of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
% General Public License for more details.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function pred=gPred(xpred,pvals,model,data,mode,theta)
if strcmp(mode,'etamod');
mode='wpred';
theta=[];
end
if exist('theta','var');
pred=gPredict(xpred,pvals,model,data,'mode',mode,'theta',theta,'returnMuSigma',1);
else
pred=gPredict(xpred,pvals,model,data,'mode',mode,'returnMuSigma',1);
end
end
|
github
|
AndrewCWalker/rsm_tool_suite-master
|
setupDefaultHierParams.m
|
.m
|
rsm_tool_suite-master/Automated_RSM/MCMC/gpmsa/matlab/setupDefaultHierParams.m
| 3,063 |
utf_8
|
69fff5dc392d1cc5a1bc4f30f9d4a835
|
% This defines a hierarchical model parameter structure as an example.
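% Example (illustrative): take the defaults below, adjust the vars, priors,
% and mcmc fields for the analysis at hand, and pass the result to gpmmcmc:
%   hierParams = setupDefaultHierParams;
%   [params,hierParams] = gpmmcmc(params,1000,'hierParams',hierParams);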
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Author: James R. Gattiker, Los Alamos National Laboratory
%
% This file was distributed as part of the GPM/SA software package
% Los Alamos Computer Code release LA-CC-06-079, C-06,114
%
% Copyright 2008. Los Alamos National Security, LLC. This material
% was produced under U.S. Government contract DE-AC52-06NA25396 for
% Los Alamos National Laboratory (LANL), which is operated by Los Alamos
% National Security, LLC for the U.S. Department of Energy. The U.S.
% Government has rights to use, reproduce, and distribute this software.
% NEITHER THE GOVERNMENT NOR LOS ALAMOS NATIONAL SECURITY, LLC MAKES ANY
% WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF
% THIS SOFTWARE. If software is modified to produce derivative works,
% such modified software should be clearly marked, so as not to confuse
% it with the version available from LANL.
% Additionally, this program is free software; you can redistribute it
% and/or modify it under the terms of the GNU General Public License as
% published by the Free Software Foundation; version 2.0 of the License.
% Accordingly, this program is distributed in the hope that it will be
% useful, but WITHOUT ANY WARRANTY; without even the implied warranty
% of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
% General Public License for more details.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function hierParams=setupDefaultHierParams
% Hier is a struct array, each element represents one hierarchical
% model that addresses variables from models in a joint model analysis
% the links define the variables. This example creates a hierarchical model
% with two variables, the first variable (theta) in models 1 and 2.
hierParams(1).vars(1).modNum=1;
hierParams(1).vars(1).varNum=1;
hierParams(1).vars(2).modNum=2;
hierParams(1).vars(2).varNum=1;
% a starting point and a stored location for the hierarchical model
% the hierarchical distribution is a normal, with mean and precision
hierParams(1).model.mean=0.5;
hierParams(1).model.lam=10;
% priors for the hierarchical parameters
% the mean is from a normal dist, the lam is from a gamma
hierParams(1).priors.mean.mean=0.5;
hierParams(1).priors.mean.std=10;
hierParams(1).priors.mean.bLower=0;
hierParams(1).priors.mean.bUpper=1;
hierParams(1).priors.lam.a=1;
hierParams(1).priors.lam.b=1e-8;
hierParams(1).priors.lam.bLower=0;
hierParams(1).priors.lam.bUpper=Inf;
% and a place for mcmc control parameters
hierParams(1).mcmc.meanWidth=0.1;
% lockstep update parameters
hierParams(1).mcmc.lockstepMeanWidth=0.1;
% lambda will be sampled as an adaptive parameter
% a place for recording the samples, in the pvals structure
hierParams(1).pvals.mean=[];
hierParams(1).pvals.lam=[];
% this is where you would put in the next hierParams struct array to cover
% further hierarchical models in the analysis
end
|
github
|
AndrewCWalker/rsm_tool_suite-master
|
genDist.m
|
.m
|
rsm_tool_suite-master/Automated_RSM/MCMC/gpmsa/matlab/genDist.m
| 2,605 |
utf_8
|
fc3644a58bfe56b543e25af38b5d6bbd
|
% function d = gendist(data,dataDesc);
% generates the nxnxp distance array values and supporting
% information, given the nxp location matrix x
% or if a d is passed in, just update the distances
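% The returned struct holds, for an n x p input, the n*(n-1)/2 pairwise
% squared coordinate differences d.d (one row per pair, one column per
% dimension), the pair indices d.indi/d.indj, and linear indices d.indm
% into an n x n matrix. For example (illustrative):
%   d = genDist([0 0; 1 0; 0 2]);   % d.d = [1 0; 0 4; 1 4]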
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Author: James R. Gattiker, Los Alamos National Laboratory
%
% This file was distributed as part of the GPM/SA software package
% Los Alamos Computer Code release LA-CC-06-079, C-06,114
%
% Copyright 2008. Los Alamos National Security, LLC. This material
% was produced under U.S. Government contract DE-AC52-06NA25396 for
% Los Alamos National Laboratory (LANL), which is operated by Los Alamos
% National Security, LLC for the U.S. Department of Energy. The U.S.
% Government has rights to use, reproduce, and distribute this software.
% NEITHER THE GOVERNMENT NOR LOS ALAMOS NATIONAL SECURITY, LLC MAKES ANY
% WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF
% THIS SOFTWARE. If software is modified to produce derivative works,
% such modified software should be clearly marked, so as not to confuse
% it with the version available from LANL.
% Additionally, this program is free software; you can redistribute it
% and/or modify it under the terms of the GNU General Public License as
% published by the Free Software Foundation; version 2.0 of the License.
% Accordingly, this program is distributed in the hope that it will be
% useful, but WITHOUT ANY WARRANTY; without even the implied warranty
% of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
% General Public License for more details.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function d = genDist(data,dataDesc)
d.type=1;
[n p] = size(data);
%generate the list of n*(n-1)/2 distance indices
inds=n*(n-1)/2;
indi=zeros(inds,1);indj=zeros(inds,1);
ind=1;
for ii=1:n-1
  indi(ind:ind+n-ii-1)=ii;
  indj(ind:ind+n-ii-1)=ii+1:n;
  ind=ind+n-ii;
end
d.n=n; d.p=p;
d.indi=indi; d.indj=indj;
d.indm=indi + n*(indj-1);
if p==0; d.d=[]; return; end
d.d=(data(indj,:)-data(indi,:)).^2;
% This is to support categoricals; deprecated
%if ~exist('dataDesc','var'); cat=any(data<0);
% else cat=[dataDesc.typeCategorical];
%end
%cat0=find(~cat); cat1=find(cat);
% if isempty(cat1)
% d.d=(data(indj,:)-data(indi,:)).^2;
% else
% d.d=zeros(inds,p);
% d.d(:,cat0)=(data(indj,cat0)-data(indi,cat0)).^2;
% d.d(:,cat1)=(data(indj,cat1)~=data(indi,cat1))*0.5;
% end
|
github
|
AndrewCWalker/rsm_tool_suite-master
|
diagPlots.m
|
.m
|
rsm_tool_suite-master/Automated_RSM/MCMC/gpmsa/matlab/diagPlots.m
| 8,497 |
utf_8
|
808d30c93261db1c54070040fc633472
|
% function ret=diagPlots(pout,pvec,plotNum,varargin)
% Some generic plots for GPS/SA diagnostics. Note that most plots of
% response surfaces and predictions are application specific because of
% the unknown structure of the data. (see basicExPlots for examples)
% pout - the structure output from a gaspMCMC function call
% pvec - a vector of the elements of the associated MCMC chain to process
% plotNum - which of the plots listed below to produce; scalar.
% 1 - rho plot for the theta and x variables by PC
% 101 - summary rho plot across all PC's (weighted by var contrib.)
% 2 - theta calibration plot
% 3 - lamOS and lamVz combined s.d. (joint model capable)
% 4 - PC diagnostics from the simulation dataset (for PC settings analysis)
% 5 - 1D conditional plot eta MAP mean and pointwise +/-2sd,
% non-active vars at mid-range (for scalar response model)
% 6 - 2D conditional eta MAP mean response, non-active vars at mid-range,
% default vars [1,2], specify with 'vars2D' optional arg
% (for scalar response model)
% TBD - Response plot of the simulation model basis loadings vs. params
% Possible variable/value sets:
% 'labels' - cell array of labels for input variable names
% 'labels2' - cell array of labels for output variable names
% 'figNum' - figure number to plot into
% 'evenWeight' - do weighting calculations evenly (no PCs weighting)
% 'ngrid' - pass to gPlotMatrix
% 'ksd' - pass to gPlotMatrix
% 'standardized' - output variables in standardized scale
% 'var1D' - variables to be varied in a conditional plot
% (type 5), default 1
% 'vars2D' - 2-vector of variables to be varied in a conditional plot
% (type 6), default [1 2]
% 'gridCond' - prediction grid for conditional plot (type 5 or 6),
% default is linspace(0,1,10)
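% Example (illustrative; pout is assumed to be the structure returned by
% the MCMC run):
%   diagPlots(pout,100:10:1000,2);          % theta posterior plot matrix
%   ret = diagPlots(pout,100:10:1000,101);  % weighted rho summary, ret.bwm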
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Author: James R. Gattiker, Los Alamos National Laboratory
%
% This file was distributed as part of the GPM/SA software package
% Los Alamos Computer Code release LA-CC-06-079, C-06,114
%
% Copyright 2008. Los Alamos National Security, LLC. This material
% was produced under U.S. Government contract DE-AC52-06NA25396 for
% Los Alamos National Laboratory (LANL), which is operated by Los Alamos
% National Security, LLC for the U.S. Department of Energy. The U.S.
% Government has rights to use, reproduce, and distribute this software.
% NEITHER THE GOVERNMENT NOR LOS ALAMOS NATIONAL SECURITY, LLC MAKES ANY
% WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF
% THIS SOFTWARE. If software is modified to produce derivative works,
% such modified software should be clearly marked, so as not to confuse
% it with the version available from LANL.
% Additionally, this program is free software; you can redistribute it
% and/or modify it under the terms of the GNU General Public License as
% published by the Free Software Foundation; version 2.0 of the License.
% Accordingly, this program is distributed in the hope that it will be
% useful, but WITHOUT ANY WARRANTY; without even the implied warranty
% of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
% General Public License for more details.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function ret=diagPlots(pout,pvec,plotNum,varargin)
ret=[];
% Extract some variables from the structure, for convenience
pvals=pout(1).pvals(pvec);
model=pout(1).model; data=pout(1).data;
obsData=pout(1).obsData; simData=pout(1).simData;
pu=model.pu; pv=model.pv; p=model.p; q=model.q;
py=size(obsData(1).Dobs,1);
% set defaults, then parse arguments
labels=[];
for ii=1:p; labels=[labels {['x ' num2str(ii)]}]; end
for ii=1:q; labels=[labels {['theta ' num2str(ii)]}]; end
closeFig=false; figNum=plotNum;
evenWeight=false;
ngrid=40; ksd=0.05;
standardized=true;
for ii=1:py; labels2{ii}=['Var ',num2str(ii)]; end
vars2D=[1 2];
var1D=1;
gridCond=linspace(0,1,10);
parseAssignVarargs({'labels','figNum','closeFig','evenWeight', ...
'ngrid','ksd','standardized','labels2', ...
'var1D','vars2D','gridCond'});
% Plot the rhoU response in box plots, by principal component
% The rhoU indicates the degree to which the variable affects the
% principal component. rho is in (0,1), rho=1 indicates no effect.
if plotNum==1;
figure(figNum); clf;
% Collect the MCMC record of the betas
bu=[pvals.betaU]';
% Transform them into rhos
ru = exp(-bu*0.25);
% set up labels for the plot
for ii=1:pu;
r=ru(:,(ii-1)*(p+q)+1:ii*(p+q));
subplot(pu,1,ii);
gBoxPlot(r,'labels',labels);
title(['PC' num2str(ii)]);
a=axis; axis([a(1) a(2) 0 1]);
end
end
% boxplot the mean rhoU response across PCs
if plotNum==101
figure(figNum); clf;
% recreate the PC variability
[U,S,V]=svd(simData.yStd,0);
a=diag(S).^2; a=a./sum(a); a=a(1:pu);
% Collect the MCMC record of the betas
bu=[pvals.betaU]';
% get the weighted mean of each across PCs
bumean=zeros(length(pvec),p+q);
aweight=repmat(a,1,p+q)/sum(a);
if evenWeight; aweight=ones(pu,p+q)/pu; end
for ii=1:length(pvec)
bumean(ii,:)=sum( reshape(bu(ii,:),p+q,pu)' .* aweight );
end
% plot the means transformed into rhos
gBoxPlot(exp(-0.25*bumean),'labels',labels);
a=axis; axis([a(1:2) 0 1]);
ret.bwm=bumean;
end
% Examine the theta posterior calibration
% Each theta was estimated with MCMC, the result is a sample of the
% underlying theta distribution
if plotNum==2
figure(figNum); clf;
t=[pvals.theta]';
gPlotMatrix(t,'shade',1,'lstyle','imcont','ngrid',ngrid, ...
'ksd',ksd,'shade',1,'labels',labels);
end
% Plot the lamOS and lamVz combined s.d. These together indicate
% how much the simulation model is regularized, corresponds to how
% important the simulation data is. This is particularly interesting
% in joint models, or models with lamVz groups.
if plotNum==3
figure(figNum); clf;
lovSD=[]; L={};
for ii=1:length(pout)
los=[pout(ii).pvals(pvec).lamOs]';
lvz=[pout(ii).pvals(pvec).lamVz]';
lovSD=[lovSD sqrt(1./repmat(los,1,size(lvz,2)) + 1./lvz) ];
for jj=1:size(lvz,2)
L{end+1}=['Mod ' num2str(ii) ' Grp ' num2str(jj)];
end
end
boxplot(lovSD,'labels',L);
a=axis; axis([a(1:2) 0 max(a(4),1)]);
end
% Plot the calibrated discrepancy, each output point as a
% discrete response in its own histogram
if plotNum==4
figure(figNum); clf
% predict in uvpred mode over the specified realizations
pred=gPred(0.5,pvals,pout.model,pout.data,'uvpred');
v=(pred.v * obsData(1).Dobs)';
if ~standardized
if isscalar(simData.orig.ysd)
v=v.* simData.orig.ysd;
else
v=v.*repmat(simData.orig.ysd,1,size(v,2));
end
v=v+repmat(simData.orig.ymean,1,size(v,2));
end
isize=ceil(sqrt(py)); jsize=ceil(py/isize);
for ii=1:py
ret.h(ii)=gPackSubplot(isize,jsize,ii,0,0.4); hold on;
hist(v(ii,:));
a=axis;
text(a([1 2])*[0.9 0.1]',a([3 4])*[0.1 0.9]',labels2{ii});
end
end
% plot a 1D conditional response mean plot of the simulation emulator (eta).
if plotNum==5
figure(figNum);
xpred=0.5*ones(numel(gridCond),p);
xpred(:,var1D)=gridCond;
[mapVal pvecMAP]=max([pout.pvals(pvec).logPost]);
pred=gPredict(xpred,pout.pvals(pvec(pvecMAP)),pout.model,pout.data, ...
'returnMuSigma',1);
plot(gridCond,pred.Myhat); hold on;
plot(gridCond,pred.Myhat+diag(pred.Syhat{1})'*2,':');
plot(gridCond,pred.Myhat-diag(pred.Syhat{1})'*2,':');
mean(pred.Myhat)
xlabel(labels{var1D});
end
% plot a 2D conditional response mean plot of the simulation emulator (eta).
if plotNum==6
figure(figNum);
[g1 g2]=meshgrid(gridCond);
xpred=0.5*ones(numel(gridCond)^2,p);
xpred(:,vars2D(1))=g1(:); xpred(:,vars2D(2))=g2(:);
[mapVal pvecMAP]=max([pout.pvals(pvec).logPost]);
pred=gPredict(xpred,pout.pvals(pvec(pvecMAP)),pout.model,pout.data, ...
'returnMuSigma',1);
mesh(g1,g2,reshape(pred.Myhat,size(g1)));
xlabel( labels{vars2D(1)} );
ylabel( labels{vars2D(2)} );
end
if closeFig; close(figNum); end
end %main plot function
|
github
|
AndrewCWalker/rsm_tool_suite-master
|
stepsize.m
|
.m
|
rsm_tool_suite-master/Automated_RSM/MCMC/gpmsa/matlab/stepsize.m
| 7,516 |
utf_8
|
cd004244c3bf737b639f5c735e93d34a
|
% function [params hierParams] = stepsize(params,nBurn,nLev,varargin)
% compute step sizes from step size data collect run in gpmmcmc
% please see associated documentation
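% Example (illustrative; the nBurn and nLev values are arbitrary):
%   [params,hierParams] = stepsize(params,100,13,'hierParams',hierParams);
%   params = gpmmcmc(params,10000,'step',1); % sample with the tuned widths
% Acceptance counts collected over the nLev trial widths are fit with a
% logistic regression in log-width, and the width giving the acceptance
% rate implied by the logit constant below (about 1/e) is stored back
% into params(i).mcmc.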
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Author: James R. Gattiker, Los Alamos National Laboratory
% Brian Williams, Los Alamos National Laboratory
%
% This file was distributed as part of the GPM/SA software package
% Los Alamos Computer Code release LA-CC-06-079, C-06,114
%
% Copyright 2008. Los Alamos National Security, LLC. This material
% was produced under U.S. Government contract DE-AC52-06NA25396 for
% Los Alamos National Laboratory (LANL), which is operated by Los Alamos
% National Security, LLC for the U.S. Department of Energy. The U.S.
% Government has rights to use, reproduce, and distribute this software.
% NEITHER THE GOVERNMENT NOR LOS ALAMOS NATIONAL SECURITY, LLC MAKES ANY
% WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF
% THIS SOFTWARE. If software is modified to produce derivative works,
% such modified software should be clearly marked, so as not to confuse
% it with the version available from LANL.
% Additionally, this program is free software; you can redistribute it
% and/or modify it under the terms of the GNU General Public License as
% published by the Free Software Foundation; version 2.0 of the License.
% Accordingly, this program is distributed in the hope that it will be
% useful, but WITHOUT ANY WARRANTY; without even the implied warranty
% of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
% General Public License for more details.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function [params hierParams] = stepsize(params,nBurn,nLev,varargin)
numMods=length(params);
clist=zeros(0,numMods);
hierParams=[];
parseAssignVarargs({'clist','hierParams'});
numHMods=length(hierParams);
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% set up ranges, and precompute structures for quick&easy sampling
fprintf('Setting up structures for stepsize statistics collect ...\n');
ex = -(nLev-1)/2:(nLev-1)/2;
for ii=1:numMods
for varNum=1:length(params(ii).mcmc.svars)
svar=params(ii).mcmc.svars{varNum};
svarS=params(ii).mcmc.svarSize(varNum);
base.(svar)=2*ones(nLev,svarS);
acc.(svar)=zeros(nLev,svarS);
end
% specialize in some cases
for varNum=1:length(params(ii).mcmc.svars)
var=params(ii).mcmc.svars{varNum};
switch(var)
case 'theta'
base.(var)(ex>0,:)=20.0^(2.0/(nLev-1));
case {'betaV','betaU'}
base.(var)(ex>0,:)=10.0^(2.0/(nLev-1));
case {'lamUz','lamOs'}
base.(var)(ex>0,:)=100.0^(2.0/(nLev-1));
end
end
step(ii).base=base;
step(ii).ex=ex;
end
if ~isempty(hierParams)
base.mean=2*ones(nLev); base.mean(ex>0)=20.0^(2.0/(nLev-1));
base.lam=2*ones(nLev);
end
% pre-compute the widths for the levels, making whole mcmc structs to
% substitute in and out of the params as we go through the levels
for ii=1:numMods
smcmc(ii,1:nLev)=params(ii).mcmc;
for lev=1:nLev
for varNum=1:length(params(ii).mcmc.svars)
wvar=params(ii).mcmc.wvars{varNum};
svar=params(ii).mcmc.svars{varNum};
svarS=params(ii).mcmc.svarSize(varNum);
for k=1:svarS
smcmc(ii,lev).(wvar)(k)=smcmc(ii,lev).(wvar)(k)* ...
step(ii).base.(svar)(lev,k)^step(ii).ex(lev);
wrec(ii).(wvar)(k,lev)=smcmc(ii,lev).(wvar)(k);
end
end
end
end
for hi=1:numHMods
smcmcH(hi,1:nLev)=hierParams(hi).mcmc;
for lev=1:nLev
smcmcH(hi,lev).meanWidth=smcmcH(hi,lev).meanWidth* ...
base.mean(lev)^ex(lev);
wrecH(hi).meanWidth(lev)=smcmcH(hi,lev).meanWidth;
smcmcH(hi,lev).lamWidth=smcmcH(hi,lev).lamWidth* ...
base.lam(lev)^ex(lev);
wrecH(hi).lamWidth(lev)=smcmcH(hi,lev).lamWidth;
smcmcH(hi,lev).lockstepMeanWidth=smcmcH(hi,lev).lockstepMeanWidth* ...
base.mean(lev)^ex(lev);
wrecH(hi).lockstepMeanWidth(lev)=smcmcH(hi,lev).lockstepMeanWidth;
end
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Herein goeth the sampling code.
fprintf('Collecting stepsize acceptance stats ...\n');
%init the params structs
[params hierParams]=gpmmcmc(params,0,'initOnly',1,'clist',clist,...
'hierParams',hierParams);
fprintf('Drawing %d samples (nBurn) over %d levels (nLev) \n',nBurn,nLev);
counter('stime',1,nBurn*nLev,15,10);
for burn=1:nBurn
for lev=1:nLev
counter((burn-1)*nLev+1+lev);
for ii=1:numMods
params(ii).mcmc=smcmc(ii,lev);
end
for hi=1:numHMods
hierParams(hi).mcmc=smcmcH(hi,lev);
end
[params hierParams]=gpmmcmc(params,1,'noInit',1,'noCounter',1,...
'step',1,'clist',clist,'hierParams',hierParams);
end
end
counter('end');
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Estimate the optimal step size
fprintf('Computing optimal step sizes ...\n');
for ii=1:numMods
for varNum=1:length(params(ii).mcmc.svars)
svar=params(ii).mcmc.svars{varNum};
svarS=params(ii).mcmc.svarSize(varNum);
acc=[params(ii).pvals.([svar 'Acc'])]';
accCount(ii).(svar)=zeros(nLev,svarS);
for k=1:svarS
accCount(ii).(svar)(:,k)=sum(reshape(acc(:,k),nLev,nBurn),2);
end
end
end
for hi=1:numHMods
acc=[hierParams(hi).pvals.acc]';
accCountH(hi).mean=sum(reshape(acc(:,1),nLev,nBurn),2);
accCountH(hi).lam=sum(reshape(acc(:,2),nLev,nBurn),2);
accCountH(hi).lockstepMean=sum(reshape(acc(:,3),nLev,nBurn),2);
end
nTrials=ones(nLev,1)*nBurn;
logit = log(1/(exp(1)-1));
for ii=1:numMods
for varNum=1:length(params(ii).mcmc.svars)
wvar=params(ii).mcmc.wvars{varNum};
svar=params(ii).mcmc.svars{varNum};
svarS=params(ii).mcmc.svarSize(varNum);
switch(svar)
case {'theta'}
for k=1:svarS
thisVarLinks=clist(clist(:,ii)==k,:);
if any(thisVarLinks) & ~all(thisVarLinks(1:ii-1)==0)
stepWidth(ii).(wvar)(k)=0;
else
widths=wrec(ii).(wvar)(k,:);
b=glmfit(log(widths),[accCount(ii).(svar)(:,k) nTrials],'binomial');
stepWidth(ii).(wvar)(k)=exp((logit-b(1))/b(2));
end
end
otherwise
for k=1:svarS
widths=wrec(ii).(wvar)(k,:);
b=glmfit(log(widths),[accCount(ii).(svar)(:,k) nTrials],'binomial');
stepWidth(ii).(wvar)(k)=exp((logit-b(1))/b(2));
end
end
end
end
for hi=1:numHMods
widths=wrecH(hi).meanWidth;
b=glmfit(log(widths),[accCountH(hi).mean nTrials],'binomial');
stepWidthH(hi).meanWidth=exp((logit-b(1))/b(2));
widths=wrecH(hi).lamWidth;
b=glmfit(log(widths),[accCountH(hi).lam nTrials],'binomial');
stepWidthH(hi).lamWidth=exp((logit-b(1))/b(2));
widths=wrecH(hi).lockstepMeanWidth;
b=glmfit(log(widths),[accCountH(hi).lockstepMean nTrials],'binomial');
stepWidthH(hi).lockstepMeanWidth=exp((logit-b(1))/b(2));
end
%put the estimated step sizes back into the params struct
for ii=1:numMods
for varNum=1:length(params(ii).mcmc.svars)
wvar=params(ii).mcmc.wvars{varNum};
params(ii).mcmc.(wvar)=stepWidth(ii).(wvar);
end
end
for hi=1:numHMods
hierParams(hi).mcmc.meanWidth=stepWidthH(hi).meanWidth;
hierParams(hi).mcmc.lamWidth=stepWidthH(hi).lamWidth;
hierParams(hi).mcmc.lockstepMeanWidth=stepWidthH(hi).lockstepMeanWidth;
end
fprintf('Step size assignment complete.\n');
%params.mcmc.acc=accCount;
%params.mcmc.wrec=wrec;
%params.mcmc.smcmc=smcmc;
|
github
|
mbuckler/ReversiblePipeline-master
|
ImgPipe_Matlab.m
|
.m
|
ReversiblePipeline-master/src/Matlab/ImgPipe_Matlab.m
| 18,556 |
utf_8
|
3cb3d09d6499bf586ac0162d62fbe26d
|
%==============================================================
% Image Processing Pipeline
%
% This is a Matlab implementation of a pre-learned image
% processing model. A description of the model can be found in
% "A New In-Camera Imaging Model for Color Computer Vision
% and its Application" by Seon Joo Kim, Hai Ting Lin,
% Michael Brown, et al. Code for learning a new model can
% be found at the original project page. This particular
% implementation was written by Mark Buckler.
%
% Original Project Page:
% http://www.comp.nus.edu.sg/~brown/radiometric_calibration/
%
% Model Format Readme:
% http://www.comp.nus.edu.sg/~brown/radiometric_calibration/datasets/Model_param/readme.pdf
%
%==============================================================
function ImgPipe_Matlab
% Model directory
model_dir = '../../camera_models/NikonD7000/';
% White balance index (select from the transform file)
% First white balance in file has wb_index of 1
% For more information see the model readme
wb_index = 6;
% Image directory
image_dir = '../../imgs/NikonD7000FL/';
% Results directory
results_dir = 'pipe_results/';
% Raw image
raw_image_name = 'DSC_0916.NEF.raw_1C.tiff';
% Jpg image
jpg_image_name = 'DSC_0916.JPG';
% Create directories for results
mkdir(pwd, results_dir);
mkdir(pwd, strcat(results_dir,'forward_images/'));
mkdir(pwd, strcat(results_dir,'backward_images/'));
% Patch start locations
% [xstart,ystart]
%
% NOTE: Must align patch start in raw file with the demosaic
% pattern start. Otherwise colors will be switched in the
% final result.
patchstarts = [ ...
[551, 2751]; ... % 1
[1001, 2751]; ... % 2
[1501, 2751]; ... % 3
[2001, 2751]; ... % 4
[551, 2251]; ... % 5
[1001, 2251]; ... % 6
[1501, 2251]; ... % 7
[2001, 2251]; ... % 8
[551, 1751]; ... % 9
[1001, 1751]; ... % 10
[1501, 1751]; ... % 11
[2001, 1751]; ... % 12
];
% Number of patch tests to run
patchnum = 12;
% Define patch size (patch width and height, in pixels)
patchsize = 10;
% Initialize results
forward_results = zeros(patchnum,3,3);
backward_results = zeros(patchnum,3,3);
% Process patches
for i=1:patchnum
% Run the forward model on the patch
[demosaiced, transformed, gamutmapped, tonemapped, forward_ref] = ...
ForwardPipe(model_dir, image_dir, results_dir, wb_index, ...
raw_image_name, jpg_image_name, ...
patchstarts(i,2), patchstarts(i,1), patchsize, i);
% Compare the pipeline output to the reference
[refavg, resultavg, error] = ...
patch_compare(tonemapped, forward_ref);
forward_results(i,1,:) = resultavg;
forward_results(i,2,:) = refavg;
forward_results(i,3,:) = error;
% Run the backward model on the patch
[revtonemapped, revgamutmapped, revtransformed, remosaiced, backward_ref] = ...
BackwardPipe(model_dir, image_dir, results_dir, wb_index, ...
jpg_image_name, raw_image_name, ...
patchstarts(i,2), patchstarts(i,1), patchsize, i);
% Compare the pipeline output to the reference
[refavg, resultavg, error] = ...
patch_compare(remosaiced, backward_ref);
backward_results(i,1,:) = resultavg;
backward_results(i,2,:) = refavg;
backward_results(i,3,:) = error;
end
write_results(forward_results, patchnum, ...
strcat(results_dir,'forward_results.txt'));
write_results(backward_results, patchnum, ...
strcat(results_dir,'backward_results.txt'));
disp(strcat('Avg % color channel error for forward: ', ...
num2str(mean(mean(abs(forward_results(:,3,:)))))));
disp(strcat('Avg % color channel error for backward: ', ...
num2str(mean(mean(abs(backward_results(:,3,:)))))));
disp(strcat('Max % color channel error for forward: ', ...
num2str(max(max(abs(forward_results(:,3,:)))))));
disp(strcat('Max % color channel error for backward: ', ...
num2str(max(max(abs(backward_results(:,3,:)))))));
disp('See results folder for error per patch and per color channel');
end
function [demosaiced, transformed, gamutmapped, tonemapped, ref_image] = ...
ForwardPipe(model_dir, image_dir, results_dir, wb_index, ...
in_image_name, ref_image_name, ystart, xstart, patchsize, patchid)
% Establish patch
xend = xstart + patchsize - 1;
yend = ystart + patchsize - 1;
%==============================================================
% Import Forward Model Data
%
% Note: This assumes a camera model folder with a single
% camera setting and transform. This is not the case for
% every folder, but it is for the Nikon D40 on the Normal
% setting and with Fl(L14)/fluorescent color.
% Model file reading
transforms_file = dlmread( ...
strcat(model_dir,'raw2jpg_transform.txt'));
ctrl_points_file = dlmread( ...
strcat(model_dir,'raw2jpg_ctrlPoints.txt'));
coeficients_file = dlmread( ...
strcat(model_dir,'raw2jpg_coefs.txt'));
resp_funct_file = dlmread( ...
strcat(model_dir,'raw2jpg_respFcns.txt'));
% Color space transform
Ts = transforms_file(2:4,:);
% Calculate base for the white balance transform selected
% For more details see the camera model readme
wb_base = 6 + 5*(wb_index-1);
% White balance transform
Tw = diag(transforms_file(wb_base+3,:));
% Combined transforms
TsTw = Ts*Tw;
TsTw_file = transforms_file(wb_base:wb_base+2,:);
% Perform quick check to determine equivalence with provided model
% Round to nearest 4 decimal representation for check
TsTw_4dec = round(TsTw*10000)/10000;
TsTw_file_4dec = round(TsTw_file*10000)/10000;
assert( isequal( TsTw_4dec, TsTw_file_4dec), ...
'Transform multiplication not equal to result found in model file, or import failed' )
% Gamut mapping: Control points
ctrl_points = ctrl_points_file(2:end,:);
% Gamut mapping: Weights
weights = coeficients_file(2:(size(coeficients_file,1)-4),:);
% Gamut mapping: c
c = coeficients_file((size(coeficients_file,1)-3):end,:);
% Tone mapping (reverse function is what is contained within model
% file)
frev = resp_funct_file(2:end,:);
%==============================================================
% Import Raw Image Data
% NOTE: Can use RAW2TIFF.cpp to convert raw to tiff. This isn't
% automatically called by this script yet, but could be.
in_image = imread(strcat(image_dir,in_image_name));
%==============================================================
% Import Reference image
ref_image = imread(strcat(image_dir,ref_image_name));
% Downsize to match patch size
ref_image = ref_image(ystart:yend,xstart:xend,:);
%==============================================================
% Forward pipeline function
% Convert to uint16 representation for demosaicing
in_image_unit16 = im2uint16(in_image);
% Demosaic image
demosaiced = im2uint8(demosaic(in_image_unit16,'rggb'));%gbrg %rggb
% Convert to double precision for transforming and gamut mapping
image_float = im2double(demosaiced);
% Downsize image to patch size
demosaiced = demosaiced(ystart:yend,xstart:xend,:);
image_float = image_float(ystart:yend,xstart:xend,:);
% Pre-allocate memory
height = size(image_float,1);
width = size(image_float,2);
transformed = zeros(height,width,3);
gamutmapped = zeros(height,width,3);
tonemapped = zeros(height,width,3);
for y = 1:height
for x = 1:width
% transformed = RAWdemosaiced * Ts * Tw
transformed(y,x,:) = transpose(squeeze(image_float(y,x,:))) ...
* transpose(TsTw);
% gamut mapping
gamutmapped(y,x,:) = RBF(squeeze(transformed(y,x,:)), ...
ctrl_points, weights, c);
% tone mapping
tonemapped(y,x,:) = tonemap(im2uint8(squeeze(gamutmapped(y,x,:))), frev);
end
% Let user know how far along we are
disp((y/size(image_float,1))*100)
end
%==============================================================
% Export Image(s)
ref_image = im2uint8(ref_image);
image_float = im2uint8(image_float);
transformed = im2uint8(transformed);
gamutmapped = im2uint8(gamutmapped);
tonemapped = im2uint8(tonemapped);
imwrite(ref_image, strcat(results_dir, ...
'forward_images/', in_image_name, ...
'.p',int2str(patchid),'.forward_reference.tif'));
imwrite(tonemapped, strcat(results_dir, ...
'forward_images/', in_image_name, ...
'.p',int2str(patchid),'.forward_result.tif'));
end
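% Hedged helper sketch, not called by the pipeline above; the function name
% is illustrative. It shows that the per-pixel color transform loop in
% ForwardPipe is equivalent to a single matrix product over the whole patch,
% assuming image_float is an HxWx3 double array and TsTw the 3x3 combined
% transform.
function transformed = apply_color_transform_sketch(image_float, TsTw)
    height = size(image_float,1);
    width = size(image_float,2);
    % One RGB row vector per pixel
    pixels = reshape(image_float, height*width, 3);
    % Row-vector form of TsTw * pixel, matching the loop in ForwardPipe
    transformed = reshape(pixels * transpose(TsTw), height, width, 3);
end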
function [revtonemapped, revgamutmapped, revtransformed, remosaiced, ref_image_colored] = ...
BackwardPipe(model_dir, image_dir, results_dir, wb_index, ...
in_image_name, ref_image_name, ystart, xstart, patchsize, patchid)
% Establish patch
xend = xstart + patchsize - 1;
yend = ystart + patchsize - 1;
%==============================================================
% Import Backward Model Data
%
% Note: This assumes a camera model folder with a single
% camera setting and transform. This is not the case for
% every folder, but it is for the Nikon D40 on the Normal
% setting and with Fl(L14)/fluorescent color.
% Model file reading
transforms_file = dlmread( ...
strcat(model_dir,'jpg2raw_transform.txt'));
ctrl_points_file = dlmread( ...
strcat(model_dir,'jpg2raw_ctrlPoints.txt'));
coeficients_file = dlmread( ...
strcat(model_dir,'jpg2raw_coefs.txt'));
resp_funct_file = dlmread( ...
strcat(model_dir,'jpg2raw_respFcns.txt'));
% Color space transform
Ts = transforms_file(2:4,:);
% Calculate base for the white balance transform selected
% For more details see the camera model readme
wb_base = 6 + 5*(wb_index-1);
% White balance transform
Tw = diag(transforms_file(wb_base+3,:));
% Combined transforms
TsTw = Ts*Tw;
TsTw_file = transforms_file(wb_base:wb_base+2,:);
% Perform quick check to determine equivalence with provided model
% Round to nearest 4 decimal representation for check
TsTw_4dec = round(TsTw*10000)/10000;
TsTw_file_4dec = round(TsTw_file*10000)/10000;
assert( isequal( TsTw_4dec, TsTw_file_4dec), ...
'Transform multiplication not equal to result found in model file, or import failed' )
% Gamut mapping: Control points
ctrl_points = ctrl_points_file(2:end,:);
% Gamut mapping: Weights
weights = coeficients_file(2:(size(coeficients_file,1)-4),:);
% Gamut mapping: c
c = coeficients_file((size(coeficients_file,1)-3):end,:);
% Tone mapping (reverse function is what is contained within model
% file)
frev = resp_funct_file(2:end,:);
%==============================================================
% Import Image Data
in_image = imread(strcat(image_dir,in_image_name));
ref_image = imread(strcat(image_dir,ref_image_name));
% Convert the reference image to double representation
ref_image = im2double(ref_image);
%==============================================================
% Backward pipeline function
% Convert to double precision for processing
image_float = im2double(in_image);
% Extract patches
image_float = image_float(ystart:yend,xstart:xend,:);
ref_image = ref_image (ystart:yend,xstart:xend);
% Pre-allocate memory
height = size(image_float,1);
width = size(image_float,2);
revtransformed = zeros(height,width,3);
revtonemapped = zeros(height,width,3);
revgamutmapped = zeros(height,width,3);
remosaiced = zeros(height,width,3);
ref_image_colored = zeros(height,width,3);
for y = 1:height
for x = 1:width
% Reverse tone mapping
revtonemapped(y,x,:) = revtonemap(squeeze(image_float(y,x,:)), frev);
% Reverse gamut mapping
revgamutmapped(y,x,:) = RBF(squeeze(revtonemapped(y,x,:)), ...
ctrl_points, weights, c);
% Reverse color mapping and white balancing
% RAWdemosaiced = transformed * inv(TsTw) = transformed / TsTw
revtransformed(y,x,:) = transpose(squeeze(revgamutmapped(y,x,:))) ...
* inv(transpose(TsTw));
% Re-mosaicing
% Note: This is not currently parameterizable, assumes rggb
yodd = mod(y,2);
xodd = mod(x,2);
% If a red pixel
if yodd && xodd
remosaiced(y,x,:) = [revtransformed(y,x,1), 0, 0];
% If a green pixel
elseif xor(yodd,xodd)
remosaiced(y,x,:) = [0, revtransformed(y,x,2), 0];
% If a blue pixel
elseif ~yodd && ~xodd
remosaiced(y,x,:) = [0, 0, revtransformed(y,x,3)];
end
%======================================================
% Reorganize reference image
% Note: This is not currently parameterizable, assumes rggb
% If a red pixel
if yodd && xodd
ref_image_colored(y,x,:) = [ref_image(y,x), 0, 0];
% If a green pixel
elseif xor(yodd,xodd)
ref_image_colored(y,x,:) = [0, ref_image(y,x), 0];
% If a blue pixel
elseif ~yodd && ~xodd
ref_image_colored(y,x,:) = [0, 0, ref_image(y,x)];
end
end
% Let user know how far along we are
disp((y/size(image_float,1))*100)
end
%==============================================================
% Export Image(s)
ref_image = im2uint8(ref_image);
ref_image_colored = im2uint8(ref_image_colored);
revtransformed = im2uint8(revtransformed);
revtonemapped = im2uint8(revtonemapped);
revgamutmapped = im2uint8(revgamutmapped);
remosaiced = im2uint8(remosaiced);
imwrite(ref_image, strcat(results_dir, ...
'backward_images/', in_image_name, ...
'.p',int2str(patchid),'.back_ref.tif'));
imwrite(ref_image_colored, strcat(results_dir, ...
'backward_images/', in_image_name, ...
'.p',int2str(patchid),'.back_ref_colored.tif'));
imwrite(remosaiced, strcat(results_dir, ...
'backward_images/', in_image_name, ...
'.p',int2str(patchid),'.back_result.tif'));
end
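% Hedged helper sketch, not called by BackwardPipe above; the function name
% is illustrative. It shows the same rggb re-mosaicing as the loop above,
% written with logical site masks, assuming rgb is an HxWx3 double array.
function mosaic = remosaic_rggb_sketch(rgb)
    height = size(rgb,1);
    width = size(rgb,2);
    [X, Y] = meshgrid(1:width, 1:height);
    yodd = mod(Y,2) == 1;
    xodd = mod(X,2) == 1;
    mosaic = zeros(height, width, 3);
    mosaic(:,:,1) = rgb(:,:,1) .* (yodd & xodd);   % red sites
    mosaic(:,:,2) = rgb(:,:,2) .* xor(yodd, xodd); % green sites
    mosaic(:,:,3) = rgb(:,:,3) .* (~yodd & ~xodd); % blue sites
end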
% Radial basis function for forward and reverse gamut mapping
function out = RBF (in, ctrl_points, weights, c)
out = zeros(3,1);
% Weighted control points
for idx = 1:size(ctrl_points,1)
dist = norm(transpose(in) - ctrl_points(idx,:));
for color = 1:3
out(color) = out(color) + weights(idx,color) * dist;
end
end
% Biases
for color = 1:3
out(color) = out(color) + c(1,color);
out(color) = out(color) + (c(2,color) * in(1));
out(color) = out(color) + (c(3,color) * in(2));
out(color) = out(color) + (c(4,color) * in(3));
end
end
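% Hedged equivalent sketch, not used by the pipeline; the function name is
% illustrative. The gamut map above is an affine term plus distance-weighted
% control points,
%   out = c(1,:)' + c(2:4,:)'*in + sum_i weights(i,:)' * ||in - ctrl_points(i,:)'||
% and the vectorized form below reproduces RBF for a single 3x1 input.
function out = RBF_vectorized_sketch(in, ctrl_points, weights, c)
    % Euclidean distance from the input to every control point
    d = sqrt(sum((ctrl_points - repmat(transpose(in), size(ctrl_points,1), 1)).^2, 2));
    out = transpose(weights) * d + transpose(c(2:4,:)) * in + transpose(c(1,:));
end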
% Forward mapping function
function out = tonemap (in, revf)
out = zeros(3,1);
for color = 1:3 % 1-R, 2-G, 3-B
% Find index of value which is closest to the input
[~,idx] = min(abs(revf(:,color)-im2double(in(color))));
% If index is zero, bump up to 1 to prevent 0 indexing in Matlab
if idx == 0
idx = 1;
end
% Convert the index to float representation of image value
out(color) = idx/256;
end
end
% Reverse tone mapping function
function out = revtonemap (in, revf)
out = zeros(3,1);
for color = 1:3 % 1-R, 2-G, 3-B
% Convert the input to an integer between 1 and 256
idx = round(in(color)*256);
% If index is zero, bump up to 1 to prevent 0 indexing in Matlab
if idx == 0
idx = 1;
end
% Index the reverse tone mapping function
out(color) = revf(idx,color);
end
end
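% Hedged round-trip sketch, not called anywhere; the function name is
% illustrative. Because tonemap inverts revtonemap by nearest-neighbour
% lookup into the 256-entry response curve revf, mapping a display value
% backward and then forward again is expected to change it by roughly one
% quantization step for a monotone curve.
function err = tonemap_roundtrip_sketch(v, revf)
    % v is a 3x1 double in [0,1]; revf is the 256-row reverse response curve
    raw = revtonemap(v, revf);           % display value -> raw-domain value
    back = tonemap(im2uint8(raw), revf); % raw-domain value -> display value
    err = max(abs(back - v));            % expected to be on the order of 1/256
end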
% Patch color analysis and comparison function
function [refavg, resultavg, error] = patch_compare(resultpatch, referencepatch)
refavg = zeros(3,1);
resultavg = zeros(3,1);
error = zeros(3,1);
for color = 1:3 % 1-R, 2-G, 3-B
% Take two dimensional pixel averages
refavg(color) = mean(mean(referencepatch(:,:,color)));
resultavg(color) = mean(mean(resultpatch(:,:,color)));
% Compute error
diff = resultavg(color)-refavg(color);
error(color) = (diff/256.0)*100;
end
end
% Write the pipeline data results to an output file
function write_results(results, patchnum, file_name)
outfileID = fopen(file_name, 'w');
% Display results
fprintf(outfileID, 'res(red), res(green), res(blue)\n');
fprintf(outfileID, 'ref(red), ref(green), ref(blue)\n');
fprintf(outfileID, 'err(red), err(green), err(blue)\n');
fprintf(outfileID, '\n');
for i=1:patchnum
fprintf(outfileID, 'Patch %d: \n', i);
% Print results
fprintf(outfileID, '%4.2f, %4.2f, %4.2f \n', ...
results(i,1,1), results(i,1,2), results(i,1,3));
% Print reference
fprintf(outfileID, '%4.2f, %4.2f, %4.2f \n', ...
results(i,2,1), results(i,2,2), results(i,2,3));
% Print error
fprintf(outfileID, '%4.2f, %4.2f, %4.2f \n', ...
results(i,3,1), results(i,3,2), results(i,3,3));
fprintf(outfileID, '\n');
end
end
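% Hedged usage sketch, not called anywhere in this script; the function name
% is illustrative. patch_compare and write_results above assume a results
% array of size [num_patches x 3 x 3], where the second index is 1=result,
% 2=reference, 3=error and the third index is the RGB channel.
function results = collect_patch_results_sketch(resultpatch, referencepatch, results, patchid)
    [refavg, resultavg, err] = patch_compare(resultpatch, referencepatch);
    results(patchid, 1, :) = resultavg;
    results(patchid, 2, :) = refavg;
    results(patchid, 3, :) = err;
end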
|
github
|
zhangliliang/caffe-master
|
classification_demo.m
|
.m
|
caffe-master/matlab/demo/classification_demo.m
| 5,412 |
utf_8
|
8f46deabe6cde287c4759f3bc8b7f819
|
function [scores, maxlabel] = classification_demo(im, use_gpu)
% [scores, maxlabel] = classification_demo(im, use_gpu)
%
% Image classification demo using BVLC CaffeNet.
%
% IMPORTANT: before you run this demo, you should download BVLC CaffeNet
% from Model Zoo (http://caffe.berkeleyvision.org/model_zoo.html)
%
% ****************************************************************************
% For detailed documentation and usage on Caffe's Matlab interface, please
% refer to Caffe Interface Tutorial at
% http://caffe.berkeleyvision.org/tutorial/interfaces.html#matlab
% ****************************************************************************
%
% input
% im color image as uint8 HxWx3
% use_gpu 1 to use the GPU, 0 to use the CPU
%
% output
% scores 1000-dimensional ILSVRC score vector
% maxlabel the label of the highest score
%
% You may need to do the following before you start matlab:
% $ export LD_LIBRARY_PATH=/opt/intel/mkl/lib/intel64:/usr/local/cuda-5.5/lib64
% $ export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libstdc++.so.6
% Or the equivalent based on where things are installed on your system
%
% Usage:
% im = imread('../../examples/images/cat.jpg');
% scores = classification_demo(im, 1);
% [score, class] = max(scores);
% Five things to be aware of:
% caffe uses row-major order
% matlab uses column-major order
% caffe uses BGR color channel order
% matlab uses RGB color channel order
% images need to have the data mean subtracted
% Data coming in from matlab needs to be in the order
% [width, height, channels, images]
% where width is the fastest dimension.
% Here is the rough matlab for putting image data into the correct
% format in W x H x C with BGR channels:
% % permute channels from RGB to BGR
% im_data = im(:, :, [3, 2, 1]);
% % flip width and height to make width the fastest dimension
% im_data = permute(im_data, [2, 1, 3]);
% % convert from uint8 to single
% im_data = single(im_data);
% % resize to a fixed size (IMAGE_DIM x IMAGE_DIM); 227x227 crops are taken later
% im_data = imresize(im_data, [IMAGE_DIM IMAGE_DIM], 'bilinear');
% % subtract mean_data (already in W x H x C with BGR channels)
% im_data = im_data - mean_data;
% If you have multiple images, cat them with cat(4, ...)
% Add caffe/matlab to your Matlab search PATH to use matcaffe
if exist('../+caffe', 'dir')
addpath('..');
else
error('Please run this demo from caffe/matlab/demo');
end
% Set caffe mode
if exist('use_gpu', 'var') && use_gpu
caffe.set_mode_gpu();
gpu_id = 0; % we will use the first gpu in this demo
caffe.set_device(gpu_id);
else
caffe.set_mode_cpu();
end
% Initialize the network using BVLC CaffeNet for image classification
% Weights (parameter) file needs to be downloaded from Model Zoo.
model_dir = '../../models/bvlc_reference_caffenet/';
net_model = [model_dir 'deploy.prototxt'];
net_weights = [model_dir 'bvlc_reference_caffenet.caffemodel'];
phase = 'test'; % run with phase test (so that dropout isn't applied)
if ~exist(net_weights, 'file')
error('Please download CaffeNet from Model Zoo before you run this demo');
end
% Initialize a network
net = caffe.Net(net_model, net_weights, phase);
if nargin < 1
% For demo purposes we will use the cat image
fprintf('using caffe/examples/images/cat.jpg as input image\n');
im = imread('../../examples/images/cat.jpg');
end
% prepare oversampled input
% input_data is Height x Width x Channel x Num
tic;
input_data = {prepare_image(im)};
toc;
% do forward pass to get scores
% scores are now Channels x Num, where Channels == 1000
tic;
% The net forward function. It takes in a cell array of N-D arrays
% (where N == 4 here) containing data of input blob(s) and outputs a cell
% array containing data from output blob(s)
scores = net.forward(input_data);
toc;
scores = scores{1};
scores = mean(scores, 2); % take average scores over 10 crops
[~, maxlabel] = max(scores);
% call caffe.reset_all() to reset caffe
caffe.reset_all();
% ------------------------------------------------------------------------
function crops_data = prepare_image(im)
% ------------------------------------------------------------------------
% caffe/matlab/+caffe/imagenet/ilsvrc_2012_mean.mat contains mean_data that
% is already in W x H x C with BGR channels
d = load('../+caffe/imagenet/ilsvrc_2012_mean.mat');
mean_data = d.mean_data;
IMAGE_DIM = 256;
CROPPED_DIM = 227;
% Convert an image returned by Matlab's imread to im_data in caffe's data
% format: W x H x C with BGR channels
im_data = im(:, :, [3, 2, 1]); % permute channels from RGB to BGR
im_data = permute(im_data, [2, 1, 3]); % flip width and height
im_data = single(im_data); % convert from uint8 to single
im_data = imresize(im_data, [IMAGE_DIM IMAGE_DIM], 'bilinear'); % resize im_data
im_data = im_data - mean_data; % subtract mean_data (already in W x H x C, BGR)
% oversample (4 corners, center, and their x-axis flips)
crops_data = zeros(CROPPED_DIM, CROPPED_DIM, 3, 10, 'single');
indices = [0 IMAGE_DIM-CROPPED_DIM] + 1;
n = 1;
for i = indices
for j = indices
crops_data(:, :, :, n) = im_data(i:i+CROPPED_DIM-1, j:j+CROPPED_DIM-1, :);
crops_data(:, :, :, n+5) = crops_data(end:-1:1, :, :, n);
n = n + 1;
end
end
center = floor(indices(2) / 2) + 1;
crops_data(:,:,:,5) = ...
im_data(center:center+CROPPED_DIM-1,center:center+CROPPED_DIM-1,:);
crops_data(:,:,:,10) = crops_data(end:-1:1, :, :, 5);
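% ------------------------------------------------------------------------
function print_topk_sketch(scores, k)
% ------------------------------------------------------------------------
% Hedged helper sketch, not part of the original demo and never called; the
% function name is illustrative. Given the 1000-way score vector produced by
% classification_demo, it prints the indices and scores of the top-k classes.
% Mapping indices to synset names would need the ILSVRC label file, which is
% not loaded here.
[sorted_scores, idx] = sort(scores, 'descend');
for i = 1:min(k, numel(scores))
    fprintf('class %4d: %.4f\n', idx(i), sorted_scores(i));
end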
|
github
|
EnricoGiordano1992/LMI-Matlab-master
|
yalmiptest.m
|
.m
|
LMI-Matlab-master/yalmip/yalmiptest.m
| 17,035 |
utf_8
|
4a8ad7d56c1153743ca991381cc2f3a6
|
function out = yalmiptest(prefered_solver,auto)
%YALMIPTEST Runs a number of test problems.
%
% YALMIPTEST is recommended when a new solver or a new version
% of YALMIP is installed.
%
% EXAMPLES
% YALMIPTEST % Without argument, default solver used
% YALMIPTEST('solver tag') % Test with specified solver
% YALMIPTEST(options) % Test with specific options structure from SDPSETTINGS
%
% See also SDPSETTINGS
if ~exist('sedumi2pen.m')
disp('Add /yalmip/extras etc to your path first...')
disp('Read the <a href="http://users.isy.liu.se/johanl/yalmip/pmwiki.php?n=Tutorials.Installation">Installation notes</a>.')
return
end
if ~exist('callsedumi.m')
disp('Still missing paths...Just do an addpath(genpath(''yalmiprootdirectory''));')
return
end
detected = which('yalmip.m','-all');
% Will not work in Octave as Octave only reports first item found?
if isa(detected,'cell')
if length(detected)>1
disp('You seem to have multiple installations of YALMIP in your path. Please correct this...');
detected
return
end
end
% Pagination really doesn't work well with solvers
more off
donttest = 0;
if (nargin==1) && isa(prefered_solver,'char') && strcmp(prefered_solver,'test')
donttest = 0;
prefered_solver = '';
else
donttest = 1;
end
if nargin==0
prefered_solver = '';
else
if ~(isa(prefered_solver,'struct') | isa(prefered_solver,'char'))
error('Argument should be a solver tag, or a sdpsettings structure');
end
if isa(prefered_solver,'char')
donttest = 1;
end
end
if ~(exist('callsedumi')==2)
disp('The directory yalmip/solvers is not in your path.')
disp('Put yalmip/, yalmip/solvers, yalmip/extras and yalmip/demos in your MATLAB path.');
return
end
foundstring = {'not found','found'};
teststring = {'-failed','+passed'};
if ~donttest
header = {'Solver','Version/module','Status','Unit test'};
else
header = {'Solver','Version/module','Status'};
end
[solvers,found] = getavailablesolvers(0);
solvers = solvers([find(found);find(~found)]);
found = [found(find(found));found(find(~found))];
j = 1;
for i = 1:length(solvers)
if solvers(i).show
data{j,1} = upper(solvers(i).tag);
data{j,2} = solvers(i).version;
if length(solvers(i).subversion)>0
data{j,2} = [data{j,2} ' ' solvers(i).subversion];
end
data{j,3} = foundstring{found(i)+1};
if ~donttest
if found(i)
disp(['Testing ' solvers(i).tag '...']);
try
if solvers(i).maxdet
pass = lyapell(sdpsettings('solver',solvers(i).tag,'verbose',0));
else
if solvers(i).sdp
pass = stabtest(sdpsettings('solver',solvers(i).tag,'verbose',0));
else
pass = feasiblelp(sdpsettings('solver',solvers(i).tag,'verbose',0));
end
end
data{j,4} = teststring{pass+1};
catch
data{j,4} = '-failed';
end
else
data{j,4} = 'not tested';
end
end
j = j+1;
end
end
if isa(prefered_solver,'char')
ops = sdpsettings('Solver',prefered_solver);
else
ops = prefered_solver;
end
if ~((nargin==2) & (ops.verbose==0))
yalmiptable({'Searching for installed solvers'},header,data);
disp(' ')
end
if nargin<2
disp('Press any key to continue test')
pause
end
i=1;
test{i}.fcn = 'testsdpvar';
test{i}.desc = 'Core functionalities';
i = i+1;
test{i}.fcn = 'feasiblelp';
test{i}.desc = 'LP';
i = i+1;
test{i}.fcn = 'toepapprox';
test{i}.desc = 'LP';
i = i+1;
test{i}.fcn = 'feasibleqp';
test{i}.desc = 'QP';
i = i+1;
test{i}.fcn = 'toepapprox2';
test{i}.desc = 'QP';
i = i+1;
test{i}.fcn = 'socptest1';
test{i}.desc = 'SOCP';
i = i+1;
test{i}.fcn = 'socptest2';
test{i}.desc = 'SOCP';
i = i+1;
test{i}.fcn = 'socptest3';
test{i}.desc = 'SOCP';
i = i+1;
test{i}.fcn = 'complete';
test{i}.desc = 'SDP';
i = i+1;
test{i}.fcn = 'complete_2';
test{i}.desc = 'SDP';
i = i+1;
test{i}.fcn = 'maxcut';
test{i}.desc = 'SDP';
i = i+1;
test{i}.fcn = 'feasible';
test{i}.desc = 'SDP';
i = i+1;
test{i}.fcn = 'lyapell';
test{i}.desc = 'MAXDET';
i = i+1;
test{i}.fcn = 'lyapell2';
test{i}.desc = 'MAXDET';
i = i+1;
%test{i}.fcn = 'circuit1';
%test{i}.desc = 'GP';
%i = i+1;
test{i}.fcn = 'infeasible';
test{i}.desc = 'Infeasible LP';
i = i+1;
test{i}.fcn = 'infeasibleqp';
test{i}.desc = 'Infeasible QP';
i = i+1;
test{i}.fcn = 'infeasiblesdp';
test{i}.desc = 'Infeasible SDP';
i = i+1;
test{i}.fcn = 'momenttest';
test{i}.desc = 'Moment relaxation';
i = i+1;
test{i}.fcn = 'sostest';
test{i}.desc = 'Sum-of-squares';
i = i+1;
test{i}.fcn = 'bmitest';
test{i}.desc = 'Bilinear SDP';
i = i+1;
pass_strings = {'Error','Passed','Solver not available'};
tt = cputime;
% Run test-problems
for i = 1:length(test)
try
t=cputime;
if ops.verbose
disp(' ');
disp(['Testing function ' test{i}.fcn]);
disp(' ');
end
[pp,ss,res] = eval([test{i}.fcn '(ops)']);
pass(i) = pp;
sols{i} = ss.info;
results{i}=res;
ttime(i) = cputime-t;
catch
pass(i) = 0;
results{i} = 'NAN';
sols{i} = 'Unknown problem in YALMIP';
ttime(i) = cputime-tt;
end
end
totaltime = cputime-tt;
clear data;
header = {'Test','Solution', 'Solver message'};
for i = 1:length(pass)
thetime = num2str(ttime(i),4);
data{i,1} = test{i}.desc;
data{i,2} = results{i};
data{i,3} = sols{i};
end
if ops.verbose
disp(' ');
end
formats{1}.data.just = 'right';
formats{2}.data.just = 'right';
formats{3}.data.just = 'right';
formats{1}.header.just = 'right';
formats{2}.header.just = 'right';
formats{3}.header.just = 'right';
clc
yalmiptable([],header,data,formats)
% Test if any LMI solver is installed.
x = sdpvar(2);[p,aux1,aux2,m] = export(x>=0,[],[],[],[],0);
if ~isempty(m)
only_lmilab = strcmpi(m.solver.tag,'lmilab');
else
only_lmilab = 0;
end
x = binvar(1);[p,aux1,aux2,m] = export(x>=0,[],[],[],[],0);
if ~isempty(m)
only_bnb = strcmpi(m.solver.tag,'bnb');
else
only_bnb = 0;
end
if only_lmilab
disp('You do not have any efficient LMI solver installed (only found <a href=" http://users.isy.liu.se/johanl/yalmip/pmwiki.php?n=Solvers.LMILAB">LMILAB</a>).')
disp('If you intend to solve LMIs, please install a better solver.')
end
if only_bnb
disp('You do not have any efficient MILP solver installed (only found internal <a href=" http://users.isy.liu.se/johanl/yalmip/pmwiki.php?n=Solvers.BNB">BNB</a>).')
disp('If you intend to solve MILPs, please install a better solver.')
end
if only_lmilab || only_bnb
disp('See <a href=" http://users.isy.liu.se/johanl/yalmip/pmwiki.php?n=Solvers">Interfaced solvers in YALMIP</a>')
end
function [pass,sol,result] = testsdpvar(ops)
% Test the sdpvar implementation
pass = 1;
sol.info = yalmiperror(0,'YALMIP');
try
x = sdpvar(2,2);
x = sdpvar(2,2,'symmetric');
x = sdpvar(2,2,'full');
x = sdpvar(2,2,'toeplitz');
x = sdpvar(2,2,'hankel');
x = sdpvar(2,2,'skew');
if ~ishermitian(sdpvar(2,2,'hermitian','complex'))
error('bug')
end
if ~issymmetric(sdpvar(2,2,'symmetric','complex'))
error('bug')
end
if ~isreal(real(sdpvar(2,2,'symmetric','complex')))
error('bug')
end
if isreal(sqrt(-1)*real(sdpvar(2,2,'symmetric','complex')))
error('bug')
end
x = sdpvar(2,1,'','co');
if ~isreal(x'*x)
error('bug')
end
x = sdpvar(2,2,'','co');
if ~isreal(diag(x'*x))
error('bug')
end
x = sdpvar(1,1);
y = sdpvar(2,2);
x*eye(2);
eye(2)*x;
y*3;
3*y;
x = sdpvar(2,3);
y = sdpvar(2,3);
assign(x,randn(2,3));
z = replace(x,x(1,1:2),[8 9]);
z = x+y;
z = x-y;
z = x+1;
z = x-1;
z = x+ones(2,3);
z = x-ones(2,3);
z = ones(2,3)-x;
z = ones(2,3)-x;
z = eye(2)*x;
z = x*eye(3);
z = diag(x);
z = trace(x(1:2,1:2));
z = diff(x);
z = fliplr(x);
z = flipud(x);
z = kron(x,eye(3));
z = kron(eye(3),x);
z = rot90(x);
z = sum(x);
z = diff(x);
z = x';
z = x.';
z = tril(x);
z = triu(x);
z = [x y];
z = [x;y];
sdpvar x y
diag([x y])*[x^-1;y^-1];
assert(isequal([x x;x x]*x,[x x;x x].*x))
assert(isequal(trace([x x;x x]*[x y;y x]),x*x+x*y+y*x+x*x))
% Regression ??
yalmip('clear')
sdpvar x
(1+x+x^4)*(1-x^2);
% Regression complex multiplication
A = randn(10,5)+sqrt(-1)*randn(10,5);
b = randn(10,1)+sqrt(-1)*randn(10,1);
x = sdpvar(5,1);
res = A*x-b;
assert(nnz(clean([res res]'*[res res]-res'*res,1e-8))==0)
assert(isreal(clean(res'*res,1e-8)))
assert(isreal(x*x'))
result = 'N/A';
catch
sol.info = 'Problems';
result = 'N/A';
pass = 0;
end
function [pass,sol,result] = feasible(ops)
t = sdpvar(1,1);
Y = sdpvar(2,2);
F = [Y<=t*eye(2), Y>=[1 0.2;0.2 1]];
sol = solvesdp(F,t,ops);
pass = ismember(sol.problem,[0 3 4 5]);
if pass
result = resultstring(t,1.2);
else
result = 'N/A';
end
function [pass,sol,result] = infeasible(ops)
t = sdpvar(1,1);
F = [t>=0, t<=-10];
sol = solvesdp(F,t,ops);
pass = ~(sol.problem==0);
result = 'N/A';
function [pass,sol,result] = lyapell(ops)
A = [1 0;0.4 1];
B = [0.4;0.08];
L = [1.9034 1.1501];
Y = sdpvar(2,2);
F = [Y Y*(A-B*L)';(A-B*L)*Y Y]>=0;
F = F+[L*Y*L'<=1];
sol = solvesdp(F,-logdet(Y),ops);
Y = double(Y);
pass = ismember(sol.problem,[0 3 4 5]);
if pass
result = resultstring(Y,[2.9957 -4.1514;-4.1514 6.2918]);
else
result = 'N/A';
end
%pass = pass & (sum(sum(abs(Y-[2.9957 -4.15;-4.15 6.29])))<0.01);
function [pass,sol,result] = lyapell2(ops)
A = [1 0;0.4 1];
B = [0.4;0.08];
L = [1.9034 1.1501];
Y = sdpvar(2,2);
F = [Y Y*(A-B*L)';(A-B*L)*Y Y]>=0;
F = F+[L*Y*L'<=1];
sol = solvesdp(F,-logdet(Y),ops);
Y = double(Y);
pass = ismember(sol.problem,[0 3 4 5]);
if pass
result = resultstring(Y,[2.9957 -4.1514;-4.1514 6.2918]);
else
result = 'N/A';
end
function [pass,sol,result] = complete(ops)
x = sdpvar(1,1);
y = sdpvar(1,1);
z = sdpvar(1,1);
X = [[x 1 2];[1 y 3];[2 3 100]];
F = [X>=0,x>=10,y>=0,z>=0, x<=1000, y<=1000,z<=1000];
sol = solvesdp(F,x+y+z,ops);
x = double(x);
y = double(y);
z = double(z);
pass = ismember(sol.problem,[0 3 4 5]);
result = 'N/A';
if pass
result = resultstring([x;y;z],[10;0.1787;0]);
else
result = 'N/A';
end
function [pass,sol,result] = complete_2(ops)
yalmip('clear')
x = sdpvar(1,1);
z = sdpvar(1,1);
X = [[x 2];[2 z]];
F = [X>=0, x>=0,z>=0,x<=10,z<=10];
sol = solvesdp(F,x+z,ops);
x = double(x);
z = double(z);
pass = ismember(sol.problem,[0 3 4 5]);
result = 'N/A';
if pass
result = resultstring([x;z],[2;2]);
else
result = 'N/A';
end
function [pass,sol,result] = maxcut(ops)
% Upper bound on maxcut of a n-cycle
n = 15;
Q = zeros(n);
for i = 1:n-1
Q(i,i+1) = 1;Q(i+1,i) = 1;
end
Q(n,1) = 1;Q(1,n) = 1;
Q = 0.25*(diag(Q*ones(n,1))-Q);
t = sdpvar(1,1);
tau = sdpvar(n,1);
F = t>=0;
M = [[-Q zeros(n,1)];[zeros(1,n) t]];
for i = 1:n
ei = zeros(n,1);ei(i,1) = 1;
M = M+tau(i)*[ei*ei' zeros(n,1);zeros(1,n) -1];
end
F = F+[M>=0];
sol = solvesdp(F,t,ops);
t = double(t);
tau = double(tau);
pass = ismember(sol.problem,[0 3 4 5]);
if pass
result = resultstring(t,14.8361);
else
result = 'N/A';
end
function [pass,sol,result] = socptest1(ops)
yalmip('clear')
x = sdpvar(2,1);
a = [0;1];
b = [1;1];
F = norm(x-a)<=1;
F = F+[norm(x-b) <= 1];
sol = solvesdp(F,sum(x),ops);
pass = ismember(sol.problem,[0 3 4 5]);
x = double(x);
if pass
result = resultstring(sum(x),0.58578);
else
result = 'N/A';
end
function [pass,sol,result] = socptest2(ops)
z = sdpvar(3,1);
x = sdpvar(3,1);
y = sdpvar(3,1);
a = [0;1;0];
b = [1;1;0];
F = norm(x-a)<=1;
F = F+[norm(x-b)<=1];
F = F+[x(1)==0.35];
F = F+[z(2:3)==[5;6]];
sol = solvesdp(F,sum(x),ops);
pass = ismember(sol.problem,[0 3 4 5]);
x = double(x);
y = double(y);
z = double(z);
if pass
result = resultstring(sum(x),0.27592);
else
result = 'N/A';
end
function [pass,sol,result] = socptest3(ops)
z = sdpvar(2,1);
x = sdpvar(2,1);
y = sdpvar(3,1);
a = [0;1];
b = [1;1];
F = norm(x-a)<=1;
F = F+[norm(x-b)<=1];
F = F+[x(1)==0.35];
F = F+[z(1,end)>=5];
F = F+[z(2,end)<=100];
F = F+[z(2)==5];
sol = solvesdp(F,sum(x),ops);
pass = ismember(sol.problem,[0 3 4 5]);
x = double(x);
y = double(y);
z = double(z);
if pass
result = resultstring(sum(x),0.59);
else
result = 'N/A';
end
function [pass,sol,result] = feasiblelp(ops)
N = 5;
A = [2 -1;1 0];
B = [1;0];
C = [0.5 0.5];
[H,S] = create_CHS(A,B,C,N);
x = [2;0];
t = sdpvar(2*N,1);
U = sdpvar(N,1);
Y = H*x+S*U;
F = (U<=1)+(U>=-1);
F = F+(Y(N)>=-1);
F = F+(Y(N)<=1);
F = F+([Y;U]<=t)+([Y;U]>=-t);
sol = solvesdp(F,sum(t),ops);
pass = ismember(sol.problem,[0 3 4 5]);
if pass
result = resultstring(sum(t),12.66666);
else
result = 'N/A';
end
function [pass,sol,result] = feasibleqp(ops)
N = 5;
A = [2 -1;1 0];
B = [1;0];
C = [0.5 0.5];
[H,S] = create_CHS(A,B,C,N);
x = [2;0];
U = sdpvar(N,1);
Y = H*x+S*U;
F = (U<=1)+(U>=-1);
F = F+(Y(N)>=-1);
F = F+(Y(N)<=1);
sol = solvesdp(F,Y'*Y+U'*U,ops);
pass = ismember(sol.problem,[0 3 4 5]);
if pass
result = resultstring(Y'*Y+U'*U,26.35248);
else
result = 'N/A';
end
function [pass,sol,result] = infeasibleqp(ops)
N = 5;
A = [2 -1;1 0];
B = [1;0];
C = [0.5 0.5];
[H,S] = create_CHS(A,B,C,N);
x = [2;0];
U = sdpvar(N,1);
Y = H*x+S*U;
F = (U<=1)+(U>=-1);
F = F+(Y(N)>=-1);
F = F+(Y(N)<=1);
F = F + (U>=0);
sol = solvesdp(F,Y'*Y+U'*U,ops);
pass = ismember(sol.problem,[1]);
result = 'N/A';
function [pass,sol,result] = infeasiblesdp(ops)
A = magic(6);
A = A*A';
P = sdpvar(6,6);
sol = solvesdp((A'*P+P*A <= -P) + (P>=eye(6)),trace(P),ops);
pass = (sol.problem==1);
result = 'N/A';
function [pass,sol,result]=toepapprox(ops)
n = 5;
P = magic(n);
Z = sdpvar(n,n,'toeplitz');
t = sdpvar(n,n,'full');
F = (P-Z<=t)+(P-Z>=-t);
sol = solvesdp(F,sum(sum(t)),ops);
pass = ismember(sol.problem,[0 3 4 5]);
result = 'N/A';
if pass
result = resultstring(sum(sum(t)),156);
else
result = 'N/A';
end
function [pass,sol,result]=toepapprox2(ops)
n = 5;
P = magic(n);
Z = sdpvar(n,n,'toeplitz');
t = sdpvar(n,n,'full');
resid = P-Z;resid = resid(:);
sol = solvesdp([],resid'*resid,ops);
pass = ismember(sol.problem,[0 3 4 5]);
result = 'N/A';
if pass
result = resultstring(resid'*resid,1300);
else
result = 'N/A';
end
function [pass,sol,result]=momenttest(ops)
x1 = sdpvar(1,1);
x2 = sdpvar(1,1);
x3 = sdpvar(1,1);
objective = -2*x1+x2-x3;
F = (x1*(4*x1-4*x2+4*x3-20)+x2*(2*x2-2*x3+9)+x3*(2*x3-13)+24>=0);
F = F + (4-(x1+x2+x3)>=0);
F = F + (6-(3*x2+x3)>=0);
F = F + (x1>=0);
F = F + (2-x1>=0);
F = F + (x2>=0);
F = F + (x3>=0);
F = F + (3-x3>=0);
sol = solvemoment(F,objective,ops);
pass = ismember(sol.problem,[0 3 4 5]);
result = 'N/A';
if pass
result = resultstring(objective,-6);
else
result = 'N/A';
end
function [pass,sol,result]=sostest(ops)
yalmip('clear')
x = sdpvar(1,1);
y = sdpvar(1,1);
t = sdpvar(1,1);
F = (sos(1+x^7+x^8+y^4-t));
sol = solvesos(F,-t,ops);
pass = ismember(sol.problem,[0 3 4 5]);
result = 'N/A';
if pass
result = resultstring(t,0.9509);
else
result = 'N/A';
end
function [pass,sol,result]=bmitest(ops)
A = [-1 2;-3 -4];
P = sdpvar(2,2);
alpha = sdpvar(1,1);
F = (P>=eye(2))+(A'*P+P*A <= -2*alpha*P)+(alpha >= 0);
sol = solvesdp([F,P(:) <= 100],-alpha,ops);
pass = ismember(sol.problem,[0 3 4 5]);
result = 'N/A';
if pass
result = resultstring(alpha,2.5);
else
result = 'N/A';
end
function [pass,sol,result]=circuit1(ops)
x = sdpvar(7,1);
% Data
a = ones(7,1);
alpha = ones(7,1);
beta = ones(7,1);
gamma = ones(7,1);
f = [1 0.8 1 0.7 0.7 0.5 0.5]';
e = [1 2 1 1.5 1.5 1 2]';
Cout6 = 10;
Cout7 = 10;
% Model
C = alpha+beta.*x;
A = sum(a.*x);
P = sum(f.*e.*x);
R = gamma./x;
D1 = R(1)*(C(4));
D2 = R(2)*(C(4)+C(5));
D3 = R(3)*(C(5)+C(7));
D4 = R(4)*(C(6)+C(7));
D5 = R(5)*(C(7));
D6 = R(6)*Cout6;
D7 = R(7)*Cout7;
% Constraints
F = (x >= 1) + (P <= 20) + (A <= 100);
% Objective
D = max((D1+D4+D6),(D1+D4+D7),(D2+D4+D6),(D2+D4+D7),(D2+D5+D7),(D3+D5+D6),(D3+D7));
sol = solvesdp(F,D,ops);
pass = ismember(sol.problem,[0 3 4 5]);
result = 'N/A';
if pass
result = resultstring(D,7.8936);
else
result = 'N/A';
end
function result = resultstring(x,xopt)
if norm(double(x(:))-xopt(:))<=1e-3*(1+norm(xopt(:)))
result = 'Correct';
else
result = 'Incorrect';
end
function assert(a)
if ~a
error('Assertion failed!');
end
|
github
|
EnricoGiordano1992/LMI-Matlab-master
|
solvesdp.m
|
.m
|
LMI-Matlab-master/yalmip/solvesdp.m
| 15,551 |
utf_8
|
8aa9cfafe34ac3e4c7a88041a3fd9d2d
|
function diagnostic = solvesdp(varargin)
%SOLVESDP Obsolete command, please use OPTIMIZE
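% A hedged usage sketch (the legacy entry point takes the same arguments as
% OPTIMIZE; the toy constraint below is illustrative):
%    x = sdpvar(1,1);
%    diagnostic = solvesdp([x >= 1], x^2);   % legacy call
%    diagnostic = optimize([x >= 1], x^2);   % preferred equivalent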
yalmiptime = clock; % Let us see how much time we spend
% *********************************
% CHECK INPUT
% *********************************
nargin = length(varargin);
% First check of objective for early transfer to multiple solves
if nargin>=2
if isa(varargin{2},'double')
varargin{2} = [];
elseif isa(varargin{2},'sdpvar') && numel(varargin{2})>1
% Several objectives
diagnostic = solvesdp_multiple(varargin{:});
return
end
end
if nargin<1
help solvesdp
return
else
F = varargin{1};
if isa(F,'constraint')
F = lmi(F);
end
if isa(F,'lmi')
F = flatten(F);
end
if isa(F,'sdpvar')
% We do allow sloppy coding of logic constraints, i.e. writing a
% constraint as [a|b true(a)]
Fnew = [];
for i = 1:length(F)
if length(getvariables(F(i)))>1
Fnew = nan;
break
end
operator = yalmip('extstruct',getvariables(F(i)));
if isempty(operator)
Fnew = nan;
break
end
if length(operator)>1
Fnew = nan;
break
end
if ~strcmp(operator.fcn,'or')
Fnew = nan;
break
end
Fnew = Fnew + (true(F(i)));
end
if isnan(Fnew)
error('First argument (F) should be a constraint object.');
else
F = Fnew;
end
elseif isempty(F)
F = lmi([]);
elseif ~isa(F,'lmi')
error('First argument (F) should be a constraint object.');
end
end
if nargin>=2
h = varargin{2};
if isa(h,'double')
h = [];
end
if ~(isempty(h) | isa(h,'sdpvar') | isa(h,'logdet') | isa(h,'ncvar'))
if isa(h,'struct')
error('Second argument (the objective function h) should be an sdpvar or logdet object (or empty). It appears as if you sent an options structure in the second argument.');
else
error('Second argument (the objective function h) should be an sdpvar or logdet object (or empty).');
end
end
if isa(h,'logdet')
logdetStruct.P = getP(h);
logdetStruct.gain = getgain(h);
h = getcx(h);
if isempty(F)
F = ([]);
end
else
logdetStruct = [];
end
else
logdetStruct = [];
h = [];
end
if ~isempty(F)
if any(is(F,'sos'))
diagnostic = solvesos(varargin{:});
return
end
end
if isa(h,'sdpvar')
if is(h,'complex')
error('Complex valued objective does not make sense.');
end
end
if nargin>=3
options = varargin{3};
if ~(isempty(options) | isa(options,'struct'))
error('Third argument (options) should be an sdpsettings struct (or empty).');
end
if isempty(options)
options = sdpsettings;
end
else
options = sdpsettings;
end
options.solver = lower(options.solver);
% If user has logdet term, but no preference on solver, we try to hook up
% with SDPT3 if possible.
if ~isempty(logdetStruct)
if strcmp(options.solver,'')
% options.solver = 'sdpt3,*';
end
end
% Call chance solver?
if length(F) > 0
rand_declarations = is(F,'random');
if any(rand_declarations)
% diagnostic = solverandom(F(find(~rand_declarations)),h,options,recover(getvariables(sdpvar(F(find(unc_declarations))))));
return
end
end
% Call robust solver?
if length(F) > 0
unc_declarations = is(F,'uncertain');
if any(unc_declarations)
diagnostic = solverobust(F(find(~unc_declarations)),h,options,recover(getvariables(sdpvar(F(find(unc_declarations))))));
return
end
end
if isequal(options.solver,'mpt') | nargin>=4
solving_parametric = 1;
else
solving_parametric = 0;
end
% Just for safety
if isempty(F) & isempty(logdetStruct)
F = lmi;
end
if any(is(F,'sos'))
error('You have SOS constraints. Perhaps you meant to call SOLVESOS.');
end
% Super stupido
if length(F) == 0 & isempty(h) & isempty(logdetStruct)
diagnostic.yalmiptime = 0;
diagnostic.solvertime = 0;
diagnostic.info = 'No problems detected (YALMIP)';
diagnostic.problem = 0;
diagnostic.dimacs = [NaN NaN NaN NaN NaN NaN];
return
end
% Dualize the problem?
if ~isempty(F)
if options.dualize == -1
sdp = find(is(F,'sdp'));
if ~isempty(sdp)
if all(is(F(sdp),'sdpcone'))
options.dualize = 1;
end
end
end
end
if options.dualize == 1
[Fd,objd,aux1,aux2,aux3,complexInfo] = dualize(F,h,[],[],[],options);
options.dualize = 0;
diagnostic = solvesdp(Fd,-objd,options);
if ~isempty(complexInfo)
for i = 1:length(complexInfo.replaced)
n = size(complexInfo.replaced{i},1);
re = 2*double(complexInfo.new{i}(1:n,1:n));
im = 2*double(complexInfo.new{i}(1:n,n+1:end));
im=triu((im-im')/2)-(triu((im-im')/2))';
assign(complexInfo.replaced{i},re + sqrt(-1)*im);
end
end
return
end
% ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
% DID WE SELECT THE MOMENT SOLVER
% ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
if isequal(options.solver,'moment')
if ~isempty(logdetStruct)
error('Cannot dualize problems with logarithmic objective')
end
options.solver = options.moment.solver;
[diagnostic,x,momentdata] = solvemoment(F,h,options,options.moment.order);
diagnostic.momentdata = momentdata;
diagnostic.xoptimal = x;
return
end
% ******************************************
% COMPILE IN GENERALIZED YALMIP FORMAT
% ******************************************
[interfacedata,recoverdata,solver,diagnostic,F,Fremoved,ForiginalQuadratics] = compileinterfacedata(F,[],logdetStruct,h,options,0,solving_parametric);
% ******************************************
% FAILURE?
% ******************************************
if ~isempty(diagnostic)
diagnostic.yalmiptime = etime(clock,yalmiptime);
return
end
% ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
% DID WE SELECT THE LMILAB SOLVER WITH A KYP
% ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
if strcmpi(solver.tag,'lmilab') & any(is(F,'kyp'))
[diagnostic,failed] = calllmilabstructure(F,h,options);
if ~failed % Did this problem pass (otherwise solve using unstructured call)
diagnostic.yalmiptime = etime(clock,yalmiptime)-diagnostic.solvertime;
return
end
end
% ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
% DID WE SELECT THE KYPD SOLVER
% ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
if strcmpi(solver.tag,'kypd')
diagnostic = callkypd(F,h,options);
diagnostic.yalmiptime = etime(clock,yalmiptime)-diagnostic.solvertime;
return
end
% ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
% DID WE SELECT THE STRUL SOLVER
% ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
if strfind(solver.tag,'STRUL')
diagnostic = callstrul(F,h,options);
diagnostic.yalmiptime = etime(clock,yalmiptime)-diagnostic.solvertime;
return
end
%******************************************
% DID WE SELECT THE MPT solver (backwards compatibility)
%******************************************
actually_save_output = interfacedata.options.savesolveroutput;
if strcmpi(solver.tag,'mpt-2') | strcmpi(solver.tag,'mpt-3') | strcmpi(solver.tag,'mpcvx') | strcmpi(solver.tag,'mplcp')
interfacedata.options.savesolveroutput = 1;
if isempty(interfacedata.parametric_variables)
if (nargin < 4 | ~isa(varargin{4},'sdpvar'))
error('You must specify parametric variables.')
else
interfacedata.parametric_variables = [];
for i = 1:length(varargin{4})
interfacedata.parametric_variables = [interfacedata.parametric_variables;find(ismember(recoverdata.used_variables,getvariables(varargin{4}(i))))];
end
if isempty(varargin{5})
interfacedata.requested_variables = [];
else
interfacedata.requested_variables = [];
for i = 1:length(varargin{5})
interfacedata.requested_variables = [interfacedata.requested_variables;find(ismember(recoverdata.used_variables,getvariables(varargin{5}(i))))];
end
end
end
end
end
% *************************************************************************
% Just return the YALMIP model. Used when solving multiple objectives
% *************************************************************************
if isfield(options,'pureexport')
interfacedata.recoverdata = recoverdata;
diagnostic = interfacedata;
return
end
if strcmpi(solver.version,'geometric')
% Actual linear user variables
if options.assertgpnonnegativity
check = find(interfacedata.variabletype==0);
check = setdiff(check,interfacedata.aux_variables);
check = setdiff(check,interfacedata.evalVariables);
check = setdiff(check,interfacedata.extended_variables);
[lb,ub] = findulb(interfacedata.F_struc,interfacedata.K);
if ~all(lb(check)>=0)
% User appears to have explicitly selected a GP solver
if ~isempty(strfind(options.solver,'geometric')) || ~isempty(strfind(options.solver,'mosek')) || ~isempty(strfind(options.solver,'gpposy'))
% There are missing non-negativity bounds
output = createOutputStructure(zeros(length(interfacedata.c),1)+NaN,[],[],18,yalmiperror(18,''),[],[],nan);
diagnostic.yalmiptime = etime(clock,yalmiptime);
diagnostic.solvertime = output.solvertime;
try
diagnostic.info = output.infostr;
catch
diagnostic.info = yalmiperror(output.problem,solver.tag);
end
diagnostic.problem = output.problem;
if options.dimacs
diagnostic.dimacs = dimacs;
end
return
else
% YALMIP selected solver and picked a GP solver. As this is
% not a GP, we call again, but this time explicitly tell
% YALMIP that it isn't a GP
options.thisisnotagp = 1;
varargin{3} = options;
diagnostic = solvesdp(varargin{:});
return
end
end
end
end
% *************************************************************************
% TRY TO SOLVE PROBLEM
% *************************************************************************
if options.debug
eval(['output = ' solver.call '(interfacedata);']);
else
try
eval(['output = ' solver.call '(interfacedata);']);
catch
output = createOutputStructure(zeros(length(interfacedata.c),1)+NaN,[],[],9,yalmiperror(9,lasterr),[],[],nan);
end
end
if options.dimacs
try
b = -interfacedata.c;
c = interfacedata.F_struc(:,1);
A = -interfacedata.F_struc(:,2:end)';
x = output.Dual;
y = output.Primal;
% FIX this nonlinear crap (return variable type in
% compileinterfacedata)
if options.relax == 0 & any(full(sum(interfacedata.monomtable,2)~=0))
if ~isempty(find(sum(interfacedata.monomtable | interfacedata.monomtable,2)>1))
z=real(exp(interfacedata.monomtable*log(y+eps)));
y = z;
end
end
if isfield(output,'Slack')
s = output.Slack;
else
s = [];
end
dimacs = computedimacs(b,c,A,x,y,s,interfacedata.K);
catch
dimacs = [nan nan nan nan nan nan];
end
else
dimacs = [nan nan nan nan nan nan];
end
% ********************************
% ORIGINAL COORDINATES
% ********************************
output.Primal = recoverdata.x_equ+recoverdata.H*output.Primal;
% ********************************
% OUTPUT
% ********************************
diagnostic.yalmiptime = etime(clock,yalmiptime)-output.solvertime;
diagnostic.solvertime = output.solvertime;
try
diagnostic.info = output.infostr;
catch
diagnostic.info = yalmiperror(output.problem,solver.tag);
end
diagnostic.problem = output.problem;
if options.dimacs
diagnostic.dimacs = dimacs;
end
% Some more info is saved internally
solution_internal = diagnostic;
solution_internal.variables = recoverdata.used_variables(:);
solution_internal.optvar = output.Primal;
if ~isempty(interfacedata.parametric_variables)
diagnostic.mpsol = output.solveroutput;
options.savesolveroutput = actually_save_output;
end;
if interfacedata.options.savesolveroutput
diagnostic.solveroutput = output.solveroutput;
end
if interfacedata.options.savesolverinput
diagnostic.solverinput = output.solverinput;
end
if interfacedata.options.saveyalmipmodel
diagnostic.yalmipmodel = interfacedata;
end
if options.warning & warningon & isempty(findstr(diagnostic.info,'No problems detected'))
disp(['Warning: ' output.infostr]);
end
if ismember(output.problem,options.beeponproblem)
try
beep; % does not exist on all ML versions
catch
end
end
% And we are done! Save the result
if ~isempty(output.Primal)
if size(output.Primal,2)>1
for j = 1:size(output.Primal,2)
temp = solution_internal;
temp.optvar = temp.optvar(:,j);
yalmip('setsolution',temp,j);
end
else
yalmip('setsolution',solution_internal);
end
end
if interfacedata.options.saveduals & solver.dual
if isempty(interfacedata.Fremoved) | (nnz(interfacedata.Q)>0)
try
setduals(F,output.Dual,interfacedata.K);
catch
end
else
try
% Duals related to equality constraints/free variables
% have to be recovered b-A*x-Ht == 0
b = -interfacedata.oldc;
A = -interfacedata.oldF_struc(1+interfacedata.oldK.f:end,2:end)';
H = -interfacedata.oldF_struc(1:interfacedata.oldK.f,2:end)';
x = output.Dual;
b_equ = b-A*x;
newdual = H\b_equ;
setduals(interfacedata.Fremoved + F,[newdual;output.Dual],interfacedata.oldK);
catch
% this is a new feature...
disp('Dual recovery failed. Please report this issue.');
end
end
end
% Hack to recover original QCQP duals from gurobi
if strcmp(solver.tag,'GUROBI-GUROBI')
if length(ForiginalQuadratics) > 0
if isfield(output,'qcDual')
if length(output.qcDual) == length(ForiginalQuadratics)
% Ktemp.l = length(output.qcDual);
% Ktemp.f = 0;
% Ktemp.q = 0;
% Ktemp.s = 0;
% Ktemp.r = 0;
Ftemp = F + ForiginalQuadratics;
K = interfacedata.K;
Ktemp = K;
Ktemp.l = Ktemp.l + length(ForiginalQuadratics);
tempdual = output.Dual;
tempdual = [tempdual(1:K.f + K.l);-output.qcDual;tempdual(1+K.f+K.l:end)];
setduals(Ftemp,tempdual,Ktemp);
% setduals(ForiginalQuadratics,-output.qcDual,Ktemp);
end
end
end
end
function yesno = warningon
s = warning;
yesno = isequal(s,'on');
|
github
|
EnricoGiordano1992/LMI-Matlab-master
|
deriveBasis.m
|
.m
|
LMI-Matlab-master/yalmip/modules/sos/deriveBasis.m
| 323 |
utf_8
|
0401f866c43215ad1c43d68dd8499dd3
|
function H = deriveBasis(A_equ)
[L,U,P] = lu(A_equ');
r = colspaces(L');
AA = L';
H1 = AA(:,r);
H2 = AA(:,setdiff(1:size(AA,2),r));
H = P'*[-H1\H2;speye(size(H2,2))];
function [indx]=colspaces(A)
indx = [];
for i = 1:size(A,2)
s = max(find(A(:,i)));
indx = [indx s];
end
indx = unique(indx);
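% Hedged check sketch, not part of the original file; the function name is
% illustrative. deriveBasis appears to build H as a nullspace basis of A_equ
% (used to parameterize the equality constraints), so the residual below is
% expected to be near zero.
function res = checkBasis_sketch(A_equ)
H = deriveBasis(A_equ);
res = norm(full(A_equ*H), 'fro');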
|
github
|
EnricoGiordano1992/LMI-Matlab-master
|
postprocesssos.m
|
.m
|
LMI-Matlab-master/yalmip/modules/sos/postprocesssos.m
| 3,702 |
utf_8
|
6716cb77d4d92dcbb793f478d7c47993
|
function [BlockedQ,residuals] = postprocesssos(BlockedA,Blockedb,BlockedQ,sparsityPattern,options);
BlockedQ=applysparsity(BlockedQ,sparsityPattern);
for passes = 1:1:options.sos.postprocess
for constraint = 1:length(BlockedQ)
mismatch = computeresiduals(BlockedA,Blockedb,BlockedQ,constraint);
[ii,jj ]= sort(abs(mismatch));
jj = flipud(jj);
for j = jj(:)'%1:size(BlockedA{constraint}{1},1)
if abs(mismatch(j))>0
for i = 1:length(BlockedA{constraint})
n=sqrt(size(BlockedA{constraint}{i},2));
Ai=reshape(BlockedA{constraint}{i}(j,:),n,n);
nnzAi = nnz(Ai);
if nnzAi>0
dAi = Ai*mismatch(j)/nnzAi;
Qi = BlockedQ{constraint}{i};
% [R,p] = chol(BlockedQ{constraint}{i}-Ai*mismatch(j)/nnzAi);
[R,p] = chol(Qi-dAi);
if p
% gevps=eig(BlockedQ{constraint}{i},full(Ai)*mismatch(j)/nnzAi);
gevps=eig(Qi,full(dAi));
gevps=gevps(gevps>=0);
gevps=gevps(~isinf(gevps));
if isempty(gevps)
gevps=1;
end
lambda=max(0,min(1,min(gevps)));
% [R,p] = chol(BlockedQ{constraint}{i}-Ai*lambda*mismatch(j)/nnzAi);
[R,p] = chol(Qi-dAi*lambda);
else
lambda = 1;
end
% dAi = Ai*mismatch(j)/nnzAi;
if ~p
% BlockedQ{constraint}{i}=BlockedQ{constraint}{i}-Ai*lambda*mismatch(j)/nnzAi;
BlockedQ{constraint}{i}=Qi-dAi*lambda;
mismatch(j)=mismatch(j)*(1-lambda);
else
%lambda=1;
while lambda>1e-4 & (p~=0)
lambda=lambda/sqrt(2);
% [R,p]= chol(BlockedQ{constraint}{i}-Ai*lambda*mismatch(j)/nnzAi);
[R,p]= chol(Qi-dAi*lambda);
end
% if min(eig(BlockedQ{constraint}{i}-Ai*lambda*mismatch(j)/nnzAi))>=0
if min(eig(Qi-dAi*lambda))>=0
% BlockedQ{constraint}{i}=BlockedQ{constraint}{i}-Ai*lambda*mismatch(j)/nnzAi;
BlockedQ{constraint}{i}=Qi-dAi*lambda;
mismatch(j)=mismatch(j)*(1-lambda);
end
end
end
end
end
end
end
end
for constraint = 1:length(BlockedQ)
residuals(constraint,1) = norm(computeresiduals(BlockedA,Blockedb,BlockedQ,constraint),'inf');
end
function mismatch = computeresiduals(BlockedA,Blockedb,BlockedQ,constraint);
lhs=0;
for k=1:length(BlockedA{constraint})
lhs=lhs+BlockedA{constraint}{k}*BlockedQ{constraint}{k}(:);
end
mismatch = lhs-Blockedb{constraint};
function BlockedQ=applysparsity(BlockedQ,sparsityPattern);
if ~isempty(sparsityPattern)
for i = 1:length(BlockedQ)
for j = 1:length(BlockedQ{i})
BlockedQ{i}{j}(sparsityPattern{i}{j}) = 0;
end
end
end
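% Hedged helper sketch, not part of the original file and never called; the
% function name is illustrative. The post-processing above is meant to keep
% every Gramian block positive semidefinite, which can be checked with a
% Cholesky test on a slightly regularized copy of each block.
function allpsd = checkblockspsd_sketch(BlockedQ)
allpsd = 1;
for i = 1:length(BlockedQ)
    for j = 1:length(BlockedQ{i})
        Qij = BlockedQ{i}{j};
        [R,p] = chol(Qij + 1e-12*speye(size(Qij,1)));
        allpsd = allpsd & (p == 0);
    end
end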
|
github
|
EnricoGiordano1992/LMI-Matlab-master
|
generate_kernel_representation_data.m
|
.m
|
LMI-Matlab-master/yalmip/modules/sos/generate_kernel_representation_data.m
| 9,746 |
utf_8
|
408c65ea2806342ae7e7998a85c98ba7
|
function [A,b] = generate_kernel_representation_data(N,N_unique,exponent_m2,exponent_p,p,options,p_base_parametric,ParametricIndicies,MonomIndicies,FirstRun)
persistent saveData
exponent_p_parametric = exponent_p(:,ParametricIndicies);
exponent_p_monoms = exponent_p(:,MonomIndicies);
pcoeffs = getbase(p);
if any(exponent_p_monoms(1,:))
pcoeffs=pcoeffs(:,2:end); % No constant term in p
end
b = [];
parametric = full((~isempty(ParametricIndicies) & any(any(exponent_p_parametric))));
% For problems with a lot of similar cones, this saves some time
reuse = 0;
if ~isempty(saveData) && isequal(saveData.N,N) & ~FirstRun
n = saveData.n;
ind = saveData.ind;
if isequal(saveData.N_unique,N_unique) & isequal(saveData.exponent_m2,exponent_m2)% & isequal(saveData.epm,exponent_p_monoms)
reuse = 1;
end
else
% Congruence partition sizes
for k = 1:size(N,1)
n(k) = size(N{k},1);
end
% Save old SOS definition
saveData.N = N;
saveData.n = n;
saveData.N_unique = N_unique;
saveData.exponent_m2 = exponent_m2;
saveData.N_unique = N_unique;
end
if reuse & options.sos.reuse
% Get old stuff
if size(exponent_m2{1},2)==2 % Stupid (sos(parametric)) case
ind = spalloc(1,1,0);
ind(1)=1;
allj = 1:size(exponent_p_monoms,1);
used_in_p = ones(size(exponent_p_monoms,1),1);
else
allj = [];
used_in_p = zeros(size(exponent_p_monoms,1),1);
hash = randn(size(exponent_p_monoms,2),1);
p_hash = exponent_p_monoms*hash;
exponent_p_monoms_hash = exponent_p_monoms*hash;
for i = 1:size(N_unique,1)
monom = sparse(N_unique(i,3:end));
j = find(exponent_p_monoms_hash == (monom*hash));
if isempty(j)
b = [b 0];
allj(end+1,1) = 0;
else
used_in_p(j) = 1;
allj(end+1,1:length(j)) = j(:)';
end
end
ind = saveData.ind;
end
else
allj = [];
used_in_p = zeros(size(exponent_p_monoms,1),1);
if size(exponent_m2{1},2)==2 % Stupid (sos(parametric)) case
ind = spalloc(1,1,0);
ind(1)=1;
allj = 1:size(exponent_p_monoms,1);
used_in_p = ones(size(exponent_p_monoms,1),1);
else
% To speed up some searching, we random-hash data
hash = randn(size(exponent_p_monoms,2),1);
for k = 1:length(exponent_m2)
if isempty(exponent_m2{k})
exp_hash{k}=[];
else
exp_hash{k} = sparse((exponent_m2{k}(:,3:end)))*hash; % SPARSE NEEDED DUE TO STRANGE NUMERICS IN MATLAB ON 0s (the stuff will differ on last bit in hex format)
end
end
p_hash = exponent_p_monoms*hash;
ind = spalloc(size(N_unique,1),sum(n.^2),0);
for i = 1:size(N_unique,1)
monom = N_unique(i,3:end);
monom_hash = sparse(monom)*hash;
LHS = 0;
start = 0;
for k = 1:size(N,1)
j = find(exp_hash{k} == monom_hash);
if ~isempty(j)
pos=exponent_m2{k}(j,1:2);
nss = pos(:,1);
mss = pos(:,2);
indicies = nss+(mss-1)*n(k);
ind(i,indicies+start) = ind(i,indicies+start) + 1;
end
start = start + (n(k))^2;
% start = start + (matrixSOSsize*n(k))^2;
end
j = find(p_hash == monom_hash);
if isempty(j)
allj(end+1,1) = 0;
else
used_in_p(j) = 1;
allj(end+1,1:length(j)) = j(:)';
end
end
end
end
saveData.ind = ind;
% Some parametric terms in p(x,t) do not appear in v'Qv
% So these have to be added 0*Q = b
not_dealt_with = find(used_in_p==0);
while ~isempty(not_dealt_with)
%j = findrows(exponent_p_monoms,exponent_p_monoms(not_dealt_with(1),:));
j = find(p_hash == p_hash(not_dealt_with(1)));
allj(end+1,1:length(j)) = j(:)';
used_in_p(j) = 1;
not_dealt_with = find(used_in_p==0);
ind(end+1,1)=0;
end
matrixSOSsize = length(p);
if parametric
% Inconsistent behaviour in MATLAB
if size(allj,1)==1
uu = [0;p_base_parametric];
b = sum(uu(allj+1))';
else
b = [];
for i = 1:matrixSOSsize
for j = i:matrixSOSsize
if i~=j
uu = [0;2*p_base_parametric(:,(i-1)*matrixSOSsize+j)];
else
uu = [0;p_base_parametric(:,(i-1)*matrixSOSsize+j)];
end
b = [b sum(uu(allj+1),2)'];
end
end
end
else
if matrixSOSsize == 1
uu = [zeros(size(pcoeffs,1),1) pcoeffs]';
b = sum(uu(allj+1,:),2)';
else
b = [];
for i = 1:matrixSOSsize
for j = i:matrixSOSsize
if i~=j
uu = [0;2*pcoeffs((i-1)*matrixSOSsize+j,:)'];
else
uu = [0;pcoeffs((i-1)*matrixSOSsize+j,:)'];
end
b = [b;sum(uu(allj+1,:),2)'];
end
end
end
% uu = [0;pcoeffs(:)];
% b = sum(uu(allj+1),2)';
end
b = b';
dualbase = ind;
j = 1;
A = cell(size(N,1),1);
for k = 1:size(N,1)
if matrixSOSsize==1
A{k} = dualbase(:,j:j+n(k)^2-1);
else
% Quick fix for matrix SOS case, should be optimized
A{k} = inflate(dualbase(:,j:j+n(k)^2-1),matrixSOSsize,n(k));
end
j = j + n(k)^2;
end
b = b(:);
function newAi = inflate(Ai,matrixSOSsize,n);
% Quick fix for matrix SOS case, should be optimized
newAi = [];
newAj = [];
newAk = [];
top = 1;
for i = 1:matrixSOSsize
for r = i:matrixSOSsize
for m = 1:size(Ai,1)
ai = reshape(Ai(m,:),n,n);
if 1
% V = spalloc(matrixSOSsize,matrixSOSsize,2);
% V(i,r)=1;
% V(r,i)=1;
% aii = kron(V,ai);
% aii = aii(:);
% [ii,jj,kk] = find(aii);
% newAj = [newAj ii(:)'];
% newAi = [newAi repmat(top,1,length(ii))];
% newAk = [newAk kk(:)'];
[dnewAj,dnewAi,dnewAk] = inflatelocal(ai,matrixSOSsize,r,i,top);
newAj = [newAj dnewAj];
newAi = [newAi dnewAi];
newAk = [newAk dnewAk];
% newAi = [newAi;ai(:)'];
else
[dnewAjC,dnewAiC,dnewAkC] = inflatelocal(ai,matrixSOSsize,r,i,top);
[dnewAj,dnewAi,dnewAk] = inflatelocalnew(ai,matrixSOSsize,r,i,top,n);
AA=reshape(full(sparse(dnewAi*0+1,dnewAj,dnewAk,1,(matrixSOSsize*n)^2)),n*matrixSOSsize,[]);
AA2=reshape(full(sparse(dnewAiC*0+1,dnewAjC,dnewAkC,1,(matrixSOSsize*n)^2)),n*matrixSOSsize,[]);
if norm(AA-AA2)>0
1
end
newAj = [newAj dnewAj];
newAi = [newAi dnewAi];
newAk = [newAk dnewAk];
%
% [ii,jj,kk] = find(ai-diag(diag(ai)));
% iii = [(i-1)*n+ii;(r-1)*n+jj]
% jjj = [(r-1)*n+jj;(i-1)*n+ii]
% kkk = [kk;kk];
% indexi = repmat(top,1,length(iii));
% index = iii+(jjj-1)*matrixSOSsize*n
% newAj = [newAj index(:)'];
% newAi = [newAi indexi];
% newAk = [newAk kkk(:)'];
%
% [ii,jj,kk] = find(diag(diag(ai)));
% iii = [(i-1)*n+ii]
% jjj = [(r-1)*n+jj]
% kkk = [kk];
% indexi = repmat(top,1,length(iii));
% index = iii+(jjj-1)*matrixSOSsize*n
% newAj = [newAj index(:)'];
% newAi = [newAi indexi];
% newAk = [newAk kkk(:)'];
end
%sparse(indexi,index,kkk,1,(n*nA)^2)
top = top+1;
end
end
end
newAi = sparse(newAi,newAj,newAk,top-1,(matrixSOSsize*n)^2);
function [Z,Q1,R] = sparsenull(A)
[Q,R] = qr(A');
n = max(find(sum(abs(R),2)));
Q1 = Q(:,1:n);
R = R(1:n,:);
Z = Q(:,n+1:end); % New basis
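% Hedged check sketch, not called anywhere; the function name is illustrative.
% sparsenull above is expected to return Z spanning the nullspace of A and a
% thin factorization A' ~ Q1*R; the residuals below quantify both claims.
function [resZ,resQR] = checksparsenull_sketch(A)
[Z,Q1,R] = sparsenull(A);
resZ = norm(full(A*Z),'fro');
resQR = norm(full(A' - Q1*R),'fro');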
function [dnewAj,dnewAi,dnewAk] = inflatelocal(ai,matrixSOSsize,r,i,top)
V = spalloc(matrixSOSsize,matrixSOSsize,2);
V(i,r)=1;
V(r,i)=1;
aii = kron(V,ai);
aii = aii(:);
[ii,jj,kk] = find(aii);
dnewAj = ii(:)';
dnewAi = repmat(top,1,length(ii));
dnewAk = kk(:)';
% newAi = [newAi;ai(:)'];
function [dnewAj,dnewAi,dnewAk] = inflatelocalnew(ai,matrixSOSsize,r,i,top,n)
if r==i
[ii,jj,kk] = find(ai-diag(diag(ai)));
else
[ii,jj,kk] = find(ai);
end
iii = [(i-1)*n+ii;(r-1)*n+jj];
jjj = [(r-1)*n+jj;(i-1)*n+ii];
kkk = [kk;kk];
indexi = repmat(top,1,length(iii));
index = iii+(jjj-1)*matrixSOSsize*n;
dnewAj = [ index(:)'];
dnewAi = [ indexi];
dnewAk = [ kkk(:)'];
[ii,jj,kk] = find(diag(diag(ai)));
iii = [(i-1)*n+ii];
jjj = [(r-1)*n+jj];
kkk = [kk];
indexi = repmat(top,1,length(iii));
index = iii+(jjj-1)*matrixSOSsize*n;
dnewAj = [dnewAj index(:)'];
dnewAi = [dnewAi indexi];
dnewAk = [dnewAk kkk(:)'];
|
github
|
EnricoGiordano1992/LMI-Matlab-master
|
solvebilevel.m
|
.m
|
LMI-Matlab-master/yalmip/modules/bilevel/solvebilevel.m
| 28,584 |
utf_8
|
ca5553a39360503959e1ef8141c91ee0
|
function [sol,info] = solvebilevel(OuterConstraints,OuterObjective,InnerConstraints,InnerObjective,InnerVariables,options)
%SOLVEBILEVEL Simple global bilevel solver
%
% min CO(x,y)
% subject to OO(x,y)>0
% y = arg min OI(x,y)
% subject to CI(x,y)>0
%
% [DIAGNOSTIC,INFO] = SOLVEBILEVEL(CO, OO, CI, OI, y, options)
%
% diagnostic : Struct with standard YALMIP diagnostics
% info : Bilevel solver specific information
%
% Input
% CO : Outer constraints (linear elementwise)
% OO : Outer objective (convex quadratic)
% CI : Inner constraints (linear elementwise)
% OI : Inner objective (convex quadratic)
% y : Inner variables
% options : solver options from SDPSETTINGS.
%
% The behaviour of the bilevel solver can be controlled
% using the field 'bilevel' in SDPSETTINGS
%
% bilevel.outersolver : Solver for outer problems with inner KKT removed
% bilevel.innersolver : Solver for inner problem
% bilevel.rootcut : Number of cuts (based on complementary
% constraints) added in root (experimental)
% bilevel.relgaptol : Termination tolerance
% bilevel.compslacktol: Tolerance for accepting complementary slackness
% bilevel.feastol : Tolerance for feasibility in outer problem
%
%
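% EXAMPLE (hedged sketch; the constraints and objectives below are purely
% illustrative, chosen only to match the required problem classes)
% sdpvar x y
% CO = [x >= 0, x <= 3]; OO = (x-2)^2 + (y-2)^2;
% CI = [y >= 0, y <= x]; OI = (y-1)^2;
% diagnostic = solvebilevel(CO, OO, CI, OI, y);
%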
% See also SDPVAR, SDPSETTINGS, SOLVESDP
% min f(x,y) s.t g(x,y)<0, y = argmin [x;y]'*H*[x;y]+e'[x;y]+f, E[x;y]<d
if nargin<6
options = sdpsettings;
elseif isempty(options)
options = sdpsettings;
end
y = InnerVariables;
if ~isempty(InnerConstraints)
if any(is(InnerConstraints,'sos2'))
error('SOS2 structures not allowed in inner problem');
end
end
% User wants to use an external solver such as fmincon or cplex
if strcmp(options.bilevel.algorithm,'external')
% Derive KKT conditions of inner problem, append with outer, and solve
% using standard solver
z = [depends(OuterConstraints) depends(OuterObjective) depends(InnerObjective) depends(InnerConstraints)];
z = setdiff(z,depends(y));
z = recover(unique(z));
[K,details] = kkt(InnerConstraints,InnerObjective,z,options);
Constraints = [K,OuterConstraints];
sol = solvesdp(Constraints,OuterObjective,options);
info = [];
return
end
% Export the inner model, and select solver
options.solver = options.bilevel.innersolver;
if isa(InnerObjective, 'double') || is(InnerObjective,'linear')
[Imodel,Iax1,Iax2,inner_p] = export(InnerConstraints,InnerObjective,options,[],[],0);
elseif is(InnerObjective,'quadratic')
% We have to be a bit careful about cases such as x'y. This is convex in
% the inner problem, since x is constant there.
% [Q,c,f,dummy,nonquadratic] = vecquaddecomp(InnerObjective);
% Extract model for a fake quadratic model
% [InnerConstraints,failure] = expandmodel(InnerConstraints,InnerObjective)
%[Imodel,Iax1,Iax2,inner_p] = export(InnerConstraints,dummy'*diag(1+diag(Q{1}))*dummy+c{1}'*dummy,options,[],[],0);
% toptions = options;
% toptions.expandbilinear = 1;
yy = recover(setdiff(depends(y),setdiff(depends(InnerObjective),depends(y))));
[Imodel,Iax1,Iax2,inner_p] = export(InnerConstraints,yy'*yy+sum(recover(depends(InnerObjective))),options,[],[],0);
[Q,c,f,dummy,nonquadratic] = vecquaddecomp(InnerObjective,recover(inner_p.used_variables));
%[Imodel,Iax1,Iax2,inner_p] = export(InnerConstraints,InnerObjective,options,[],[],0);
% Now plug in the real quadratic function
if ~isequal(getvariables(dummy),inner_p.used_variables)
error('This quadratic form is not supported yet. Please make feature request')
else
inner_p.Q = Q{1};
inner_p.c = c{1};
inner_p.f = f{1};
end
else
error('Only LPs or convex QPs allowed as inner problem');
end
% Modeling of inner problem might have lead to more decision variables in
% the inner problem. Append these
v1 = getvariables(y);
v2 = inner_p.used_variables(inner_p.extended_variables);
v3 = inner_p.used_variables(inner_p.aux_variables(:));
y = recover(unique([v1(:);v2(:);v3(:)]));
% Export the outer model, and select solver
options.solver = options.bilevel.outersolver;
[Omodel,Oax1,Oax2,outer_p] = export(OuterConstraints,OuterObjective,options,[],[],0);
if isstruct(Oax2)
sol = Oax2;
info = 2;
return
end
% Export a joint model with KKT removed, to simplify some setup later
% [Auxmodel,Auxax1,Auxax2,outerinner_p] = export([OuterConstraints,InnerConstraints],OuterObjective+pi*InnerObjective,options,[],[],0);
if ~all(inner_p.variabletype==0) | ~isequal(inner_p.K.s,0) | ~isequal(inner_p.K.q,0)
error('Only LPs or convex QPs allowed as inner problem');
end
if options.bilevel.rootcuts & (~(isequal(outer_p.K.s,0) & isequal(outer_p.K.q,0)))
disp('Disjunctive cuts currently only supported when inner is a QP')
options.bilevel.rootcuts = 0;
end
FRP0 = inner_p;
[merged_mt,merged_vt] = mergemonoms(inner_p,outer_p);
if ~isequal(inner_p.used_variables,outer_p.used_variables)
invar = inner_p.used_variables;
outvar = outer_p.used_variables;
binary_variables = unique([inner_p.used_variables(inner_p.binary_variables) outer_p.used_variables(outer_p.binary_variables)]);
integer_variables = unique([inner_p.used_variables(inner_p.integer_variables) outer_p.used_variables(outer_p.integer_variables)]);
semi_variables = unique([inner_p.used_variables(inner_p.semicont_variables) outer_p.used_variables(outer_p.semicont_variables)]);
all_variables = unique([inner_p.used_variables outer_p.used_variables]);
if ~isequal(all_variables,inner_p.used_variables )
inner_p = pad(inner_p,all_variables);
FRP0 = inner_p;
FRP0.monomtable = speye(length(inner_p.c));
end
if ~isequal(all_variables,outer_p.used_variables )
outer_p = pad(outer_p,all_variables);
end
else
binary_variables = unique([inner_p.used_variables(inner_p.binary_variables) outer_p.used_variables(outer_p.binary_variables)]);
integer_variables = unique([inner_p.used_variables(inner_p.integer_variables) outer_p.used_variables(outer_p.integer_variables)]);
semi_variables = unique([inner_p.used_variables(inner_p.semicont_variables) outer_p.used_variables(outer_p.semicont_variables)]);
all_variables = inner_p.used_variables;
end
outer_p.monomtable = merged_mt;
outer_p.variabletype = merged_vt;
inner_p.variabletype = merged_vt;
inner_p.monomtable = merged_mt;
% Index to inner variables
for i = 1:length(y)
y_var(i) = find(all_variables == getvariables(y(i)));
end
% Index to outer variables
x_var = setdiff(1:length(all_variables),y_var);
% Index to binary variables
bin_var = [];
for i = 1:length(binary_variables)
bin_var(i) = find(all_variables == binary_variables(i));
end
int_var = [];
for i = 1:length(integer_variables)
int_var(i) = find(all_variables == integer_variables(i));
end
semi_var = [];
for i = 1:length(semi_variables)
semi_var(i) = find(all_variables == semi_variables(i));
end
if ~isempty(intersect(y_var,bin_var))
error('Only LPs or convex QPs allowed as inner problem (inner variables can not be binary)');
end
if ~isempty(intersect(y_var,int_var))
error('Only LPs or convex QPs allowed as inner problem (inner variables can not be integer)');
end
if ~isempty(intersect(y_var,semi_var))
error('Only LPs or convex QPs allowed as inner problem (inner variables can not be semi-continuous)');
end
inner_p.binary_variables = bin_var;
outer_p.binary_variables = bin_var;
inner_p.integer_variables = int_var;
outer_p.integer_variables = int_var;
inner_p.semicont_variables = semi_var;
outer_p.semicont_variables = semi_var;
% Number of inequalities in inner model = #bounded dual variables
ninequalities = inner_p.K.l;
nequalities = inner_p.K.f;
% Add dual related to inequalities in inner model to the model
dual_var = length(all_variables)+1:length(all_variables)+ninequalities;
%dual_var = length(inner_p.c)+1:length(inner_p.c)+ninequalities;
% Add duals related to equalities in inner model to the model
eqdual_var = dual_var(end)+1:dual_var(end)+inner_p.K.f;
% No cost of duals in outer objective
p = outer_p;
if ~isempty(dual_var)
p.c(dual_var(end))=0;
p.Q(dual_var(end),dual_var(end)) = sparse(0);
end
if ~isempty(eqdual_var)
p.c(eqdual_var(end))=0;
p.Q(eqdual_var(end),eqdual_var(end)) = sparse(0);
end
% Structure of the constraints
%
% Stationary equalities
% Outer equalities
% Inner equalities
% Inner LP inequalities
% Duals positive
% Outer inequalities (LP, SOCP, SDP)
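% In the YALMIP export format the inner problem reads (sketch, local notation,
% cross terms with the outer variables entering through Q)
%   min_y y'*Q*y + c'*y   s.t.   b_eq + A_eq*y == 0,  b_in + A_in*y >= 0,
% and the KKT block assembled below is
%   2*Q*y + c - A_in'*lambda - A_eq'*mu == 0    (stationarity)
%   b_eq + A_eq*y == 0,  b_in + A_in*y >= 0     (inner primal feasibility)
%   lambda >= 0                                 (dual feasibility)
%   lambda.*(b_in + A_in*y) == 0                (complementarity, enforced by branching)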
% Add stationarity to outer model
stationary = [inner_p.c(y_var) 2*inner_p.Q(y_var,:)];
if length(dual_var)>0
stationary = [stationary -inner_p.F_struc(1+inner_p.K.f:inner_p.K.f+inner_p.K.l,1+y_var)'];
end
if length(eqdual_var)>0
stationary = [stationary -inner_p.F_struc(1:inner_p.K.f,1+y_var)'];
end
p.F_struc = [stationary;p.F_struc spalloc(size(p.F_struc,1),length(dual_var) + length(eqdual_var),0)];
p.K.f = p.K.f + length(y_var);
% Add dual>0 to outer model
p.F_struc = [p.F_struc(1:p.K.f,:);spalloc(ninequalities,length(x_var)+length(y_var)+1,0) speye(ninequalities) spalloc(ninequalities,nequalities,0);p.F_struc(1+p.K.f:end,:)];
p.K.l = p.K.l + ninequalities;
% Add inner level constraints to outer model
p.F_struc = [p.F_struc(1:p.K.f,:);inner_p.F_struc spalloc(ninequalities+nequalities,ninequalities+nequalities,0);p.F_struc(1+p.K.f:end,:)];
p.K.f = p.K.f + inner_p.K.f;
p.K.l = p.K.l + inner_p.K.l;
slack_index = p.K.f+1:+p.K.f+ninequalities;
%p.lb = outerinner_p.lb;
%p.ub = outerinner_p.ub;
p.lb(dual_var) = 0;
p.ub(dual_var) = inf;
p.lb(eqdual_var) = -inf;
p.ub(eqdual_var) = inf;
p.x0 = [];
%p.variabletype = outerinner_p.variabletype;
%p.monomtable = outerinner_p.monomtable;
%p.evalMap = outerinner_p.evalMap;
%p.evalVariables = outerinner_p.evalVariables;
for i = 1:length(dual_var)
p.monomtable(dual_var(i),dual_var(i))=1;
p.variabletype(dual_var(i)) = 0;
end
for i = 1:length(eqdual_var)
p.monomtable(eqdual_var(i),eqdual_var(i))=1;
p.variabletype(eqdual_var(i)) = 0;
end
% xy = sdpvar(length(x_var)+length(y_var),1);
% z = sdpvar(length(dual_var),1);
% res = p.F_struc*[1;xy;z]
%
% F_bilevel = [res(1:p.K.f) == 0,res(p.K.f+1:end)>0]
%
% Enable outer problem to be nonconvex etc
p = build_recursive_scheme(p);
% Turned off, generates crash. Unit test in test_bilevel_1
% p = compress_evaluation_scheme(p);
p.lower = -inf;
p.options.verbose = max([0 options.verbose-1]);
p.level = 0;
p.as_free = true(ninequalities,1);
list{1} = p;
lower = -inf;
upper = inf;
iter = 0;
tol = 1e-8;
ndomcuts = 0;
ninfeascuts = 0;
% Extract the inequalities in the inner problem. These are really the
% interesting ones
inner_p.F_struc = [inner_p.F_struc(1+inner_p.K.f:end,:) spalloc(inner_p.K.l,ninequalities+nequalities,0)];
if options.verbose
disp('* Starting YALMIP bilevel solver.');
disp(['* Outer solver : ' outer_p.solver.tag]);
disp(['* Inner solver : ' inner_p.solver.tag]);
disp(['* Max iterations : ' num2str(p.options.bnb.maxiter)]);
disp(' Node Upper Gap(%) Lower Open');
end
gap = inf;
xsol = [];
sol.problem = 0;
iter = 0;
inner_p = detectdisjoint(inner_p);
while length(list)>0 & gap > options.bilevel.relgaptol & iter < options.bilevel.maxiter
iter = iter + 1;
[p,list,lower] = select(list);
Comment = '';
if p.lower<upper
if strcmp(p.options.solver,'bmibnb') & ~isinf(upper)
% Allow early termination in bmibnb if it is used in outer
% problem
p.options.bmibnb.lowertarget = upper;
end
output = feval(p.solver.call,p);
if output.problem==2
Comment = 'Unbounded node';
end
if output.problem==1
% Infeasible
ninfeascuts = ninfeascuts + 1;
Comment = 'Infeasible in solver';
else
switch output.problem
case 0
Comment = 'Solved to optimality';
otherwise
Comment = yalmiperror(output.problem);
end
z = apply_recursive_evaluation(p,output.Primal);
cost = z'*p.Q*z + p.c'*z + p.f;
ActuallyFeasible = checkfeasiblefast(p,z,options.bilevel.feastol);
if ~ActuallyFeasible
% Try to solve the relaxed feasibility problem using the
% inner solver (i.e. treat it as an LP). If infeasible, it is
% for sure infeasible
pAux = p;pAux.c = p.c*0;pAux.Q = p.Q*0;
outputCheck = feval(inner_p.solver.call,pAux);
if outputCheck.problem == 1
% We will not continue branching, and let the user know
% about this choice
Comment = ['Infeasible'];
cost = inf;
sol.problem = 4;
else
% Hard to say anything
Comment = ['Infeasible solution returned, resolve => continue'];
sol.problem = 4;
cost = p.lower;
end
end
if cost<inf
if strcmp(p.options.solver,'bmibnb')
if output.problem == -6
sol.problem = -6;
sol.info = yalmiperror(-6);
info = [];
return
end
p.lb = max([p.lb output.extra.propagatedlb],[],2);
p.ub = min([p.ub output.extra.propagatedub],[],2);
end
% These are duals in the original inner problem
lambda = output.Primal(dual_var);
% Constraint slacks in original inner problem
slack = inner_p.F_struc*[1;output.Primal];
% Outer variables
xi = z(x_var);
% Inner variables
yi = z(y_var);
res = (slack).*lambda;
if ActuallyFeasible
res = (slack).*lambda;
else
% Generate a dummy residual, to make sure we branch on
% the first free
res = (slack).*lambda*0;
res(find(p.as_free)) = 1:length(find(p.as_free));
end
if (all(p.as_free==0) | max(abs(res(p.as_free)))<options.bilevel.compslacktol) & ActuallyFeasible
% Feasible!
if upper>cost
upper = cost;
xsol = xi;
zsol = yi;
dualsol = output.Primal(dual_var);
end
elseif cost>upper-1e-10
ndomcuts = ndomcuts + 1;
else
% No official code, just playing around
if ActuallyFeasible & options.bilevel.solvefrp
FRP = FRP0;
if 0
FRP = fixvariables(FRP0,x_var,xi,y_var);
else
FRP.F_struc = [xi -sparse(1:length(x_var),x_var,ones(length(x_var),1),length(x_var),length(x_var)+length(y_var));FRP.F_struc];
FRP.K.f = FRP.K.f + length(xi);
FRP.options.verbose = 0;
QQ = sparse(FRP0.Q);
cc = sparse(FRP0.c);
FRP.c(y_var) = FRP.c(y_var) + 2*FRP.Q(x_var,y_var)'*xi;
FRP.Q(x_var,y_var)=0;
FRP.Q(y_var,x_var)=0;
FRP.Q(x_var,x_var)=0;
end
outputFRP = feval(inner_p.solver.call,FRP);
if outputFRP.problem == 0
if 0
z = zeros(length(outer_p.c),1);
z(x_var) = xi;
z(y_var) = outputFRP.Primal;
z2 = apply_recursive_evaluation(p,z);
else
z2 = apply_recursive_evaluation(p,outputFRP.Primal);
end
costFRP = z2'*outer_p.Q*z2 + outer_p.c'*z2 + outer_p.f;
if costFRP < upper & isfeasible(outer_p,z2)
upper = costFRP;
xsol = z2(x_var);
zsol = z2(y_var);
end
end
end
[ii,jj_tmp] = max(res(p.as_free));
ind_tmp = (1:length(res))';
ind_tmp = ind_tmp(p.as_free);
jj = ind_tmp(jj_tmp);
if strcmp(p.options.solver,'bmibnb')
% Since BMIBNB solves a relaxation of a relaxation, it
% can generate a lower bound which is lower than
% the lower bound before a compl. slack constraint
% was added.
p.lower = max(output.lower,lower);
else
p.lower = cost;
end
if iter<=options.bilevel.rootcuts
% Add a disjunction cut
p = disjunction(p,dual_var(jj),inner_p.F_struc(jj,:),output.Primal);
% Put in queue, it will be pulled back immediately
list = {list{:},p};
else
p1 = p;
p2 = p;
% Add dual == 0 on p1
p1.K.f = p1.K.f + 1;
p1.F_struc = [zeros(1,size(p1.F_struc,2));p1.F_struc];
p1.F_struc(1,1+dual_var(jj))=1;
p1.lb(dual_var(jj)) = -inf;
p1.ub(dual_var(jj)) = inf;
newequality = p1.F_struc(1,:);
redundantinequality = findrows(p1.F_struc(p1.K.f+1:end,:),newequality);
if ~isempty(redundantinequality)
p1.F_struc(p1.K.f+redundantinequality,:)=[];
p1.K.l = p1.K.l-length(redundantinequality);
end
% Add slack == 0
p2.K.f = p2.K.f + 1;
newequality = inner_p.F_struc(jj,:);
p2.F_struc = [newequality;p2.F_struc];
redundantinequality = findrows(p2.F_struc(p2.K.f+1:end,:),newequality);
if ~isempty(redundantinequality)
p2.F_struc(p2.K.f+redundantinequality,:)=[];
p2.K.l = p2.K.l-length(redundantinequality);
end
p1.as_free(jj) = false;
p2.as_free(jj) = false;
if ~isempty(inner_p.disjoints)
here = find(inner_p.disjoints(:,1) == jj);
if ~isempty(here)
p1.as_free(inner_p.disjoints(here,2))=false;
p2.as_free(inner_p.disjoints(here,2))=false;
else
here = find(inner_p.disjoints(:,2) == jj);
if ~isempty(here)
p1.as_free(inner_p.disjoints(here,1))=false;
p2.as_free(inner_p.disjoints(here,1))=false;
end
end
end
p1.level = p.level+1;
p2.level = p.level+1;
list = {list{:},p1};
list = {list{:},p2};
end
end
end
end
else
ndomcuts = ndomcuts + 1;
end
[list,lower] = prune(list,upper);
gap = abs((upper-lower)/(1e-3+abs(upper)+abs(lower)));
if isnan(gap)
gap = inf;
end
if options.verbose
fprintf(' %4.0f : %12.3E %7.2f %12.3E %2.0f %s\n',iter,full(upper),100*full(gap),full(lower),length(list),Comment)
end
end
info.upper = upper;
info.iter = iter;
info.ninfeascuts = ninfeascuts;
info.ndomcuts = ndomcuts;
if ~isempty(xsol)
assign(recover(all_variables(x_var)),xsol);
assign(recover(all_variables(y_var)),zsol);
else
sol.problem = 1;
end
function [list,lower] = prune(list,upper)
l = [];
for i = 1:length(list)
l = [l list{i}.lower];
end
j = find(upper > l+1e-10);
list = {list{j}};
if length(list) == 0
lower = upper;
else
lower = min(l(j));
end
function [p,list,lower] = select(list)
l = [];
for i = 1:length(list)
l = [l list{i}.lower];
end
[i,j] = min(l);
p = list{j};
list = {list{1:j-1},list{j+1:end}};
lower = min(l);
function p = addzero(p,i);
p.K.f = p.K.f + 1;
p.F_struc = [zeros(1,size(p.F_struc,2));p.F_struc];
p.F_struc(1,1+i)=1;
function outer_p = pad(outer_p,all_variables)
[i,loc] = find(ismember(all_variables,outer_p.used_variables));
p = outer_p;
% Set all bounds to infinite, and then place the known bounds
p.lb = -inf(length(all_variables),1);
p.lb(loc) = outer_p.lb;
p.ub = inf(length(all_variables),1);
p.ub(loc) = outer_p.ub;
% Set all variables as linear
p.variabletype = zeros(1,length(all_variables));
p.variabletype(loc) = outer_p.variabletype;
p.c = spalloc(length(all_variables),1,0);
p.c(loc) = outer_p.c;
if ~isempty(p.F_struc)
p.F_struc = spalloc(size(p.F_struc,1),length(all_variables)+1,nnz(p.F_struc));
p.F_struc(:,1) = outer_p.F_struc(:,1);
p.F_struc(:,1+loc) = outer_p.F_struc(:,2:end);
end
% if ~isempty(p.binary_variables)
% end
p.Q = spalloc(length(all_variables),length(all_variables),nnz(outer_p.Q));
p.Q(loc,loc) = outer_p.Q;
outer_p = p;
function p = disjunction(p,variable,const,xstar)
neq = p.K.f+1;
x = sdpvar(length(p.c),1);
e = p.F_struc*[1;x];
Model1 = [x(variable)==0,-e(1:p.K.f)==0, e(1+p.K.f:end)>=0];
Model2 = [const*[1;x]==0,-e(1:p.K.f)==0, e(1+p.K.f:end)>=0];
Ab1 = getbase(sdpvar(Model1));
Ab2 = getbase(sdpvar(Model2));
b1 = -Ab1(:,1);
A1 = Ab1(:,2:end);
b2 = -Ab2(:,1);
A2 = Ab2(:,2:end);
% b1c = [0;-p.F_struc(:,1)];
% b2c = [const(1);-p.F_struc(:,1)];
% A1c = [-eyev(length(p.c),variable)';p.F_struc(:,2:end)];
% A2c = [-const(2:end);p.F_struc(:,2:end)];
%norm(b1-b1c)
%norm(b2-b2c)
%norm(A1-A1c,inf)
%norm(A2-A2c,inf)
alpha = sdpvar(length(xstar),1);
beta = sdpvar(1);
mu1 = sdpvar(length(b1),1);
mu2 = sdpvar(length(b2),1);
Objective = alpha'*xstar-beta;
Constraint = [alpha' == mu1'*A1,alpha' == mu2'*A2,beta <= mu1'*b1, beta <= mu2'*b2,mu1(neq+1:end)>0,mu2(neq+1:end)>0];
%Constraint = [alpha' == mu1'*A1,alpha' == mu2'*A2,beta == mu1'*b1, beta == mu2'*b2,mu1(neq+1:end)>0,mu2(neq+1:end)>0];
%Constraint = [Constraint,-10<alpha<10,sum(mu1(neq+1:end))-sum(mu1(1:neq))<10,sum(mu2(neq+1:end))-sum(mu2(1:neq))<10];
%Constraint = [Constraint,-1<alpha<1,mu1(1)+mu2(1) == 1];
Constraint = [Constraint,-1<alpha<1,sum(mu1)+sum(mu2)==1];
%Constraint = [Constraint,sum(mu1(neq+1:end))-sum(mu1(1:neq))<10,sum(mu2(neq+1:end))-sum(mu2(1:neq))<10];
solvesdp(Constraint,Objective,sdpsettings('verbose',0));
p.K.l = p.K.l + 1;
p.F_struc = [p.F_struc;-double(beta) double(alpha)'];
function p = disjunctionFAST(p,variable,const,xstar)
neq = p.K.f+1;
n = length(p.c);
b1 = [0;-p.F_struc(:,1)];
b2 = [const(1);-p.F_struc(:,1)];
A1 = [-eyev(length(p.c),variable)';p.F_struc(:,2:end)];
A2 = [-const(2:end);p.F_struc(:,2:end)];
alpha_ind = 1:length(xstar);
beta_ind = alpha_ind(end)+1;
mu1_ind = (1:length(b1))+beta_ind;
mu2_ind = (1:length(b2))+mu1_ind(end);
alpha = sdpvar(length(xstar),1);
beta = sdpvar(1);
mu1 = sdpvar(length(b1),1);
mu2 = sdpvar(length(b2),1);
p_hull = p;
p_hull.c = zeros(mu2_ind(end),1);
p_hull.c(alpha_ind) = xstar;
p_hull.c(beta_ind) = -1;
% equalities alpha = Ai'*mui, sum(mu)==1
p_hull.K.f = length(xstar)*2+1;
p_hull.F_struc = [zeros(length(xstar),1) eye(length(xstar)) zeros(length(xstar),1) -A1' zeros(length(xstar),length(b2))];
p_hull.F_struc = [p_hull.F_struc;
zeros(length(xstar),1) eye(length(xstar)) zeros(length(xstar),1) zeros(length(xstar),length(b2)) -A2'];
p_hull.F_struc = [p_hull.F_struc ;1 zeros(1,length(xstar)) 0 -ones(1,length(b1)+length(b2))];
% Inequalities
p_hull.F_struc = [p_hull.F_struc ;0 zeros(1,length(xstar)) -1 b1' b2'*0];
p_hull.F_struc = [p_hull.F_struc ;0 zeros(1,length(xstar)) -1 0*b1' b2'];
npmu = length(b1)-neq;
p_hull.F_struc = [p_hull.F_struc; zeros(npmu,1) zeros(npmu,length(xstar)) zeros(npmu,1) zeros(npmu,neq) eye(npmu) zeros(npmu,length(b2))];
p_hull.F_struc = [p_hull.F_struc; zeros(npmu,1) zeros(npmu,length(xstar)) zeros(npmu,1) zeros(npmu,length(b1)) zeros(npmu,neq) eye(npmu)];
p_hull.F_struc = [p_hull.F_struc; ones(length(xstar),1) -eye(length(xstar)) zeros(length(xstar),1+2*length(b1))];
p_hull.F_struc = [p_hull.F_struc; ones(length(xstar),1) eye(length(xstar)) zeros(length(xstar),1+2*length(b1))];
p_hull.K.l = 2+npmu*2+2*length(xstar);
p_hull.lb = [];
p_hull.ub = [];
output = feval(p_hull.solver.call,p_hull);
alpha = output.Primal(alpha_ind);
beta = output.Primal(beta_ind);
% Objective = alpha'*xstar-beta;
% Constraint = [alpha' == mu1'*A1,alpha' == mu2'*A2,beta <= mu1'*b1, beta <= mu2'*b2,mu1(neq+1:end)>0,mu2(neq+1:end)>0];
% Constraint = [Constraint,-1<alpha<1,sum(mu1)+sum(mu2)==1];
% Constraint = [Constraint,sum(mu1)+sum(mu2)==1];
% solvesdp(Constraint,Objective,sdpsettings('verbose',0));
p.K.l = p.K.l + 1;
p.F_struc = [p.F_struc;-double(beta) double(alpha)'];
function feas = isfeasible(p,x)
feas = checkfeasiblefast(p,x,1e-8);
function p = detectdisjoint(p);
p.disjoints = [];
% for i = 1:p.K.l
% row1 = p.F_struc(i+p.K.f,:);
% for j = 2:1:p.K.l
% row2 = p.F_struc(j+p.K.f,:);
%
% if all(abs(row1)-abs(row2)==0)
% % candidate
% if nnz(row1 == -row2 & row1~=0)==1
% p.disjoints = [p.disjoints;i j];
% end
% end
% end
% end
%
function FRP = fixvariables(FRP0,x_var,xi,y_var);
% Copy current model
FRP = FRP0;
%
FRP.c(y_var) = FRP.c(y_var) + 2*FRP.Q(x_var,y_var)'*xi;
FRP.c(x_var) = [];
FRP.Q(:,x_var) = [];
FRP.Q(x_var,:) = [];
FRP.lb(x_var) = [];
FRP.ub(x_var) = [];
B = FRP.F_struc(:,1+x_var);
FRP.F_struc(:,1+x_var)=[];
FRP.F_struc(:,1) = FRP.F_struc(:,1) + B*xi;
% FRP.F_struc = [xi -sparse(1:length(x_var),x_var,ones(length(x_var),1),length(x_var),length(x_var)+length(y_var));FRP.F_struc];
% FRP.K.f = FRP.K.f + length(xi);
% FRP.options.verbose = 0;
% QQ = FRP0.Q;
% cc = FRP0.c;
% FRP.c(y_var) = FRP.c(y_var) + 2*FRP.Q(x_var,y_var)'*xi;
% FRP.Q(x_var,y_var)=0;
% FRP.Q(y_var,x_var)=0;
% FRP.Q(x_var,x_var)=0;
function [merged_mt,merged_vt] = mergemonoms(inner_p,outer_p);
if isequal(inner_p.used_variables,outer_p.used_variables)
merged_mt = inner_p.monomtable;
merged_vt = inner_p.variabletype;
else
invar = inner_p.used_variables;
outvar = outer_p.used_variables;
all_variables = unique([invar outvar]);
[i_inner,loc_inner] = find(ismember(all_variables,inner_p.used_variables));
[i_outer,loc_outer] = find(ismember(all_variables,outer_p.used_variables));
merged_mt = spalloc(length(all_variables),length(all_variables),0);
merged_vt = zeros(1,length(all_variables));
for i = 1:length(i_inner)
[ii,jj,kk] = find(inner_p.monomtable(i,:));
merged_mt(loc_inner(i),loc_inner(jj)) = kk;
merged_vt(loc_inner(i)) = inner_p.variabletype(i);
end
for i = 1:length(i_outer)
[ii,jj,kk] = find(outer_p.monomtable(i,:));
merged_mt(loc_outer(i),loc_outer(jj)) = kk;
merged_vt(loc_outer(i)) = outer_p.variabletype(i);
end
end
| github | EnricoGiordano1992/LMI-Matlab-master | robust_classify_variables_newest.m | .m | LMI-Matlab-master/yalmip/modules/robust/robust_classify_variables_newest.m | 5,032 | utf_8 | 71ff61f98a9e321589e7b9db9f902871 |
function [VariableType,F_x,F_w,F_xw,h] = robust_classify_variables_newest(F,h,ops,w);
Dependency = iterateDependance( yalmip('monomtable') | yalmip('getdependence') | yalmip('getdependenceUser'));
DependsOnw = find(any((Dependency(:,getvariables(w))),2));
h_variables = getvariables(h);
h_w = find(ismember(h_variables,DependsOnw));
if ~isempty(h_w)
base = getbase(h);
h0 = base(1);
base = base(2:end);base = base(:);
sdpvar t
F = [F,base(h_w(:))'*recover(h_variables(h_w)) <= t];
base(h_w) = 0;
h = base(:)'*recover(h_variables) + t;
Dependency = iterateDependance(yalmip('monomtable') | yalmip('getdependence') | yalmip('getdependenceUser'));
DependsOnw = find(any((Dependency(:,getvariables(w))),2));
end
DoesNotDependOnw = find(~any((Dependency(:,getvariables(w))),2));
[notused,x_variables] = find(Dependency(DoesNotDependOnw,:));
F_w = [];
F_x = [];
F_xw = [];
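% Example of the resulting partition (hypothetical model with decision
% variable x and uncertain variable w):
%   x >= 0          -> F_x   (decision variables only)
%   -1 <= w <= 1    -> F_w   (uncertainty description)
%   x + w <= 2      -> F_xw  (uncertain constraint, mixes x and w)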
for i = 1:length(F)
F_vars = getvariables(F(i));
F_vars = find(any((Dependency(F_vars,:)),1));
if all(ismember(F_vars,DependsOnw))
F_w = F_w + F(i);
elseif all(ismember(F_vars,DoesNotDependOnw))
F_x = F_x + F(i);
else
F_xw = F_xw + F(i);
end
end
ops.removeequalities = 0;
[F_x,failure,cause] = expandmodel(F_x,h,ops);
[F_w,failure,cause] = expandmodel(F_w,[],ops);
ops.expandbilinear = 1;
ops.reusemodel = 1; % Might be case x+norm(w)<1, norm(w)<1
[F_xw,failure,cause] = expandmodel(F_xw,h,ops,w);
w_variables = depends(F_w);
x_variables = unique([depends(F_x) depends(F_xw) depends(h)]);
x_variables = setdiff(x_variables,w_variables);
% After expanding the conic representable operators, we have introduced new variables
Dependency = iterateDependance(yalmip('monomtable') | yalmip('getdependence') | yalmip('getdependenceUser'));
auxiliary = unique([yalmip('extvariables') yalmip('auxvariables')]);
if 1%~isempty(auxiliary)
DependsOnw = find(any((Dependency(:,getvariables(w))),2));
DoesNotDependOnw = find(~any((Dependency(:,getvariables(w))),2));
temp = intersect(DependsOnw,x_variables);
x_variables = setdiff(x_variables,DependsOnw);
aux_with_w_dependence = temp;
else
aux_with_w_dependence = [];
end
% aux_w_or_w = union(aux_with_w_dependence,w_variables);
% old_w_variables = [];
% while ~isequal(w_variables,old_w_variables);
% old_w_variables = w_variables;
% for i = 1:length(F_xw)
% if all(ismember(depends(F_xw(i)),aux_w_or_w))
% if ~any(ismember(depends(F_xw(i)),x_variables))
% new_w = intersect(depends(F_xw(i)),aux_w_or_w);
% w_variables = union(w_variables,new_w);
% aux_with_w_dependence = setdiff(aux_with_w_dependence,new_w);
% goon = 1;
% end
% end
% end
% aux_w_or_w = union(aux_with_w_dependence,w_variables);
% end
x = recover(x_variables);
w = recover(w_variables);
if ~isempty(F_xw)
F_xw_scalar = F_xw(find(is(F_xw,'elementwise') | is(F_xw,'equality')));
F_xw_multi = F_xw - F_xw_scalar;
else
F_xw_scalar = [];
F_xw_multi = F_xw;
end
[MonomTable,Nonlinear] = yalmip('monomtable');
Dependency = yalmip('getdependenceUser');
evar = yalmip('extvariables');
if length(F_xw_scalar)>0
% Optimize dependency graph
X = sdpvar(F_xw_scalar);
Xvar = getvariables(X);
Xbase = getbase(X);Xbase = Xbase(:,2:end);
for i = 1:size(Xbase,1)
used = Xvar(find(Xbase(i,:)));
if any(Nonlinear(used))
used = find(any(MonomTable(used,:),1));
end
auxUsed = intersect(used,aux_with_w_dependence);
if ~isempty(auxUsed)
wUsed = intersect(used,w_variables);
if ~isempty(wUsed)
Dependency(auxUsed,wUsed) = 1;
end
eUsed = intersect(used,evar);
if ~isempty(eUsed)
Dependency(eUsed,auxUsed) = 1;
end
end
end
end
if length(F_xw_multi) > 0
for i = 1:length(F_xw_multi)
used = getvariables(F_xw_multi(i));
used = find(any(MonomTable(used,:),1));
auxUsed = intersect((used),aux_with_w_dependence);
wUsed = intersect((used),w_variables);
if ~isempty(auxUsed) & ~isempty(wUsed)
Dependency(auxUsed,wUsed) = 1;
end
eUsed = intersect((used),evar);
if ~isempty(auxUsed) & ~isempty(eUsed)
Dependency(eUsed,auxUsed) = 1;
end
end
end
UserDependency = yalmip('getdependenceUser');
fixed = find(any(UserDependency,2));
Dependency(fixed,:) = UserDependency(fixed,:);
Dependency = iterateDependance(Dependency);
VariableType.Graph = Dependency;
VariableType.x_variables = x_variables;
VariableType.w_variables = w_variables;
VariableType.aux_with_w_dependence = aux_with_w_dependence;
function Graph = iterateDependance(Graph)
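% Reflexive-transitive closure of the dependency graph, computed by repeated
% boolean squaring. Small illustrative example (hypothetical data):
%   G = sparse([0 1 0;0 0 1;0 0 0]);
%   G = iterateDependance(G);   % G(1,3) is now 1 via the chain 1 -> 2 -> 3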
Graph = Graph + speye(length(Graph));
Graph0 = double(Graph*Graph ~=0);
while ~isequal(Graph,Graph0)
Graph = Graph0;
Graph0 = double(Graph*Graph~=0);
end
| github | EnricoGiordano1992/LMI-Matlab-master | filter_polya.m | .m | LMI-Matlab-master/yalmip/modules/robust/filter_polya.m | 5,003 | utf_8 | 55fbd1c40e70d698b82c42d8aee3287f |
function [F_xw,F_polya] = filter_polya(F_xw,w,N)
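% Applies a Polya relaxation of order N to the uncertain constraints F_xw in
% the simplex variables w: entries depending on w are homogenized, multiplied
% by sum(w)^N (see polyapolynomial below) and replaced by nonnegativity of the
% resulting coefficients, while elements not touched by w are passed through.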
F_polya = [];
Fvars = getvariables(F_xw);
wvars = getvariables(w);
[mt,vt] = yalmip('monomtable');
if ~(N==ceil(N)) | (N<0)
error('The power in robust.polya must be a non-negative integer');
end
F_new = [];
if any(sum(mt(Fvars,wvars),2)>1)
removeF = zeros(length(F_xw),1);
for i = 1:length(F_xw)
Fi = sdpvar(F_xw(i));
if length(Fi)>1 & is(Fi,'symmetric')
% FIXME: SUUUUUPER SLOW
P = polyapolynomial(sdpvar(Fi),w,N);
C = [];
V = [];
for ii=1:length(P)
t2 = [];
for jj=1:length(P)
if isa(P(ii,jj),'double')
cc = cc*0;
else
[cc,vv] = (coefficients(P(ii,jj),w));
end
try
C = [C cc];
V = [V vv];
catch
error('Polya filter not yet implemented for all SDP cone cases. Please report bug')
end
end
end
if ~isa(diff(V'),'double')
error('Polya filter not yet implemented for all SDP cone cases. Please report bug')
end
for k = 1:size(C,1)
F_new = F_new + (reshape(C(k,:),size(P,1),size(P,1)) >= 0);
end
removeF(i) = 1;
else
Fi = Fi(:);
removeFi = zeros(length(Fi),1);
if ~isempty(intersect(depends(Fi),wvars))
for k = 1:length(Fi)
% disp('This was changed from >1')
if any(sum(mt(getvariables(Fi(k)),wvars),2)>=1)
p_polya = polyapolynomial(Fi(k),w,N);
ci = coefficients(p_polya,w);
if isa(ci,'sdpvar')
F_polya = F_polya + (ci >= 0);
else
if any(ci < 0)
error('Trivially infeasible: there are unparameterized negative coefficients in the Polya relaxation')
end
% disp('Whoops, take care of this silly case in filter_polya...')
end
%F_polya = F_polya + (coefficients(p_polya,w) > 0);
% this element has been taken care of
removeFi(k) = 1;
else
% 1
end
end
end
if all(removeFi)
% all elements removed, so we can remove the whole
% constraint
removeF(i) = 1;
else
% Keep some of the elements
F_xw(i) = (Fi(find(~removeFi)) >= 0);
end
end
end
F_xw(find(removeF)) = [];
end
F_polya = F_polya + F_new;
function pi_monoms = homogenize_(pi_monoms,precalc)%w,Nmax,Nj)
%pi_monoms = pi_monoms*sum(w)^(Nmax - Nj);
pi_monoms = pi_monoms*precalc;
function P = polyapolynomial(p,w,N)
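% Homogenizes every entry of p in w and multiplies by sum(w)^N (Polya
% relaxation): if p(w) > 0 on the simplex {w >= 0, sum(w) == 1}, then for N
% large enough all coefficients of sum(w)^N * p(w) are nonnegative.
% Scalar illustration (hypothetical variables, not used by this function):
%   sdpvar w1 w2
%   p = w1^2 - w1*w2 + w2^2;                % positive on the simplex
%   % with N = 1: (w1+w2)*p = w1^3 + w2^3, all coefficients nonnegative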
for i = 1:size(p,1)
for j = 1:size(p,2)
[pi_coeffs{i,j},pi_monoms{i,j}] = coefficients(p(i,j),w);
end
end
Nmax = -inf;
mt = yalmip('monomtable');
for i = 1:size(p,1)
for j = 1:size(p,2)
for k = 1:length(pi_monoms{i,j})
% deg_pi_monom{i,j}(k) = degree(pi_monoms{i,j}(k));
if isa(pi_monoms{i,j}(k),'double')
deg_pi_monom{i,j}(k) = 0;
else
deg_pi_monom{i,j}(k) = sum(mt(getvariables(pi_monoms{i,j}(k)),:));
end
Nmax = max(Nmax,deg_pi_monom{i,j}(k));
end
end
end
for i = 1:size(p,1)
for j = 1:size(p,2)
if isa(pi_monoms{i,j},'sdpvar')
preCalc = cell(Nmax+1,1);
InvolvedDegrees = unique(deg_pi_monom{i,j});
for degrees = InvolvedDegrees(:)'
k = find(deg_pi_monom{i,j} == degrees);
%for k = 1:length(pi_monoms{i,j})
% Nj = 1+Nmax-deg_pi_monom{i,j}(k);
Nj = 1+Nmax-degrees;
if isempty(preCalc{Nj})
preCalc{Nj} = sum(w)^(Nj-1);
% pi_monoms{i,j}(k) = homogenize_(pi_monoms{i,j}(k),w,Nmax,deg_pi_monom{i,j}(k));
pi_monoms{i,j}(k) = homogenize_(pi_monoms{i,j}(k),preCalc{Nj});
else
pi_monoms{i,j}(k) = homogenize_(pi_monoms{i,j}(k),preCalc{Nj});
end
end
end
end
end
P = [];
sumNmax = sum(w)^(N + Nmax);
sumN = sum(w)^(N);
for i = 1:size(p,1)
temp = [];
for j = 1:size(p,2)
if isa(pi_monoms{i,j},'sdpvar')
pij = (pi_coeffs{i,j}'*pi_monoms{i,j})*sumN;
else
pij = p(i,j)*sumNmax;
end
temp = [temp pij];
end
P = [P;temp];
end
| github | EnricoGiordano1992/LMI-Matlab-master | dualtososrobustness.m | .m | LMI-Matlab-master/yalmip/modules/robust/dualtososrobustness.m | 3,252 | utf_8 | 499ac2bd7b66be04e843f1a3edb0611c |
function SOSModel = dualtososrobustness(UncertainConstraint,UncertaintySet,UncertainVariables,DecisionVariables,p_tau_degree,localizer_tau_degree,Z_degree)
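% Sketch of the construction below: the uncertain conic constraint is
% scalarized with a vector v, the duals of the uncertainty set are
% parameterized as polynomial (matrix) functions Z{i}(v) of degree Z_degree,
% matching of coefficients in v ties Z to the decision variables, and cone
% membership of each Z{i}(v) on the ball v'*v <= 1 is certified with SOS
% multipliers of degree localizer_tau_degree.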
[E,F] = getEFfromSET(UncertaintySet);
[F0,Fz,Fx,Fxz] = getFzxfromSET(UncertainConstraint,UncertainVariables,DecisionVariables);
if is(UncertainConstraint,'sdp')
n = length(F0);
v = sdpvar(n,1);
d = v'*F0*v;
b = [];for i = 1:length(Fx);b = [b;v'*Fx{i}*v];end
c = [];for i = 1:length(Fz);c = [c;v'*Fz{i}*v];end
if ~isempty(Fxz)
A = [];
for i = 1:size(Fxz,1);
a = [];
for j = 1:size(Fxz,2);
a = [a v'*Fxz{i,j}*v];
end
A = [A;a];
end
else
A = zeros(length(Fx),length(Fz));
end
elseif is(UncertainConstraint,'socp')
n = length(F0)-1;
v = sdpvar(n,1);
d = [1 v']*F0;
b = [];for i = 1:length(Fx);b = [b;[1 v']*Fx{i}];end
c = [];for i = 1:length(Fz);c = [c;[1 v']*Fz{i}];end
A = zeros(length(Fx),length(Fz));
end
[Z,coeffs] = createDualParameterization(UncertaintySet,v,Z_degree);
coeffs = [DecisionVariables;coeffs(:)];
Zblock = blkdiag(Z{:});
D = [];
for i = 1:length(F)
D = [D, coefficients(trace(Zblock'*F{i})-(A(:,i)'*DecisionVariables + c(i)),v)==0];
end
[trivialFixed,thevalue] = fixedvariables(D);
while ~isempty(trivialFixed) && length(D)>0
D = replace(D,trivialFixed,thevalue);
% if ~isempty(D)
% D =
% D = sdpvar(replace(D,trivialFixed,thevalue))==0;
for i = 1:length(Z)
Z{i} = replace(Z{i},trivialFixed,thevalue);
end
Zblock = replace(Zblock, trivialFixed,thevalue);
if length(D)>0
[trivialFixed,thevalue] = fixedvariables(D);
end
end
% At this point Z is a function of v where v was used to scalarize the
% uncertain constraint. Now we must ensure Z{i}(v) in cone
gv = (1-v'*v);
for i = 1:length(Z)
if is(UncertaintySet(i),'sdp')
% We use the matrix sos approach
[tau,coefftau] = polynomial(v,localizer_tau_degree);
coeffs = [coeffs;coefftau];
D=[D,sos(Z{i}-eye(length(Z{i}))*tau*gv)];
elseif is(UncertaintySet(i),'socp')
% To get a SOS condition on dual Z{i}(v) in socp, we have to
% introduce a new variable to scalarize the socp
u = sdpvar(length(Z{i})-1,1);
[tau,coefftau] = polynomial(v,localizer_tau_degree);
coeffs = [coeffs;coefftau];
D = [D, sos([1 u']*Z{i}-tau*(1-u'*u))];
elseif is(UncertaintySet(i),'elementwise')
[tau,coefftau] = polynomial(v,localizer_tau_degree);
coeffs = [coeffs;coefftau];
D = [D, sos(Z{i}-tau*gv)];
elseif is(UncertaintySet(i),'equality')
% No constraints on dual
end
end
[tau,coefftau] = polynomial(v,p_tau_degree);
p = -(trace(Zblock'*E) + b'*DecisionVariables + d) - tau*gv;
coeffs = [coeffs;coefftau];
SOSModel = compilesos([D, sos(p)],[],sdpsettings('sos.model',2,'sos.scale',0),coeffs);
function [z,val] = fixedvariables(D)
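% Scans the linear equalities in D for rows involving a single variable,
% i.e. rows of the form A(i,j)*z_j == b(i), and returns those variables
% together with their forced values b(i)/A(i,j) so the caller can substitute
% them away.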
Base = getbase(sdpvar(D));
A = -Base(:,2:end);
b = Base(:,1);
v = getvariables(D);
z = [];
val = [];
for i = 1:size(A,1)
j = find(A(i,:));
if length(j)==1
z = [z v(j)];
val = [val b(i)/A(i,j)];
end
end
z = recover(z);
| github | EnricoGiordano1992/LMI-Matlab-master | filter_duality.m | .m | LMI-Matlab-master/yalmip/modules/robust/filter_duality.m | 8,603 | utf_8 | 91b1d25b80045a73fe70a694f9784e1a |
function [F,feasible] = filter_duality(F_xw,Zmodel,x,w,ops)
% Creates robustified version of the uncertain set of linear inequalities
% s.t A(w)*x <= b(w) for all F(w) >= 0 where F(w) is a conic set, here
% given in YALMIP numerical format.
%
% Based on Robust Optimization - Methodology and Applications. A. Ben-Tal
% and A. Nemirovski. Mathematical Programming (Series B), 92:453-480, 2002
% Note, there are some sign errors in the paper.
%
% The method introduces a large number of new variables, equalities and
% inequalities. By turning on robust.reducedual, the equalities are
% eliminated, thus reducing the number of variables slightly
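% Mechanism for a single row (sketch, notation local to this comment):
% robustifying (b + B*w)'*x + c'*w + d >= 0 for all w in {w : f0 + F*w >= 0}
% is, by LP duality, equivalent to finding multipliers zeta >= 0 with
%   F'*zeta == B'*x + c   and   b'*x + d - f0'*zeta >= 0,
% which is the pattern constructed row by row below.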
feasible = 1;
if length(F_xw) == 0
F = [];
return
end
X = sdpvar(F_xw);
b = [];
A = [];
% Some pre-calc
xw = [x;w];
xind = find(ismembcYALMIP(getvariables(xw),getvariables(x)));
wind = find(ismembcYALMIP(getvariables(xw),getvariables(w)));
[Qs,cs,fs,dummy,nonquadratic] = vecquaddecomp(X,xw);
c_wTbase = [];
AAA = [];
ccc = [];
for i = 1:length(X)
Q = Qs{i};
c = cs{i};
f = fs{i};
if nonquadratic
error('Constraints can be at most quadratic, with the linear term uncertain');
end
Q_ww = Q(wind,wind);
Q_xw = Q(xind,wind);
Q_xx = Q(xind,xind);
c_x = c(xind);
c_w = c(wind);
%b = [b;f + c_w'*w];
%A = [A;-c_x'-w'*2*Q_xw'];
% A = [A -c_x-2*Q_xw*w];
AAA = [AAA;sparse(-2*Q_xw)];
ccc = [ccc;-sparse(c_x)];
b = [b;f];
c_wTbase = [c_wTbase;c_w'];
end
b = b + c_wTbase*w;
% Ac = A';
A = reshape(ccc + AAA*w,size(c_x,1),[]);
if isa(A,'double')
A = sparse(A);
end
A = A';
% Try to find variables that only have simple bound constraints. These
% variables can explicitly be optimized and thus speed up the construction,
% and allow a model with fewer variables.
[Zmodel2,lower,upper] = find_simple_variable_bounds(Zmodel);
% Partition the uncertain variables
simple_w = find( ~isinf(lower) & ~isinf(upper));
general_w = find( isinf(lower) | isinf(upper));
simple_w = recover(simple_w);
general_w = recover(general_w);
% Linear uncertain constraint is (Bbetai*x + cdi) >= 0 for all w, or
% (bi' + (Bi*w)')*x + (ci'*w + di).
cd = b;
Bbeta = -A;
F = ([]);
top = 1;
% To speed up the construction, compute the ci vectors for all constraints
% in one call ci_basis = [c1 c2 ...]
ci_basis = basis(cd',w);
if ops.verbose
disp(' - Using duality to eliminate uncertainty');
end
nv = yalmip('nvars');
simple_rows = [];
for i = 1:length(b)
Bbetai = Bbeta(i,:);
if (nnz(ci_basis(:,i))==0) & isa(Bbetai,'double')
% This constraint row does not depend on uncertainty
%row = Bbetai*x + cdi;
row = X(i);
if isa(row,'sdpvar')
% F = F + (row >= 0);
simple_rows = [simple_rows;i];
else
if row<0
feasible = 0;
return
end
end
else
cdi = cd(i);
if isempty(general_w)
ci = ci_basis(:,i);
di = basis(cdi,0);
if isa(Bbetai,'double')
Bi = zeros(1,length(w));
else
Bi = basis(Bbetai,w)';
end
bi = basis(Bbeta(i,:),0)';
% Scale to -1,1 uncertainty
T = diag((upper-lower))/2;
e = (upper+lower)/2;
if nnz(Bi) == 0
if nnz(bi)==0
% Basically constant + w > 0
if (di+e'*ci) - norm(T*ci,1) < 0
error('Problem is trivially infeasible');
feasible = 0;
return
end
else
F = F + (bi'*x + (di+e'*ci) - norm(T*ci,1) >= 0);
end
else
non_zeroBirow = find(sum(abs(Bi'),2));
zeroBirow = find(sum(abs(Bi'),2) == 0);
if length(non_zeroBirow)>1
t = sdpvar(length(non_zeroBirow),1);
F = F + ((bi'+e'*Bi')*x + (di+e'*ci) - sum(t) >= 0) + (-t <= T(non_zeroBirow,:)*(ci+Bi'*x) <= t);
else
F = F + ((bi'+e'*Bi')*x + (di+e'*ci) - T(non_zeroBirow,:)*(ci+Bi'*x) >= 0) ;
F = F + ((bi'+e'*Bi')*x + (di+e'*ci) + T(non_zeroBirow,:)*(ci+Bi'*x) >= 0) ;
end
end
else
lhs1 = 0;
lhs2 = 0;
top = 1;
Flocal = [];
if Zmodel.K.f > 0
zeta = sdpvar(Zmodel.K.f,1);
lhs1 = lhs1 + Zmodel.F_struc(top:top + Zmodel.K.f-1,2:end)'*zeta;
lhs2 = lhs2 - Zmodel.F_struc(top:top + Zmodel.K.f-1,1)'*zeta;
top = top + Zmodel.K.f;
end
if Zmodel.K.l > 0
zeta = sdpvar(Zmodel.K.l,1);
Flocal = Flocal + (zeta >= 0);
lhs1 = lhs1 + Zmodel.F_struc(top:top + Zmodel.K.l-1,2:end)'*zeta;
lhs2 = lhs2 - Zmodel.F_struc(top:top + Zmodel.K.l-1,1)'*zeta;
top = top + Zmodel.K.l;
end
if Zmodel.K.q(1) > 0
for j = 1:length(Zmodel.K.q)
zeta = sdpvar(Zmodel.K.q(j),1);
if length(zeta)>2
Flocal = Flocal + (cone(zeta));
else
Flocal = Flocal + (zeta(2) <= zeta(1)) + (-zeta(2) <= zeta(1));
end
lhs1 = lhs1 + Zmodel.F_struc(top:top + Zmodel.K.q(j)-1,2:end)'*zeta(:);
lhs2 = lhs2 - Zmodel.F_struc(top:top + Zmodel.K.q(j)-1,1)'*zeta(:);
top = top + Zmodel.K.q(j);
end
end
if Zmodel.K.s(1) > 0
for j = 1:length(Zmodel.K.s)
zeta = sdpvar(Zmodel.K.s(j));
Flocal = Flocal + (zeta >= 0);
lhs1 = lhs1 + Zmodel.F_struc(top:top + Zmodel.K.s(j)^2-1,2:end)'*zeta(:);
lhs2 = lhs2 - Zmodel.F_struc(top:top + Zmodel.K.s(j)^2-1,1)'*zeta(:);
top = top + Zmodel.K.s(j)^2;
end
end
% if isempty(simple_w)
ci = basis(cd(i),w);
di = basis(cd(i),0);
Bi = basis(Bbeta(i,:),w)';
bi = basis(Bbeta(i,:),0)';
if ops.robust.reducedual
Ablhs = getbase(lhs1);
blhs = Ablhs(:,1);
Alhs = Ablhs(:,2:end);
% b+A*zeta == Bi'*x + ci
% A*zeta == -b + Bi'*x + ci
% zets ==...
Anull = null(full(Alhs));
zeta2 = (Alhs\(-blhs + Bi'*x + ci))+Anull*sdpvar(size(Anull,2),1);
lhs2 = replace(lhs2,recover(depends(lhs1)),zeta2);
Flocal = replace(Flocal,recover(depends(lhs1)),zeta2);
else
Flocal = [Flocal,lhs1 == Bi'*x + ci];
% F = F + (lhs1 == Bi'*x + ci);
end
if isa(Bi,'double') & ops.robust.reducesemiexplicit
ops2=ops;ops2.verbose = 0;
sol = solvesdp([Flocal],-lhs2,ops2);
if sol.problem == 0
F = F + (double(lhs2) >= - (bi'*x + di));
else
F = F + Flocal;
F = F + (lhs2 >= - (bi'*x + di));
end
else
F = F + Flocal;
F = F + (lhs2 >= - (bi'*x + di));
end
end
end
end
if ~isempty(simple_rows)
F = [F, X(simple_rows)>=0];
end
function b = basis(p,w)
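% Returns the coefficients of p with respect to the variables in w, one row
% per entry of w and one column per entry of p; calling basis(p,0) returns
% the constant term instead.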
if isequal(w,0)
b = getbasematrix(p,0);
else
n = length(w);
if isequal(getbase(w),[spalloc(n,1,0) speye(n)])
if 0
b = [];
lmi_variables = getvariables(w);
for i = 1:length(w)
b = [b ; getbasematrix(p,lmi_variables(i))];
end
else
lmi_variables = getvariables(w);
b = spalloc(n,length(p),0);
[~,loc] = ismember(getvariables(w),getvariables(p));
p_basis = getbase(p);p_basis = p_basis(:,2:end);
used = find(loc);
for i = 1:length(used)
b(used(i),:) = p_basis(:,loc(used(i)))';
end
end
% if norm(b-b2)>1e-10
% error('sdfsdfsd')
% end
else
b = [];
for i = 1:length(w)
b = [b ; getbasematrix(p,getvariables(w(i)))];
end
end
end
b = full(b);
| github | EnricoGiordano1992/LMI-Matlab-master | filter_enumeration.m | .m | LMI-Matlab-master/yalmip/modules/robust/filter_enumeration.m | 7,383 | utf_8 | fd994fa46ec75e11eb5b4480d7d81510 |
function [F,mptmissing] = filter_enumeration(F_xw,Zmodel,x,w,ops,uncertaintyTypes,separatedZmodel,VariableType)
mptmissing = 0;
if length(F_xw) == 0
F = [];
return;
else
if any(Zmodel.K.q) | any(Zmodel.K.s)
error('Only polytope uncertainty supported in duality based robustification');
else
if isempty(intersect(depends(F_xw),getvariables(w)))
F = F_xw;
elseif length(uncertaintyTypes)==1 & isequal(uncertaintyTypes{1},'inf-norm')
if any(isinf((separatedZmodel{1}.lb))) | any(isinf(separatedZmodel{1}.ub))
error('You have unbounded uncertain variables')
else
n = length(separatedZmodel{1}.lb);
vertices = [];
lb = separatedZmodel{1}.lb(:)';
ub = separatedZmodel{1}.ub(:)';
E = dec2bin(0:2^n-1,n)';
E = double(E(:))-48;
E = reshape(E,n,2^n);
vertices = (repmat(lb(:),1,2^n) + E.*(repmat(ub(:),1,2^n)-repmat(lb(:),1,2^n)))';
%for i = 0:2^n-1
% vertices = [vertices;lb+dec2decbin(i,n).*(ub-lb)];
%end
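% Small example of the vectorized construction above (hypothetical bounds):
% lb = [0;0], ub = [1;2] gives the four box vertices, one per row,
%   [0 0; 0 2; 1 0; 1 2]
% which are subsequently reduced to unique rows.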
if ops.verbose
disp([' - Enumerated ' num2str(2^n) ' vertices'])
end
vertices = unique(vertices,'rows');
if ops.verbose & 2^n > size(vertices,1)
disp([' - Reduced to ' num2str( size(vertices,1)) ' unique vertices'])
end
F = replaceVertices(F_xw,w,vertices',VariableType,ops);
end
elseif length(uncertaintyTypes)==1 & isequal(uncertaintyTypes{1},'simplex')
k = abs(Zmodel.F_struc(1,1));
n = length(w);
vertices = zeros(n,1);
for i = 1:n
v = zeros(n,1);
v(i) = k;
vertices = [vertices v];
end
if ops.verbose
disp([' - Enumerated ' num2str(n) ' vertices'])
end
vertices = pruneequalities(vertices,Zmodel);
F = replaceVertices(F_xw,w,vertices,VariableType,ops);
else
% FIX : Assumes all uncertainty in all constraints
K = Zmodel.K;
A = -Zmodel.F_struc((1+K.f):(K.f + K.l),2:end);
b = Zmodel.F_struc((1+K.f):(K.f + K.l),1);
try
% Some preprocessing to extract bounds from equality
% constraints in order to make the uncertainty polytope
% bounded (required since we are going to run vertex
% enumeration)
% We might have x>=0, sum(x)=1, and this code simply extracts
% the implied bounds x<=1
[lo,up] = findulb(Zmodel.F_struc(1:K.f + K.l,:),K);
Zmodel.lb = lo;Zmodel.ub = up;
Zmodel = propagate_bounds_from_equalities(Zmodel);
up = Zmodel.ub;
lo = Zmodel.lb;
upfi = find(~isinf(up));
lofi = find(~isinf(lo));
aux = Zmodel;
aux.F_struc = [aux.F_struc;-lo(lofi) sparse(1:length(lofi),lofi,1,length(lofi),size(A,2))];
aux.F_struc = [aux.F_struc;up(upfi) -sparse(1:length(upfi),upfi,1,length(upfi),size(A,2))] ;
aux.K.l = aux.K.l + length(lofi) + length(upfi);
K = aux.K;
A = -aux.F_struc((1+K.f):(K.f + K.l),2:end);
b = aux.F_struc((1+K.f):(K.f + K.l),1);
P = polytope(full(A),full(b));
try
vertices = extreme(P)';
catch
error('The uncertainty space is unbounded (could be an artefact of YALMIPs modelling of nonlinear operators).')
end
%if ~isbounded(P)
% error('The uncertainty space is unbounded (could be an artefact of YALMIPs modelling of nonolinear oeprators).')
%else
% vertices = extreme(polytope(A,b))';
%end
catch
mptmissing = 1;
if ops.verbose>0
%lasterr
disp(' - Enumeration of uncertainty polytope failed. Missing Multiparametric Toolbox?')
disp(' - Switching to duality based approach')
%disp('You probably need to install MPT (needed for vertex enumeration)')
%disp('http://control.ee.ethz.ch/~joloef/wiki/pmwiki.php?n=Solvers.MPT')
%disp('Alternatively, you need to add bounds on your uncertainty.')
%disp('Trying to switch to dualization approach')
%error('MPT missing');
end
F = [];
return
end
% The vertex enumeration was done without any equality constraints.
% We now check all vertices to see if they satisfy equalities.
vertices = pruneequalities(vertices,Zmodel);
if ops.verbose
disp([' - Enumerated ' num2str(size(vertices,2)) ' vertices'])
end
F = replaceVertices(F_xw,w,vertices,VariableType,ops);
end
end
end
function F = replaceVertices(F_xw,w,vertices,VariableType,ops)
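% Substitutes every enumerated vertex of the uncertainty set into the
% uncertain constraints and collects the resulting certain constraints;
% elementwise (LP) rows are handled in one vectorized sweep, remaining
% conic blocks one vertex at a time.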
% Doing LP constraints in a vectorized manner saves a lot of time
F_xw_lp = F_xw(find(is(F_xw,'elementwise')));
F_xw_socp_sdp = F_xw - F_xw_lp;
F = ([]);
x_Flp = depends(F_xw_lp);
uncAux = yalmip('auxvariablesW');
uncAux = recover(intersect(x_Flp,VariableType.aux_with_w_dependence));
if isequal(ops.robust.auxreduce,'none')
uncAux = [];
end
w = flush(w);
if length(F_xw_lp)>0
rLP = [];
if ~isempty(uncAux)
z = sdpvar(repmat(length(uncAux),1,size(vertices,2)),repmat(1,1,size(vertices,2)),'full');
end
for i = 1:size(vertices,2)
temp = replace(sdpvar(F_xw_lp),w,vertices(:,i),0);
if ~isempty(uncAux)
temp = replace(temp,uncAux,z{i});
end
rLP = [rLP;temp];
end
% FIXME: More general detection of silly constraints
if isa(rLP,'double') & all(rLP>=-eps^0.75)
F = ([]);
else
% Easily generates redundant constraints
[aux,index] = uniquesafe(getbase(rLP),'rows');
try
F = (rLP(index(randperm(length(index)))) >= 0);
catch
1
end
end
end
% Remaining conic stuff
for j = 1:length(F_xw_socp_sdp)
for i = 1:size(vertices,2)
temp = replace(F_xw_socp_sdp(j),w,vertices(:,i),0);
if ~isempty(uncAux)
temp = replace(temp,uncAux,z{i});
end
F = F + lmi(temp);
end
end
function vertices = pruneequalities(vertices,Zmodel)
K = Zmodel.K;
% The vertex enumeration was done without any equality constraints.
% We now check all vertices to see if they satisfy equalities.
if K.f > 0
Aeq = -Zmodel.F_struc(1:K.f,2:end);
beq = Zmodel.F_struc(1:K.f,1);
feasible = sum(abs(Aeq*vertices - repmat(beq,1,size(vertices,2))),1) < 1e-6;
vertices = vertices(:,feasible);
if ~any(feasible)
error('The uncertainty space is infeasible.')
end
end
| github | EnricoGiordano1992/LMI-Matlab-master | decomposeUncertain.m | .m | LMI-Matlab-master/yalmip/modules/robust/decomposeUncertain.m | 25,501 | utf_8 | de52048b978e055a2e273ab28fa80d85 |
function [UncertainModel,Uncertainty,VariableType,ops,failure] = decomposeUncertain(F,h,w,ops)
failure = 0;
% Do we have any uncertainty declarations variables?
[F,w] = extractUncertain(F,w);
if isempty(w)
error('There is no uncertainty in the model.');
end
% Partition the model into
% F_x : Constraints in decision variables only
% F_w : The uncertainty description
% F_xw : The uncertain constraints
% Note that this analysis might also declare some of the auxiliary variables
% as simple uncertain variables. It might also create a new objective
% function in order to have all uncertainty in the constraints
F_original = F;
[VariableType,F_x,F_w,F_xw,h] = robust_classify_variables_newest(F,h,ops,w);
if length(F_w)==0
error('There is no uncertainty description in the model.');
end
if ops.verbose
dispsilent(ops.verbose,'***** Starting YALMIP robustification module. *********************');
if length(w)<length(VariableType.w_variables)
dispsilent(ops.verbose,[' - Detected ' num2str(length(VariableType.w_variables)) ' uncertain variables (' num2str(length(VariableType.w_variables)-length(w)) ' artificial)']);
else
dispsilent(ops.verbose,[' - Detected ' num2str(length(w)) ' uncertain variables']);
end
end
% Integer variables are OK in x, but not in the uncertainty
integervars = [yalmip('binvariables') yalmip('intvariables')];
ind = find(is(F_original,'integer') | is(F_original,'binary'));
if ~isempty(ind)
integervars = [integervars getvariables(F(ind))];
if any(ismember(VariableType.w_variables,integervars))
failure = 1;
return
end
end
% Convert quadratic constraints in uncertainty model to SOCPs. This will
% enable us to use duality based removal of uncertainties in linear
% inequalities. We keep information about quadratic expressions though, in
% order to use them if possible in 2-norm explicit maximization
F_w = convertquadratics(F_w);
% Export uncertainty model to numerical format
ops.solver = '';
ops.removeequalities = 0;
[aux1,aux2,aux3,Zmodel] = export(F_w-F_w(find(is(F_w,'uncertain'))),[],ops,[],[],1);
if ~isempty(Zmodel)
if length(Zmodel.c) ~= length(VariableType.w_variables)
error('Some uncertain variables are unconstrained.')
end
else
error('Failed when exporting a model of the uncertainty.')
end
% The uncertainty model is in the full w-space. However, it might
% happen that the uncertainty is separable. Find groups of uncertain
% variables that can/should be treated separately.
uncertaintyGroups = findSeparate(Zmodel);
if ops.verbose
dispsilent(ops.verbose,[' - Detected ' num2str(length(uncertaintyGroups)) ' independent group(s) of uncertain variables']);
end
% Trying to take care of cases such as norm([x+w;x-w]), i.e. epigraphs with
% uncertainty.
%[F_xw,ops] = prepareforAuxiliaryRemoval(VariableType,F_xw,F_w,ops);
x = recover(VariableType.x_variables);
w = recover(VariableType.w_variables);
ops.robust.forced_enumeration = 0;
switch ops.robust.auxreduce
case {'none','affine','','projection','enumeration'}
otherwise
disp(' ');
dispsilent(ops.verbose,['The flag ''auxreduce'' is wrong. Turning off removal of auxiliary variables']);
disp(' ');
ops.robust.auxreduce = 'none';
end
if ~isempty(VariableType.aux_with_w_dependence)
if ~strcmp('none',ops.robust.auxreduce)
dispsilent(ops.verbose,[' - Detected ' num2str(length(VariableType.aux_with_w_dependence)) ' uncertainty dependent auxiliary variables']);
end
if strcmp('none',ops.robust.auxreduce)
dispsilent(ops.verbose,[' - Using possibly conservative approach to deal with uncertainty dependent auxiliary variables.']);
dispsilent(ops.verbose,[' - (change robust.auxreduce to ''projection'', ''enumeration'' for exact solution.)'])
VariableType.x_variables = unique([VariableType.aux_with_w_dependence(:)' VariableType.x_variables(:)']);
elseif strcmp('affine',ops.robust.auxreduce)
% Add linear feedback on all uncertainty dependent auxiliary
% variables. The new model is dealt with as usual
dispsilent(ops.verbose,[' - Adding affine feedback on auxiliary variables.'])
[F_xw,xnew,info] = adjustable(F_xw,w,unique(VariableType.aux_with_w_dependence),uncertaintyGroups,ops,VariableType);
dispsilent(ops.verbose,[' - Feedback structure had sparsity ' num2str(info.sparsity) ' and required ' num2str(info.nvars) ' variable(s)'])
%xnew = [];
VariableType.x_variables = unique([VariableType.aux_with_w_dependence(:)' VariableType.x_variables(:)' getvariables(xnew)]);
elseif strcmp('enumeration',ops.robust.auxreduce)
% Uncertainty model is polytopic, hence enumeration can be used.
if isequal(Zmodel.K.s,0) & isequal(Zmodel.K.q,0)
dispsilent(ops.verbose,[' - Using enumeration to deal with uncertainty dependent auxiliary variables']);
ops.robust.lplp = 'enumeration';
else
disp([' - Cannot robustify model exactly with uncertainty dependent auxiliary variables and conic uncertainty']);
disp([' - Change the option robust.auxreduce to ''projection'', ''affine'' or ''none''.'])
error('robustification failed');
end
% User wants to do enumeration based removal of auxiliary
% variables, hence we cannot switch later
ops.robust.forced_enumeration = 1;
elseif ~(any(is(F_xw,'sdp')) | any(is(F_xw,'socp'))) | ~isempty(strfind('projection',ops.robust.auxreduce))
if isequal(ops.robust.auxreduce,'projection')
dispsilent(ops.verbose,[' - Projecting out auxiliary variables. This can take time...']);
end
F_xw = projectOut(F_xw,w,unique(VariableType.aux_with_w_dependence),uncertaintyGroups,ops);
else
dispsilent(ops.verbose,[' - Cannot robustify model exactly with uncertainty dependent auxiliary variables and conic uncertainty']);
dispsilent(ops.verbose,[' - Change the option robust.auxreduce to ''affine'' or ''none'' to compute conservative solution'])
error('robustification failed');
end
end
% Separate the uncertainty models according to uncertainty groups
separatedZmodel = separateUncertainty(Zmodel,uncertaintyGroups);
% Conversion of bounded variables that have been modelled using
% the norm operator (remove the epigraph variable to ensure explicit
% maximization is used). This will be generalized in the next version
[separatedZmodel, uncertaintyGroups] = convertUncertaintyGroups(separatedZmodel,uncertaintyGroups,VariableType);
% Code will be added to detect uncertainty cases in a more general and
% modular way. Additional code will also be added to find hidden simple
% structures, such as norm(w,1)<1, which currently is treated as a general
% polytopic uncertainty, since the expansion hides the simplicity
% 'Bounds', 'Simplex', 'Conic', 'Polytopic', '2-norm', '1-norm', 'inf-norm'
[uncertaintyTypes,separatedZmodel,uncertaintyGroups] = classifyUncertainty(separatedZmodel,uncertaintyGroups,w);
% Misplaced constraints: these are not really uncertain, but when we expanded an
% uncertain operator, a new auxiliary variable was introduced that has not been
% made dependent on w. Hence, such constraints should really be moved; this is
% taken care of outside now though
% if length(F_xw)>0
% move = zeros(length(F_xw),1);
% for i = 1:length(F_xw)
% if all(ismember(getvariables(F_xw(i)),VariableType.x_variables))
% move(i) = 1;
% end
% end
% if any(move)
% F_x = [F_x, F_xw(find(move))];
% F_xw(find(move))=[];
% end
% end
UncertainModel.F_x = F_x;
UncertainModel.F_xw = F_xw;
UncertainModel.h = h;
Uncertainty.F_w = F_w;
Uncertainty.Zmodel = Zmodel;
Uncertainty.separatedZmodel = separatedZmodel;
Uncertainty.uncertaintyTypes = uncertaintyTypes;
Uncertainty.separatedZmodel = separatedZmodel;
Uncertainty.uncertaintyGroups = uncertaintyGroups;
VariableType.x = recover(VariableType.x_variables);
VariableType.w = recover(VariableType.w_variables);
failure = 0;
function dispsilent(notsilent,text)
if notsilent
disp(text);
end
function [F,w] = extractUncertain(F,w);
if isempty(w)
unc_declarations = is(F,'uncertain');
if any(unc_declarations)
w = recover(getvariables(sdpvar(F(find(unc_declarations)))));
F = F(find(~unc_declarations));
else
error('There is no uncertainty definition in the model.')
end
end
function groups = findSeparate(model)
% This is an early bail-out to avoid any errors during the development of
% new features. Separable constraints are only supported for Polya models
if any(model.K.s > 0) %| any(model.K.q >0)
groups{1} = 1:size(model.F_struc,2)-1;
return
end
X = zeros(size(model.F_struc,2)-1);
top = 1;
if model.K.f + model.K.l > 0
A = model.F_struc(top:model.K.f+model.K.l,2:end);
for i = 1:size(A,1)
X(find(A(i,:)),find(A(i,:))) = 1;
end
top = top + model.K.f + model.K.l;
end
if any(model.K.q > 0)
for j = 1:length(model.K.q)
A = model.F_struc(top:top+model.K.q(j)-1,2:end);top = top + model.K.q(j);
A = sum(abs(A),1);
for i = 1:size(A,1)
X(find(A),find(A)) = 1;
end
end
end
if any(model.K.s > 0)
for j = 1:length(model.K.s)
A = model.F_struc(top:top+model.K.s(j)^2-1,2:end);top = top + model.K.s(j)^2;
A = sum(abs(A),1);
for i = 1:size(A,1)
X(find(A),find(A)) = 1;
end
end
end
[a,b,c,d] = dmperm(X);
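% dmperm returns a Dulmage-Mendelsohn decomposition of the dependency
% pattern X; here consecutive entries of d delimit its connected blocks,
% e.g. X = [1 1 0;1 1 0;0 0 1] yields the two groups {1,2} and {3}.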
for i = 1:length(d)-1
groups{i} = sort(a(d(i):d(i+1)-1));
end
function newModels = separateUncertainty(Zmodel,uncertaintyGroups);
for i = 1:length(uncertaintyGroups)
data = Zmodel.F_struc(:,[1 1+uncertaintyGroups{i}]);
data_f = data(1:Zmodel.K.f,:);
data_l = data(Zmodel.K.f+1:Zmodel.K.f+Zmodel.K.l,:);
%data_q = data(Zmodel.K.f+Zmodel.K.l+1:sum(Zmodel.K.q),:);
%data_s = data(Zmodel.K.f+Zmodel.K.l+1:sum(Zmodel.K.q)+1:end,:);
eqIndex = find(any(data_f(:,2:end),2));
liIndex = find(any(data_l(:,2:end),2));
newModels{i}.F_struc = [data_f(eqIndex,:);data_l(liIndex,:)];
newModels{i}.K.f = length(eqIndex);
newModels{i}.K.l = length(liIndex);
newModels{i}.K.q = [];
top = Zmodel.K.f+Zmodel.K.l+1;
if Zmodel.K.q(1)>0
for j = 1:length(Zmodel.K.q)
data_q = data(top:top+Zmodel.K.q(j)-1,:);
if nnz(data_q(:,2:end))>0
newModels{i}.K.q(end+1) = Zmodel.K.q(j);
newModels{i}.F_struc = [newModels{i}.F_struc;data_q];
end
top = top + Zmodel.K.q(j);
end
end
if isempty(newModels{i}.K.q)
newModels{i}.K.q = 0;
end
newModels{i}.K.s = [];
top = Zmodel.K.q+Zmodel.K.f+Zmodel.K.l+1;
if Zmodel.K.s(1)>0
for j = 1:length(Zmodel.K.s)
data_s = data(top:top+Zmodel.K.s(j)^2-1,:);
if nnz(data_s(:,2:end))>0
newModels{i}.K.s(end+1) = Zmodel.K.s(j);
newModels{i}.F_struc = [newModels{i}.F_struc;data_s];
end
top = top + Zmodel.K.s(j)^2;
end
end
if isempty(newModels{i}.K.s)
newModels{i}.K.s = 0;
end
% newModels{i}.K.s = 0;
newModels{i}.variabletype = Zmodel.variabletype(uncertaintyGroups{i});
end
function Zmodel = convertuncertainty(Zmodel);
% Temporary hack, will be generalized once the framework for multiple
% uncertainty models is supported
% We are looking for k>t, -tw<t
if size(Zmodel,1) == 1+(size(Zmodel,2)-1)*2 & Zmodel.K.f==0 & Zmodel.K.l==size(Zmodel.F_struc,1)
n = size(Zmodel.F_struc,2)-1;
if isequal(Zmodel.F_struc(:,2:end),sparse([zeros(1,n-1) -1;[eye(n-1);-eye(n-1)] ones(2*(n-1),1)]))
Zmodel.F_struc = [ones(2*n,1)*Zmodel.F_struc(1,1) [eye(n);-eye(n)]];
Zmodel.K.l = 2*n;
end
end
function [separatedZmodel, uncertaintyGroups,VariableType] = convertUncertaintyGroups(separatedZmodel,uncertaintyGroups,VariableType);
% Temporary hack, will be generalized once the framework for multiple
% uncertainty models is supported
% We are looking for k>t, -t<w<t. This is the slightly redundant model for
for i = 1:length(separatedZmodel)
% -k<w<k generated when YALMIP encounters norm(w,inf)<k
if size(separatedZmodel{i},1) == 1+(size(separatedZmodel{i},2)-1)*2 & separatedZmodel{i}.K.f==0 & separatedZmodel{i}.K.l==size(separatedZmodel{i}.F_struc,1)
n = size(separatedZmodel{i}.F_struc,2)-1;
if isequal(separatedZmodel{i}.F_struc(:,2:end),sparse([zeros(1,n-1) -1;[eye(n-1);-eye(n-1)] ones(2*(n-1),1)]))
k = separatedZmodel{i}.F_struc(1,1);
c = separatedZmodel{i}.F_struc(2:2+n-2,1);
separatedZmodel{i}.F_struc = [[k-c;k;c+k;k] [-eye(n);eye(n)]];
separatedZmodel{i}.K.l = 2*n;
end
elseif separatedZmodel{i}.K.l == 1 & separatedZmodel{i}.K.q(1)>0 & length(separatedZmodel{i}.K.q)==1
% Could be norm(w,2) < r.
[n,m] = size(separatedZmodel{i}.F_struc);
if n==m & n>=3
n = n-2;
if isequal(separatedZmodel{i}.F_struc(:,2:end),sparse([zeros(2,n) [-1;1];eye(n) zeros(n,1)]))% isequal(separatedZmodel{i}.F_struc(:,2:end),sparse([zeros(2,n) [-1;1];eye(n) zeros(n,1)]))
if separatedZmodel{i}.F_struc(1,1)>0 %& nnz(separatedZmodel{i}.F_struc(2:end,1))==0
% The user has written norm(w-xc) < r. YALMIP handles
% this using the nonlinear operator framework and
% introduces a variable t, writing it as norm(w-xc)<t, t<r.
% The variable t will be appended to the list of
% uncertain variables, and thus the model is an SOCP
% uncertainty in (w,t). We however want to interpret it
% as a simple quadratic model, i.e. write it as
% (w,t)'Q(w,t) < 1. Since t is actually unbounded in
% this form (t is an auxiliary variable that we
% now throw away), we arbitrarily say Q=[I/r^2 0;0 1].
r = separatedZmodel{i}.F_struc(1,1);
center = -separatedZmodel{i}.F_struc(3:end,1);
% separatedZmodel{i}.Q = blkdiag(r*eye(n),1);
% separatedZmodel{i}.center = [center;0];
separatedZmodel{i}.K.l = 0;
separatedZmodel{i}.K.q = n+1;
% r = separatedZmodel{i}.F_struc(1,1);
% center = -separatedZmodel{i}.F_struc(3:end,1);
separatedZmodel{i}.F_struc = [[1+r^2;-2*center;1-r^2] [zeros(1,n);eye(n)*2;zeros(1,n)]];
% VariableType.w_variables = setdiff(VariableType.w_variables,uncertaintyGroups{1}(end));
uncertaintyGroups{1}(end)=[];
end
end
end
end
end
function [uncertaintyTypes,separatedZmodel,uncertaintyGroups] = classifyUncertainty(separatedZmodel,uncertaintyGroups,w)
for i = 1:length(separatedZmodel)
uncertaintyTypes{i} = 'unclassified';
end
% Look for simplicies, which can be used in Polya
simplex_members = find_simplex_models(separatedZmodel);
for i = find(simplex_members)
uncertaintyTypes{i} = 'simplex';
end
% Look for simple bounds, and then combine them
for i = 1:length(separatedZmodel)
if strcmp(uncertaintyTypes{i},'unclassified')
if any(separatedZmodel{i}.K.q > 0) | any(separatedZmodel{i}.K.s > 0)
simplebounds = 0;
else
[aux,lower,upper] = find_simple_variable_bounds(separatedZmodel{i});
simplebounds = ~isinf(lower) & ~isinf(upper);
end
if all(simplebounds)
if aux.K.l == 0
uncertaintyTypes{i} = 'inf-norm';
separatedZmodel{i}.lb = lower;
separatedZmodel{i}.ub = upper;
end
end
end
end
j = find(strcmp(uncertaintyTypes,'inf-norm'));
if length(j)>1
allBounded = [];
lb = [];
ub = [];
for i = 1:length(j)
allBounded = [allBounded; uncertaintyGroups{j(i)}];
lb = [lb;separatedZmodel{j(i)}.lb];
ub = [ub;separatedZmodel{j(i)}.ub];
if any(lb > ub)
error('There are inconsistent bounds in the uncertainty model');
end
end
separatedZmodel{j(1)}.lb = lb;
separatedZmodel{j(1)}.ub = ub;
uncertaintyGroups{j(1)} = allBounded(:)';
keep = setdiff(1:length(separatedZmodel),j(2:end));
separatedZmodel = {separatedZmodel{keep}};
uncertaintyGroups = {uncertaintyGroups{keep}};
uncertaintyTypes = {uncertaintyTypes{keep}};
end
% Look for 2-norm balls norm(w) < r, written using norm(w) or w'*w
for i = 1:length(separatedZmodel)
if strcmp(uncertaintyTypes{i},'unclassified')
if ~any(separatedZmodel{i}.K.s > 0) & separatedZmodel{i}.K.l==0 & separatedZmodel{i}.K.f==0 & length(separatedZmodel{i}.K.q)==1
% Hmm, only 1 SOCP
if all(separatedZmodel{i}.F_struc(1,2:end)==0)
if isequal(separatedZmodel{i}.F_struc(2:end,2:end),speye(separatedZmodel{i}.K.q(1)-1))
% r > norm(x)
uncertaintyTypes{i} = '2-norm';
separatedZmodel{i}.center = -separatedZmodel{i}.F_struc(2:end,1);
separatedZmodel{i}.r = sqrt(max(0,separatedZmodel{i}.F_struc(1,1)^2));
elseif size(separatedZmodel{i}.F_struc,1)==size(separatedZmodel{i}.F_struc,2)
S = separatedZmodel{i}.F_struc(2:end,2:end);
[ii,jj,kk] = find(S);
if all(kk==2) & length(ii)==length(unique(ii)) & length(jj)==length(unique(jj))
% r > norm(x-center)
uncertaintyTypes{i} = '2-norm';
separatedZmodel{i}.center = -separatedZmodel{i}.F_struc(2:end,1)/2;
separatedZmodel{i}.r = sqrt((separatedZmodel{i}.F_struc(1,1)^2-separatedZmodel{i}.F_struc(end,1)^2)/4);
end
elseif separatedZmodel{i}.F_struc(1,1)^2-separatedZmodel{i}.F_struc(end,1)^2>0
S = separatedZmodel{i}.F_struc(2:end,2:end);
[ii,jj,kk] = find(S);
if all(kk==2) & length(ii)==length(unique(ii)) & length(jj)==length(unique(jj))
uncertaintyTypes{i} = '2-norm';
separatedZmodel{i}.center = -separatedZmodel{i}.F_struc(2:end-1,1)/2;
separatedZmodel{i}.r = sqrt((separatedZmodel{i}.F_struc(1,1)^2-separatedZmodel{i}.F_struc(end,1)^2)/4);
end
end
end
end
end
end
% Look for quadratic constraints, other than norm models
% A bit redundant; this code should be integrated with the case above
for i = 1:length(separatedZmodel)
if strcmp(uncertaintyTypes{i},'unclassified')
if ~any(separatedZmodel{i}.K.s > 0) & separatedZmodel{i}.K.l==0 & separatedZmodel{i}.K.f==0 & length(separatedZmodel{i}.K.q)==1
% 1 single SOCP ||Ax+b|| <= cx+d
A = separatedZmodel{i}.F_struc(2:end,2:end);
b = separatedZmodel{i}.F_struc(2:end,1);
c = separatedZmodel{i}.F_struc(1,2:end);
d = separatedZmodel{i}.F_struc(1,1);
if min(eig(full(A'*A-c'*c)))
% Originates in a quadratic constraint
rhs = c*w(uncertaintyGroups{i}(:))+d;
lhs = A*w(uncertaintyGroups{i}(:))+b;
uncertaintyTypes{i} = 'quadratic';
separatedZmodel{i}.g = rhs^2-lhs'*lhs;
separatedZmodel{i}.center = [];
separatedZmodel{i}.r = [];
end
end
end
end
% Look for 1-norm balls |w|1 < r^2
% We are looking for the auto-generated model -t<w<t, sum(t)<s, s<r^2
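% Hedged sketch (not original YALMIP code): for a single uncertain variable
% w with |w-c| <= r, the auto-generated block, with columns ordered
% [w s t], is expected to look like
%   F_struc = [ r   0 -1  0 ;   % r - s      >= 0
%              -c   1  0  1 ;   % (w-c) + t  >= 0
%               c  -1  0  1 ;   % -(w-c) + t >= 0
%               0   0  1 -1 ];  % s - t      >= 0
% which is the pattern matched against 'shouldbe' below.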
for i = 1:length(separatedZmodel)
if strcmp(uncertaintyTypes{i},'unclassified')
if ~any(separatedZmodel{i}.K.s > 0) & separatedZmodel{i}.K.l>0 & separatedZmodel{i}.K.f==0 & ~any(separatedZmodel{i}.K.q > 0)
%if all(separatedZmodel{i}.F_struc(2:end,1)==0)
if separatedZmodel{i}.F_struc(1,1)>0
n = (size(separatedZmodel{i}.F_struc,2)-2)/2;
if n==fix(n)
try
if all(separatedZmodel{i}.F_struc(n+2:end-1,1)==-separatedZmodel{i}.F_struc(2:2+n-1,1))
shouldbe = [zeros(1,n) -1 zeros(1,n);eye(n) zeros(n,1) eye(n);-eye(n) zeros(n,1) eye(n);zeros(1,n) 1 -ones(1,n)];%; zeros(n,n+1) eye(n)];;zeros(1,n) 1 zeros(1,n)];
if isequal(full(separatedZmodel{i}.F_struc(:,2:end)),shouldbe)
%if all(separatedZmodel{i}.F_struc(2:end,1)==0)
uncertaintyTypes{i} = '1-norm';
separatedZmodel{i}.r = separatedZmodel{i}.F_struc(1,1);
separatedZmodel{i}.center =separatedZmodel{i}.F_struc(n+2:end-1,1);
%end
end
end
catch
%FIX ME, caught by -1<w<1, sum(w)<1
end
end
end
%end
end
end
end
% Look for polytopes
for i = 1:length(separatedZmodel)
if strcmp(uncertaintyTypes{i},'unclassified')
if ~any(separatedZmodel{i}.K.q > 0) | any(separatedZmodel{i}.K.s > 0)
end
end
end
function Fnew = projectOut(F,w,newAuxVariables,uncertaintyGroups,ops)
w_variables = getvariables(w);
Graph = yalmip('getdependence');
for i = 1:length(uncertaintyGroups)
Graph(w_variables(uncertaintyGroups{i}),w_variables(uncertaintyGroups{i}))=1;
end
aux_and_w = union(newAuxVariables,w_variables);
F_lp = F(find(is(F,'elementwise')));
X = sdpvar(F_lp);
Xvars = getvariables(X);
G = getbase(X);G = G(:,2:end);
for i = 1:size(G,1)
j = Xvars(find(G(i,:)));
Graph(j,j) = 1;
end
Graph(:,setdiff(1:size(Graph,2),aux_and_w))=0;
Graph(setdiff(1:size(Graph,2),aux_and_w),:)=0;
Graph2 = Graph(1:max(aux_and_w),1:max(aux_and_w));
Graph2 = Graph2 + speye(length(Graph2));
[aa,bb,cc,dd] = dmperm(Graph2);
commonProjections = {};
for r = 1:length(dd)-1
comps = dd(r):dd(r+1)-1;
comps = intersect(aa(comps),newAuxVariables);
if ~isempty(comps)
commonProjections{end+1} = comps;
end
end
if ops.verbose
disp([' - * Partitioned these into ' num2str(length(commonProjections)) ' group(s)']);
end
keep = ones(length(F),1);
Fnew = [];
started = 0;
for clique = 1:length(commonProjections)
F_lp = [];
for i = 1:length(F)
F_vars = getvariables(F(i));
var_w = intersect(F_vars,w_variables);
var_aux = intersect(F_vars,commonProjections{clique});
if is(F(i),'elementwise')
if any(ismember(getvariables(F(i)),commonProjections{clique}))
F_lp = [F_lp, F(i)];
keep(i) = 0;
if ~started
started = 1;
if ops.verbose
disp([' - * Performing projections of uncertain graph variables...']);
end
end
end
end
end
if ~isempty(F_lp)
X = sdpvar(F_lp);
[Ab] = getbase(X);
b = Ab(:,1);
A = -Ab(:,2:end);
allVariables = getvariables(X);
newAuxVariables = intersect(commonProjections{clique},allVariables);
j = [];
for i = 1:length(newAuxVariables)
j(i) = find(allVariables==newAuxVariables(i));
end
[A,b] = fourierMotzkin(A,b,sort(j));
A(:,j)=[];
left = recover(setdiff(allVariables,newAuxVariables));
X = b-A*left(:);
Fnew = [Fnew,[[X >= 0] : 'Projected uncertain']];
end
end
if started & ops.verbose
d = length(sdpvar(Fnew))-length(sdpvar(F_lp));
if d>0
disp([' - * Done with projections (generated ' num2str(d) ' new constraints)']);
elseif d<0
disp([' - * Done with projections (actually reduced model with ' num2str(-d) ' constraints)']);
else
disp([' - * Done with projections (model size unchanged)']);
end
end
Fnew = [Fnew,F(find(keep))];
function [A,b] = fourierMotzkin(A,b,dim)
% Brute-force projection through Fourier-Motzkin
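% Hedged example (not part of the original code): with A*x <= b given by
% x1 + x2 <= 3 and -x1 <= -1, eliminating dim = 1 combines the positive and
% negative rows as -a(2)*row1 + a(1)*row2, i.e.
%   [1 1 | 3] + [-1 0 | -1] = [0 1 | 2]  =>  x2 <= 2,
% which is the projection onto x2; the caller then deletes the eliminated
% (now all-zero) columns.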
for i = dim(:)'
a = A(:,i);
zero = find(a == 0);
pos = find(a > 0);
neg = find(a < 0);
T = spalloc(length(zero) + length(pos)*length(neg),size(A,1),length(zero)+length(pos)*length(neg)*2);
row = 1;
for j = zero(:)'
T(row,j) = 1;
row = row+1;
end
for j = pos(:)'
for k = neg(:)'
T(row,j) = -a(k);
T(row,k) = a(j);
row = row+1;
end
end
A = T*A;
b = T*b;
end
function [Fnew,xnew,info] = adjustable(F,w,newAuxVariables,uncertaintyGroups,ops,VariableType)
L = sdpvar(length(recover(newAuxVariables)),length(w),'full');
L = L.*VariableType.Graph(VariableType.aux_with_w_dependence,VariableType.w_variables);
y0 = sdpvar(length(recover(newAuxVariables)),1);
Fnew = replace(F,recover(newAuxVariables),y0+L*w);
xnew = recover([getvariables(y0) getvariables(L)]);
info.nvars = length(getvariables(L));
info.sparsity = length(getvariables(L))/prod(size(L));
|
github
|
EnricoGiordano1992/LMI-Matlab-master
|
root_node_tighten.m
|
.m
|
LMI-Matlab-master/yalmip/modules/global/root_node_tighten.m
| 4,114 |
utf_8
|
337752e11e3b4918818b979f123b0ae4
|
% *************************************************************************
% Tighten bounds at root
% *************************************************************************
function p = root_node_tighten(p,upper);
p.feasible = all(p.lb<=p.ub) & p.feasible;
if p.options.bmibnb.roottight & p.feasible
pin = p;
if ~isempty(p.bilinears)
% f = p.F_struc(1:p.K.f,:);
% p.F_struc(1:p.K.f,:)=[];
% p = addBilinearVariableCuts(p);
% p.F_struc = [f;p.F_struc];
% % p.K.l = 0;
end
if ~isempty(p.bilinears) && ~isinf(upper)
p_cut = p;
for i = 1:size(p.bilinears,1)
if p_cut.c(p.bilinears(i,1))
p_cut.Q(p.bilinears(i,2),p.bilinears(i,3)) = p_cut.c(p.bilinears(i,1))/2;
p_cut.Q(p.bilinears(i,3),p.bilinears(i,2)) = p_cut.Q(p.bilinears(i,3),p.bilinears(i,2))+p_cut.c(p.bilinears(i,1))/2;
p_cut.c(p.bilinears(i,1)) = 0;
end
end
if all(eig(p_cut.Q)>=0)
[u,s,v] = svd(full(p_cut.Q));
% f + c'*x + x'*Q*x <= U,  Q = R'*R from the SVD
% x'*R'*R*x <= U - f - c'*x
% ||R*x||^2 <= rhs, an upper bound on U - f - c'*x
% ||R*x||_inf <= n*sqrt(rhs)
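% Hedged numeric illustration (assumed data, not original code): with f = 0,
% c = 0, Q = eye(2) and upper bound U = 4 we get rhs = 4 and R = eye(2), so
% the rows appended below read -x <= n*sqrt(rhs) and x <= n*sqrt(rhs),
% i.e. |x_i| <= 4, a valid (if loose) box around the level set x'*x <= 4.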
rhs = upper - p.f;
neg = find(p_cut.c<0);
pos = find(p_cut.c>0);
rhs = rhs - sum(p.ub(neg).*p_cut.c(neg));
rhs = rhs - sum(p.lb(pos).*p_cut.c(pos));
if rhs > 0
R = diag(diag(s).^.5)*v';
R = R(diag(s)>1e-10,:);
% -n*sqrt(rhs) <= R*x <= n*sqrt(rhs)
p.F_struc = [p.F_struc;size(v,2)*sqrt(rhs)*ones(size(R,1),1) -R;size(v,2)*sqrt(rhs)*ones(size(R,1),1) R];
p.K.l = p.K.l + 2*size(R,1);
end
end
end
if all(p.K.q == 0) & all(p.K.s == 0) & all(p.K.r == 0)
lowersolver = eval(['@' p.solver.lpcall]);
else
lowersolver = eval(['@' p.solver.lowercall]);
end
c = p.c;
Q = p.Q;
mt = p.monomtable;
p.monomtable = eye(length(c));
i = 1;
% Add an upper bound cut?
if (upper < inf)
% p.c'*x+p.f < upper
newline = [upper-p.f -p.c'];
p.F_struc = [p.F_struc(1:p.K.f,:);newline;p.F_struc(1+p.K.f:end,:)];
p.K.l = p.K.l + 1;
end
while i<=length(p.linears) & p.feasible
j = p.linears(i);
if p.lb(j) < p.ub(j) & (ismember(j,p.branch_variables) | (p.options.bmibnb.roottight == 2))
p.c = eyev(length(p.c),j);
output = feval(lowersolver,removenonlinearity(p));
p.counter.lowersolved = p.counter.lowersolved + 1;
if (output.problem == 0) & (output.Primal(j)>p.lb(j)+1e-4)
p.lb(j) = output.Primal(j);
p = updateonenonlinearbound(p,j);
p = clean_bounds(p);
end
if output.problem == 1
p.feasible = 0;
elseif p.lb(j) < p.ub(j) % We might have updated lb
p.c = -eyev(length(p.c),j);
output = feval(lowersolver,removenonlinearity(p));
p.counter.lowersolved = p.counter.lowersolved + 1;
if (output.problem == 0) & (output.Primal(j) < p.ub(j)-1e-4)
p.ub(j) = output.Primal(j);
if p.ub(j)<p.lb(j)
p.ub(j) = p.lb(j);
end
p = updateonenonlinearbound(p,j);
p = clean_bounds(p);
end
if output.problem == 1
p.feasible = 0;
end
i = i+1;
end
else
i = i + 1;
end
end
% if upper < inf
% p.F_struc(1+p.K.f,:) = [];
% p.K.l = p.K.l - 1;
% end
%
% p.c = c;
% p.Q = Q;
% p.monomtable = mt;
p.lb(p.lb<-1e10) = -inf;
p.ub(p.ub>1e10) = inf;
pin.lb = p.lb;
pin.ub = p.ub;
pin.feasible = p.feasible;
pin.counter = p.counter;
p = pin;
end
|
github
|
EnricoGiordano1992/LMI-Matlab-master
|
update_sumsepquad_bounds.m
|
.m
|
LMI-Matlab-master/yalmip/modules/global/update_sumsepquad_bounds.m
| 2,079 |
utf_8
|
88e62143dd9ee6975b8f13423f79f346
|
function p = update_sumsepquad_bounds(p);
% Looking for case z = b+ q1(x1) + q2(x2) + ... where q quadratic
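% Hedged illustration (not original code): from an equality row
% 0 == 1 - z + (x1 + x1^2) + (2*x2 - x2^2) with x1,x2 in [-1,1], each
% separable quadratic is bounded on its box by wcquad below,
% x1+x1^2 in [-0.25,2] and 2*x2-x2^2 in [-3,1], so z can be tightened to
% [1-0.25-3, 1+2+1] = [-2.25, 4].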
if p.boundpropagation.sepquad
found = 0;
for j = 1:p.K.f
a = p.F_struc(j,2:end);
b = p.F_struc(j,1);
used = find(a);
data = [];
if nnz(a) > 2 && all(p.variabletype(used) == 2 | p.variabletype(used) == 0)
% More than two terms, all of them linear or pure quadratic
nonlinears = find(p.variabletype(used)==2);
if (nnz(a) > length(nonlinears)) && length(nonlinears) > 0
data = [];
for i = 1:length(nonlinears)
linear = find(p.monomtable(nonlinears(i),:));
data = [data;linear a(linear) a(nonlinears(i))];
a(linear)=0;
a(nonlinears(i))=0;
found = 1;
end
end
end
if nnz(a) == 1 & ~isempty(data)
k = find(a);
ai = a(k);
data(:,2:end) = data(:,2:end)/(-ai);
b = b/-ai;
L = b;
U = b;
for i = 1:size(data,1)
[Li,Ui] = wcquad(data(i,2:3),p.lb(data(i,1)),p.ub(data(i,1)));
L = L + Li;
U = U + Ui;
end
p.lb(k) = max(p.lb(k),L);
p.ub(k) = min(p.ub(k),U);
end
end
if ~found
% Turn off this feature
p.boundpropagation.sepquad = 0;
end
end
function [Li,Ui] = wcquad(c,lb,ub)
% Stationary point
xs = -c(1)/(2*c(2));
fs = c(1)*xs+c(2)*xs^2;
stationaryInBounds = (xs >= lb) && (xs <= ub);
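% Hedged example (assumed data, not original code): for c = [0 1], i.e.
% f(x) = x^2, on [lb,ub] = [-1,2] the stationary point is xs = 0 with
% fs = 0 and the endpoint values are 1 and 4, so [Li,Ui] = [0,4].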
if isinf(lb)
if c(2)>0
fl = inf;
else
fl = -inf;
end
else
fl = c(1)*lb+c(2)*lb^2;
end
if isinf(ub)
if c(2)>0
fu = inf;
else
fu = -inf;
end
else
fu = c(1)*ub+c(2)*ub^2;
end
if stationaryInBounds
Li = min([fl fu fs]);
Ui = max([fl fu fs]);
else
Li = min([fl fu]);
Ui = max([fl fu]);
end
if any(isnan([Li Ui]))
error('Unexpected NaN in wcquad bounds')
end
|
github
|
EnricoGiordano1992/LMI-Matlab-master
|
updateonenonlinearbound.m
|
.m
|
LMI-Matlab-master/yalmip/modules/global/updateonenonlinearbound.m
| 805 |
utf_8
|
97d5341706cf3ef397ec3383332f38ee
|
% *************************************************************************
% Code for updating bounds on bilinear terms when one variable's bound changes
% *************************************************************************
function p = updateonenonlinearbound(p,changed_var)
if ~isempty(p.bilinears)
impactedVariables = find((p.bilinears(:,2) == changed_var) | (p.bilinears(:,3) == changed_var));
x = p.bilinears(impactedVariables,2);
y = p.bilinears(impactedVariables,3);
z = p.bilinears(impactedVariables,1);
x_lb = p.lb(x);
x_ub = p.ub(x);
y_lb = p.lb(y);
y_ub = p.ub(y);
bounds = [x_lb.*y_lb x_lb.*y_ub x_ub.*y_lb x_ub.*y_ub];
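% Hedged illustration (not original code): if x in [-1,2] and y in [3,4],
% the corner products are [-3 -4 6 8], so z = x*y is tightened below to
% [-4,8], intersected with any existing bounds on z.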
p.lb(z) = max([p.lb(z) min(bounds,[],2)],[],2);
p.ub(z) = min([p.ub(z) max(bounds,[],2)],[],2)';
p.lb(z(x==y)) = max(p.lb(z(x==y)),0); % squared terms are non-negative
end
|
github
|
EnricoGiordano1992/LMI-Matlab-master
|
dmpermblockeig.m
|
.m
|
LMI-Matlab-master/yalmip/modules/global/dmpermblockeig.m
| 3,417 |
utf_8
|
7b1470816ba1634bcc2b8457f9a98036
|
function [V,D,permutation,failure] = dmpermblockeig(X,switchtosparse)
[permutation,aux1,aux2,blocks] = dmperm(X+speye(length(X)));
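% Hedged note (added, not original code): dmperm on X+I returns a
% permutation to block-triangular form; for symmetric X this exposes a
% block-diagonal structure, e.g. X = blkdiag([2],[-1]) yields two 1x1
% blocks that are factorized/eigen-decomposed independently below.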
Xpermuted = X(permutation,permutation);
V = [];
D = [];
V = zeros(size(X,1),1);
top = 1;
left = 1;
anycholfail = 0;
failure = 0;
for i = 1:length(blocks)-1
Xi = Xpermuted(blocks(i):blocks(i+1)-1,blocks(i):blocks(i+1)-1);
[R,fail] = chol(Xi);
anycholfail = anycholfail | fail;
if fail
if length(Xi) >= switchtosparse
[vi,di,eigfail] = eigs(Xi,5,'SA');
if eigfail || isempty(di)
res = 0;
for j = 1:size(vi,2)
res(j) = norm(Xi*vi(:,j)-vi(:,j)*di(j,j));
end
% We only trust these
notfailed = abs(res) <= 1e-12;
vi = vi(:,notfailed);
di = di(notfailed,notfailed);
if length(vi) == 0
[vi,di,eigfail] = eigs(sparse(Xi),25,'SA');
if eigfail
res = 0;
for j = 1:size(vi,2)
res(j) = norm(Xi*vi(:,j)-vi(:,j)*di(j,j));
end
% We only trust these
notfailed = abs(res) <= 1e-12;
vi = vi(:,notfailed);
di = di(notfailed,notfailed);
end
end
end
else
[vi,di] = eig(full(Xi));
end
for j = 1:length(di)
if di(j,j)<=0
V(top:top+length(Xi)-1,left)=vi(:,j);
left = left + 1;
D = blkdiag(D,di(j,j));
end
end
end
top = top + length(Xi);
end
if (anycholfail && isempty(V)) || (anycholfail && all(diag(D)>0))
% OK, we have a problem. The Cholesky factorization failed for some of
% the matrices, yet no eigenvalue decomposition revealed a negative
% eigenvalue (due to convergence issues in the sparse eigs)
failure = 1;
end
function [vi,di,eigfail] = eigband(X,m)
eigfail = 0;
if nargin == 1
m = 5;
end
if m > 0
r = symrcm(X);
Z = X(r,r);
n = length(Z);
bw = yalmipbandwidth(Z);
% spy(Z);drawnow
if bw > n/3 || n < 200
[vi,di] = eig(full(X));
return
end
mid = ceil(n/2);
Z1 = Z(1:mid+bw,1:mid+bw);
Z2 = Z(mid-bw:end,mid-bw:end);
[v1,d1,eigfail] = eigband(Z1,m-1);
[v2,d2,eigfail] = eigband(Z2,m-1);
vi = blkdiag(v1,v2);
di = blkdiag(d1,d2);
i1 = find(diag(d1)<0);
i2 = find(diag(d2)<0);
vi = zeros(n,0);
for i = 1:length(i1)
vi(1:n/2+bw,end+1) = v1(:,i1(i));
end
for i = 1:length(i2)
vi(n/2-bw:end,end+1) = v2(:,i2(i));
end
di = blkdiag(d1(i1,i1),d2(i2,i2));
[~,ir] = ismember(1:length(r),r);
vi = vi(ir,:);
return
end
r = symrcm(X);
Z = X(r,r);
n = length(Z);
bw = yalmipbandwidth(Z);
mid = ceil(n/2);
Z1 = Z(1:mid+2*bw,1:mid+2*bw);
Z2 = Z(mid-2*bw:end,mid-2*bw:end);
[v1,d1] = eig(full(Z1));
[v2,d2] = eig(full(Z2));
i1 = find(diag(d1)<0);
i2 = find(diag(d2)<0);
vi = zeros(n,0);
for i = 1:length(i1)
vi(1:n/2+bw,end+1) = v1(:,i1(i));
end
for i = 1:length(i2)
vi(n/2-bw:end,end+1) = v2(:,i2(i));
end
di = blkdiag(d1(i1,i1),d2(i2,i2));
[~,ir] = ismember(1:length(r),r);
vi = vi(ir,:);
|
github
|
EnricoGiordano1992/LMI-Matlab-master
|
evaluate_nonlinear.m
|
.m
|
LMI-Matlab-master/yalmip/modules/global/evaluate_nonlinear.m
| 1,184 |
utf_8
|
6cee29d0c8963567fd541ab3ee434dcf
|
function x = evaluate_nonlinear(p,x,qq)
% FIX: We have to iterate the computations to make sure nested
% expressions such as log(1+sin(x.^2).^2) are evaluated correctly
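% Hedged illustration (not original code): for log(1+x^2) the model holds
% both a monomial variable (x^2) and an eval variable (the log), so one
% pass fills in x^2 from x, the next evaluates the log operator, and the
% loop below repeats until the lifted variables stop changing.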
if ~isempty(p.bilinears) & all(p.variabletype <= 2) & length(p.evalMap)==0
x(p.bilinears(:,1)) = x(p.bilinears(:,2)).*x(p.bilinears(:,3));
else
oldx = 0*p.c;oldx(1:length(x))=x;
%try
x = process_polynomial(x,p);
%catch
% 1
%end
x = process_evaluations(x,p);
while norm(x - oldx)>1e-8
oldx = x;
x = process_polynomial(x,p);
x = process_evaluations(x,p);
end
end
function x = process_evaluations(x,p)
for i = 1:length(p.evalMap)
arguments = {p.evalMap{i}.fcn,x(p.evalMap{i}.variableIndex)};
arguments = {arguments{:},p.evalMap{i}.arg{2:end-1}};
x(p.evalVariables(i)) = feval(arguments{:});
if ~isempty(p.bilinears)
x = process_bilinear(x,p);
end
end
function x = process_bilinear(x,p)
x(p.bilinears(:,1)) = x(p.bilinears(:,2)).*x(p.bilinears(:,3));
function x = process_polynomial(x,p)
x = x(1:length(p.c));
nonlinear = find(p.variabletype);
x(nonlinear) = prod(repmat(x(:)',length(nonlinear),1).^p.monomtable(nonlinear,:),2);
|
github
|
EnricoGiordano1992/LMI-Matlab-master
|
cutsdp.m
|
.m
|
LMI-Matlab-master/yalmip/modules/global/cutsdp.m
| 26,156 |
utf_8
|
b0da964912e005a872def387efd1ee2c
|
function output = cutsdp(p)
% CUTSDP
%
% See also OPTIMIZE, BNB, BINVAR, INTVAR, BINARY, INTEGER
% *************************************************************************
%% INITIALIZE DIAGNOSTICS IN YALMIP
% *************************************************************************
bnbsolvertime = clock;
showprogress('Cutting plane solver started',p.options.showprogress);
% *************************************************************************
%% If we want duals, we may not extract bounds. However, bounds must be
% extracted in discrete problems.
% *************************************************************************
if p.options.cutsdp.recoverdual
warning('Dual recovery not implemented yet in CUTSDP')
end
% *************************************************************************
%% Define infinite bounds
% *************************************************************************
if isempty(p.ub)
p.ub = repmat(inf,length(p.c),1);
end
if isempty(p.lb)
p.lb = repmat(-inf,length(p.c),1);
end
% *************************************************************************
%% ADD CONSTRAINTS 0<=x<=1 FOR BINARY
% *************************************************************************
if ~isempty(p.binary_variables)
p.ub(p.binary_variables) = min(p.ub(p.binary_variables),1);
p.lb(p.binary_variables) = max(p.lb(p.binary_variables),0);
end
% *************************************************************************
%% Extract better bounds from model
% *************************************************************************
if ~isempty(p.F_struc)
[lb,ub,used_rows_eq,used_rows_lp] = findulb(p.F_struc,p.K);
if ~isempty([used_rows_eq(:);used_rows_lp(:)])
lower_defined = find(~isinf(lb));
if ~isempty(lower_defined)
p.lb(lower_defined) = max(p.lb(lower_defined),lb(lower_defined));
end
upper_defined = find(~isinf(ub));
if ~isempty(upper_defined)
p.ub(upper_defined) = min(p.ub(upper_defined),ub(upper_defined));
end
p.F_struc(p.K.f+used_rows_lp,:)=[];
p.F_struc(used_rows_eq,:)=[];
p.K.l = p.K.l - length(used_rows_lp);
p.K.f = p.K.f - length(used_rows_eq);
end
end
% *************************************************************************
%% ADD CONSTRAINTS 0<x<1 FOR BINARY
% *************************************************************************
if ~isempty(p.binary_variables)
p.ub(p.binary_variables) = min(p.ub(p.binary_variables),1);
p.lb(p.binary_variables) = max(p.lb(p.binary_variables),0);
end
p.ub = min(p.ub,p.options.cutsdp.variablebound');
p.lb = max(p.lb,-p.options.cutsdp.variablebound');
% *************************************************************************
%% PRE-SOLVE (nothing fancy coded)
% *************************************************************************
if isempty(find(isinf([p.ub;p.lb]))) & p.K.l>0
[p.lb,p.ub] = tightenbounds(-p.F_struc(1+p.K.f:p.K.f+p.K.l,2:end),p.F_struc(1+p.K.f:p.K.f+p.K.l,1),p.lb,p.ub,p.integer_variables);
end
% *************************************************************************
%% PERTURBATION OF LINEAR COST
% *************************************************************************
p.corig = p.c;
if nnz(p.Q)~=0
g = randn('seed');
randn('state',1253); %For my testing, I keep this the same...
% This perturbation has to be better. Crucial for many real LP problems
p.c = (p.c).*(1+randn(length(p.c),1)*1e-4);
randn('seed',g);
end
% *************************************************************************
%% We don't need this
% *************************************************************************
p.options.savesolverinput = 0;
p.options.savesolveroutput = 0;
% *************************************************************************
%% Display logics
% 0 : Silent
% 1 : Display cut progress
% 2 : Display node solver prints
% *************************************************************************
switch max(min(p.options.verbose,3),0)
case 0
p.options.cutsdp.verbose = 0;
case 1
p.options.cutsdp.verbose = 1;
p.options.verbose = 0;
case 2
p.options.cutsdp.verbose = 2;
p.options.verbose = 0;
case 3
p.options.cutsdp.verbose = 2;
p.options.verbose = 1;
otherwise
p.options.cutsdp.verbose = 0;
p.options.verbose = 0;
end
% *************************************************************************
%% START CUTTING
% *************************************************************************
[x_min,solved_nodes,lower,feasible,D_struc] = cutting(p);
%% --
% *************************************************************************
%% CREATE SOLUTION
% *************************************************************************
output.problem = 0;
if ~feasible
output.problem = 1;
end
if solved_nodes == p.options.cutsdp.maxiter
output.problem = 3;
end
output.solved_nodes = solved_nodes;
output.Primal = x_min;
output.Dual = D_struc;
output.Slack = [];
output.solverinput = 0;
output.solveroutput =[];
output.solvertime = etime(clock,bnbsolvertime);
%% --
function [x,solved_nodes,lower,feasible,D_struc] = cutting(p)
% *************************************************************************
%% Sanity check
% *************************************************************************
if any(p.lb>p.ub)
x = zeros(length(p.c),1);
solved_nodes = 0;
lower = inf;
feasible = 0;
D_struc = [];
return
end
% *************************************************************************
%% Create function handle to solver
% *************************************************************************
cutsolver = p.solver.lower.call;
% *************************************************************************
%% Create copy of model without
% the SDP part
% *************************************************************************
p_lp = p;
p_lp.F_struc = p_lp.F_struc(1:p.K.l+p.K.f,:);
p_lp.K.s = 0;
p_lp.K.q = 0;
% *************************************************************************
%% DISPLAY HEADER
% *************************************************************************
if p.options.cutsdp.verbose
disp('* Starting YALMIP cutting plane for MISDP based on MILP');
disp(['* Lower solver : ' p.solver.lower.tag]);
disp(['* Max iterations : ' num2str(p.options.cutsdp.maxiter)]);
end
if p.options.cutsdp.verbose;
if p.K.s(1)>0
disp(' Node Infeasibility Lower bound Upper bound LP cuts Infeasible SDP cones');
else
disp(' Node Infeasibility Lower bound Upper bound LP cuts');
end
end
%% Initialize diagnostic
infeasibility = -inf;
solved_nodes = 0;
feasible = 1;
lower = -inf;
saveduals = 1;
% Rhs of SOCP has to be non-negative
if ~p.solver.lower.constraint.inequalities.secondordercone.linear
p_lp = addSOCPCut(p,p_lp);
end
% SDP diagonal has to be non-negative
p_lp = addDiagonalCuts(p,p_lp);
% Experimentation with activation cuts on 2x2 structures in all-binary
% problems: if a 2x2 block of the constant term is not psd, then M(x) can
% only become psd if some variable entering those positions is non-zero
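% Hedged example (assumed data, not original code): if the constant 2x2
% block is [1 2;2 1] (eigenvalues 3 and -1) and x1,x2 are the only binaries
% entering those positions, the added row -1 + x1 + x2 >= 0 enforces
% x1 + x2 >= 1, since with x = 0 the block stays indefinite.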
p_lp = addActivationCuts(p,p_lp);
p_lp = removeRedundant(p_lp);
goon = 1;
rr = p_lp.integer_variables;
rb = p_lp.binary_variables;
only_solvelp = 0;
pplot = 0;
% *************************************************************************
% Crude lower bound
% FIX for quadratic case
% *************************************************************************
lower = 0;
if nnz(p.Q) == 0
for i = 1:length(p.c)
if p.c(i)>0
if isinf(p.lb(i))
lower = -inf;
break
else
lower = lower + p.c(i)*p.lb(i);
end
elseif p.c(i)<0
if isinf(p.ub(i))
lower = -inf;
break
else
lower = lower + p.c(i)*p.ub(i);
end
end
end
end
%lower = sum(sign(p.c).*(p.lb));
if isinf(lower) | nnz(p.Q)~=0
lower = -1e6;
end
% *************************************************************************
% Experimental stuff for variable fixing
% *************************************************************************
if p.options.cutsdp.nodefix & (p.K.s(1)>0)
top=1+p.K.f+p.K.l+sum(p.K.q);
for i=1:length(p.K.s)
n=p.K.s(i);
for j=1:size(p.F_struc,2)-1;
X=full(reshape(p.F_struc(top:top+n^2-1,j+1),p.K.s(i),p.K.s(i)));
X=(X+X')/2;
v=real(eig(X+sqrt(eps)*eye(length(X))));
if all(v>=0)
sdpmonotinicity(i,j)=-1;
elseif all(v<=0)
sdpmonotinicity(i,j)=1;
else
sdpmonotinicity(i,j)=nan;
end
end
top=top+n^2;
end
else
sdpmonotinicity=[];
end
hist_infeasibility = [];
mmm=[];
pool = [];
% Avoid data shuffling later on when creating cuts for SDPs
top = 1+p.K.f + sum(p.K.l)+sum(p.K.q);
% Slicing columns much faster
p.F_struc = p.F_struc';
for i = 1:length(p.K.s)
p.semidefinite{i}.F_struc = p.F_struc(:,top:top+p.K.s(i)^2-1)';
p.semidefinite{i}.index = 1:p.K.s(i)^2;
top = top + p.K.s(i)^2;
end
p.F_struc = p.F_struc';
p.F_struc = p.F_struc(1:p.K.f+p.K.l+sum(p.K.q),:);
upper = inf;
standard_options = p_lp.options;
while goon
p_lp = nodeTight(p,p_lp);
p_lp = nodeFix(p,p_lp);
% Add lower bound
if ~isinf(lower)
p_lp.F_struc = [p_lp.F_struc;-lower p_lp.c'];
p_lp.K.l = p_lp.K.l + 1;
end
goon_locally = 1;
p_lp.options = standard_options;
while goon_locally
if p.solver.lower.constraint.inequalities.secondordercone.linear
ptemp = p_lp;
ptemp.F_struc = [p_lp.F_struc;p.F_struc(1+p.K.f+p.K.l:p.K.f+p.K.l+sum(p.K.q),:)];
ptemp.K.q = p.K.q;
output = feval(cutsolver,ptemp);
else
output = feval(cutsolver,p_lp);
end
% Assume we won't find a feasible solution which we try to improve
goon_locally = 0;
% Remove lower bound (avoid accumulating them)
if ~isinf(lower)
p_lp.K.l = p_lp.K.l - 1;
p_lp.F_struc = p_lp.F_struc(1:end-1,:);
end
infeasible_socp_cones = ones(1,length(p.K.q));
infeasible_sdp_cones = ones(1,length(p.K.s));
eig_failure = 0;
if output.problem == 1 | output.problem == 12
% LP relaxation was infeasible, hence problem is infeasible
feasible = 0;
lower = inf;
goon = 0;
x = zeros(length(p.c),1);
lower = inf;
cost = inf;
else
% Relaxed solution
x = output.Primal;
cost = p.f+p.c'*x+x'*p.Q*x;
if output.problem == 0
lower = cost;
end
infeasibility = 0;
[p_lp,infeasibility,infeasible_socp_cones] = add_socp_cut(p,p_lp,x,infeasibility);
[p_lp,infeasibility,infeasible_sdp_cones,eig_failure] = add_sdp_cut(p,p_lp,x,infeasibility);
[p_lp,infeasibility] = add_nogood_cut(p,p_lp,x,infeasibility);
if ~isempty(pool)
res = pool*[1;x];
j = find(res<0)
if ~isempty(j)
p_lp.F_struc = [p_lp.F_struc;pool(j,:)];
p_lp.K.l = p_lp.K.l + length(j);
pool(j,:)=[];
end
end
if feasible && infeasibility >= p.options.cutsdp.feastol && ~eig_failure
% This was actually a feasible solution
upper = min(upper, cost);
if upper > lower
if isa(p_lp.options.cutsdp.resolver,'struct')
s = p_lp.options.verbose;
p_lp.options = p_lp.options.cutsdp.resolver;
p_lp.options.verbose = s;
goon_locally = 1;
end
end
end
goon = infeasibility <= p.options.cutsdp.feastol || output.problem ==3;
goon = goon & feasible;
goon = goon || eig_failure;% not psd, but no interesting eigenvalue correctly computed
goon = goon & (solved_nodes < p.options.cutsdp.maxiter-1);
goon = goon & ~(upper <=lower);
end
solved_nodes = solved_nodes + 1;
if eig_failure
infeasibility = nan;
end
if p.options.cutsdp.verbose;
if p.K.s(1)>0
if output.problem == 3
fprintf(' %4.0f : %12.3E %12.3E* %12.3E %2.0f %2.0f/%2.0f\n',solved_nodes,infeasibility,lower,upper,p_lp.K.l-p.K.l,nnz(infeasible_sdp_cones),length(p.K.s));
else
fprintf(' %4.0f : %12.3E %12.3E %12.3E %2.0f %2.0f/%2.0f\n',solved_nodes,infeasibility,lower,upper,p_lp.K.l-p.K.l,nnz(infeasible_sdp_cones),length(p.K.s));
end
else
fprintf(' %4.0f : %12.3E %12.3E %12.3E %2.0f\n',solved_nodes,infeasibility,lower,upper,p_lp.K.l-p.K.l);
end
end
end
end
D_struc = [];
function [p_lp,worstinfeasibility,infeasible_sdp_cones,eig_computation_failure] = add_sdp_cut(p,p_lp,x,infeasibility_in);
worstinfeasibility = infeasibility_in;
eig_computation_failure = 0;
infeasible_sdp_cones = zeros(1,length(p.K.s));
if p.K.s(1)>0
% Solution found by MILP solver
xsave = x;
infeasibility = -1;
eig_computation_failure = 1;
for i = 1:1:length(p.K.s)
x = xsave;
iter = 1;
keep_projecting = 1;
infeasibility = 0;
% lin = p_lp.K.l;
while iter <= p.options.cutsdp.maxprojections & (infeasibility(end) < -p.options.cutsdp.feastol) && keep_projecting
% Add one cut b + a'*x >= 0 (if x infeasible)
%l0 = p_lp.K.l;
[X,p_lp,infeasibility(iter),a,b,failure] = add_one_sdp_cut(p,p_lp,x,i);
eig_computation_failure = eig_computation_failure & failure;
if infeasibility(iter) < p_lp.options.cutsdp.feastol && p.options.cutsdp.cutlimit > 0
% Project current point on the hyper-plane associated with
% the most negative eigenvalue and move towards the SDP
% feasible region, and the iterate a couple of iterations
% to generate a deeper cut
x0 = x;
try
x = x + a*(-b-a'*x)/(a'*a);
catch
end
keep_projecting = norm(x-x0)>= p.options.cutsdp.projectionthreshold;
else
keep_projecting = 0;
end
worstinfeasibility = min(worstinfeasibility,infeasibility(iter));
iter = iter + 1;
end
infeasible_sdp_cones(i) = infeasibility(1) < p_lp.options.cutsdp.feastol;
end
else
worstinfeasibility = min(worstinfeasibility,0);
end
function [X,p_lp,infeasibility,asave,bsave,failure] = add_one_sdp_cut(p,p_lp,x,i);
newcuts = 0;
newF = [];
n = p.K.s(i);
X = p.semidefinite{i}.F_struc*sparse([1;x]);
X = reshape(X,n,n);X = (X+X')/2;
asave = [];
bsave = [];
% First check if it happens to be psd. Then we are done. Quicker
% than computing all eigenvalues
% This also acts as a slight safe-guard in case the sparse eigs
% fails to prove that the smallest eigenvalue is non-negative
%[R,indefinite] = chol(X+eye(length(X))*1e-12);
%if indefinite
% User is trying to solve by only generating no-good cuts
permutation = [];
failure = 0;
if p.options.cutsdp.cutlimit == 0
[R,indefinite] = chol(X+eye(length(X))*1e-12);
if indefinite
infeasibility = -1;
else
infeasibility = 0;
end
return
end
% For not too large problems, we simply go with a dense
% eigenvalue/vector computation
if 0%n <= p_lp.options.cutsdp.switchtosparse
[d,v] = eig(X);
failure = 0;
else
% Try to perform a block-diagonalization of the current solution,
% and compute eigenvalues/eigenvectors for each block.
% Sparse eigenvalue computations can easily fail, so we catch info about this
[d,v,permutation,failure] = dmpermblockeig(X,p_lp.options.cutsdp.switchtosparse);
end
if ~isempty(v)
d(abs(d)<1e-12)=0;
infeasibility = min(diag(v));
else
infeasibility = 0;
end
if infeasibility<0
[ii,jj] = sort(diag(v));
if ~isempty(permutation)
[~,inversepermutation] = ismember(1:length(permutation),permutation);
localFstruc = p.semidefinite{i}.F_struc';
else
localFstruc = p.semidefinite{i}.F_struc';
end
for m = jj(1:min(length(jj),p.options.cutsdp.cutlimit))'
if v(m,m)<-1e-12
if 0
index = reshape(1:n^2,n,n);
indexpermuted = index(permutation,permutation);
indexused = index(find(d(:,m)),find(d(:,m)));
localFstruc = p.F_struc(indexused,:);
dd=d(find(d(:,m)),m);
bA = dd'*(kron(dd,speye(length(dd))).'*localFstruc);
else
try
if ~isempty(permutation)
dhere = sparse(d(inversepermutation,m));
else
dhere = sparse(d(:,m));
end
if nnz(dhere)>100
[~,ii] = sort(-abs(dhere));
dhere(abs(dhere) <= abs(dhere(ii(100))))=0;
end
dd = dhere*dhere';dd = dd(:);
index = p.semidefinite{i}.index;
used = find(dd);
bA = (localFstruc(:,index(used))*sparse(dd(used)))';
catch
bA = sparse(dd(used))'*p.F_struc(index(used),:);
end
end
b = bA(:,1);
A = -bA(:,2:end);
newF = real([newF;[b -A]]);
newcuts = newcuts + 1;
if isempty(asave)
A(abs(A)<1e-12)=0;
b(abs(b)<1e-12)=0;
asave = -A(:);
bsave = b;
end
end
end
end
newF(abs(newF)<1e-12) = 0;
keep=find(any(newF(:,2:end),2));
newF = newF(keep,:);
if size(newF,1)>0
p_lp.F_struc = [p_lp.F_struc(1:p_lp.K.f,:);newF;p_lp.F_struc(1+p_lp.K.f:end,:)];
p_lp.K.l = p_lp.K.l + size(newF,1);
end
function [p_lp,infeasibility] = add_nogood_cut(p,p_lp,x,infeasibility)
if length(x) == length(p.binary_variables)
% Add a nogood cut. Might already have been generated by
% the SDP cuts, but it doesn't hurt to add it
zv = find(x == 0);
nz = find(x == 1);
a = zeros(1,length(x));
a(zv) = 1;
a(nz) = -1;
b = length(x)-length(zv)-1;
newF = [b a];
p_lp.F_struc = [p_lp.F_struc(1:p_lp.K.f,:);newF;p_lp.F_struc(1+p_lp.K.f:end,:)];
p_lp.K.l = p_lp.K.l + 1;
end
function [p_lp,infeasibility,infeasible_socp_cones] = add_socp_cut(p,p_lp,x,infeasibility);
infeasible_socp_cones = zeros(1,length(p.K.q));
% Only add these cuts if solver doesn't support SOCP cones
if ~p.solver.lower.constraint.inequalities.secondordercone.linear
if p.K.q(1)>0
% Add cuts
top = p.K.f+p.K.l+1;
for i = 1:1:length(p.K.q)
n = p.K.q(i);
X = p.F_struc(top:top+n-1,:)*[1;x];
X = [X(1) X(2:end)';X(2:end) eye(n-1)*X(1)];
Y = randn(n,n);
newcuts = 1;
newF = zeros(n,size(p.F_struc,2));
[d,v] = eig(X);
infeasibility = min(infeasibility,min(diag(v)));
dummy=[];
newF = [];
if infeasibility<0
[ii,jj] = sort(diag(v));
for m = jj(1:min(length(jj),p.options.cutsdp.cutlimit))'%find(diag(v<0))%1:1%length(v)
if v(m,m)<0
v1 = d(1,m);v2 = d(2:end,m);
newF = [newF;p.F_struc(top,:) + 2*v1*v2'*p.F_struc(top+1:top+n-1,:)];
newcuts = newcuts + 1;
end
end
end
newF(abs(newF)<1e-12) = 0;
keep= any(newF(:,2:end),2);
newF = newF(keep,:);
if size(newF,1)>0
p_lp.F_struc = [p_lp.F_struc;newF];
p_lp.K.l = p_lp.K.l + size(newF,1);
[i,j] = sort(p_lp.F_struc*[1;x]);
end
top = top+n;
end
end
end
function p_lp = addActivationCuts(p,p_lp)
if p.options.cutsdp.activationcut && p.K.s(1) > 0 && length(p.binary_variables) == length(p.c)
top = p.K.f + p.K.l+sum(p.K.q)+1;
for k = 1:length(p.K.s)
F0 = p.F_struc(top:top+p.K.s(k)^2-1,1);
% Fij = p.F_struc(top:top+p.K.s(k)^2-1,2:end);
% Fij = sum(Fij | Fij,2);
F0 = reshape(F0,p.K.s(k),p.K.s(k));
% Fij = reshape(Fij,p.K.s(k),p.K.s(k));
% Fall = F0 | Fij;
row = 1;
added = 0; % Avoid adding more than 2*n cuts (we hope for sparse model...)
while row <= p.K.s(k)-1 && added <= 2*p.K.s(k)
% if 1
j = find(F0(row,:));
if min(eig(F0(j,j)))<0
[ii,jj] = find(F0(j,j));
ii = j(ii);
jj = j(jj);
index = sub2ind([p.K.s(k),p.K.s(k)],ii,jj);
p.F_struc(top + index-1,2:end);
S = p.F_struc(top + index-1,2:end);
S = S | S;S = sum(S,1);S = S | S;
% Some of these have to be different from 0
p_lp.F_struc = [p_lp.F_struc;-1 S];
p_lp.K.l = p_lp.K.l + 1;
end
% else
j = find(F0(row,:));
j = j(j>row);
for col = j(:)'
if F0(row,row)*F0(col,col)-F0(row,col)^2<0
index = sub2ind([p.K.s(k),p.K.s(k)],[row row col],[row col col]);
p.F_struc(top + index-1,2:end);
S = p.F_struc(top + index-1,2:end);
S = S | S;S = sum(S,1);S = S | S;
% Some of these have to be different from 0
p_lp.F_struc = [p_lp.F_struc;-1 S];
added = added + 1;
p_lp.K.l = p_lp.K.l + 1;
end
end
row = row + 1;
end
end
end
function p_lp = addDiagonalCuts(p,p_lp)
if p.K.s(1)>0
top = p.K.f+p.K.l+sum(p.K.q)+1;
for i = 1:length(p.K.s)
n = p.K.s(i);
newF=[];
nouse = [];
for m = 1:p.K.s(i)
d = eyev(p.K.s(i),m);
index = (1+(m-1)*(p.K.s(i)+1));
ab = p.F_struc(top+index-1,:);
b = ab(1);
a = -ab(2:end);
% a*x <= b
pos = find(a>0);
neg = find(a<0);
if a(pos)*p.ub(pos) + a(neg)*p.lb(neg)>b
if length(p.binary_variables) == length(p.c)
if all(p.F_struc(top+index-1,2:end) == fix(p.F_struc(top+index-1,2:end)))
ab(1) = floor(ab(1));
if max(a)<=0 % Exclusive or in disguise
ab = sign(ab);
end
end
end
newF = [newF;ab];
else
nouse = [nouse m];
end
end
% Clean
newF(abs(newF)<1e-12) = 0;
keep=find(any(newF(:,2:end),2));
newF = newF(keep,:);
p_lp.F_struc = [p_lp.F_struc;newF];
p_lp.K.l = p_lp.K.l + size(newF,1);
top = top+n^2;
end
end
function p_lp = addSOCPCut(p,p_lp)
if p.K.q(1) > 0
top = p.K.f+p.K.l+1;
for i = 1:length(p.K.q)
n = p.K.q(i);
newF = p.F_struc(top,:);
% Clean
newF(abs(newF)<1e-12) = 0;
keep=find(any(newF(:,2:end),2));
newF = newF(keep,:);
p_lp.F_struc = [p_lp.F_struc;newF];
p_lp.K.l = p_lp.K.l + size(newF,1);
top = top+n;
end
end
function p_lp = nodeTight(p,p_lp);
if p.options.cutsdp.nodetight
% Extract LP part Ax<=b
A = -p_lp.F_struc(p_lp.K.f + (1:p_lp.K.l),2:end);
b = p_lp.F_struc(p_lp.K.f + (1:p_lp.K.l),1);
c = p_lp.c;
% Tighten bounds and find redundant constraints
[p_lp.lb,p_lp.ub,redundant,pss] = milppresolve(A,b,p_lp.lb,p_lp.ub,p.integer_variables,p.binary_variables,ones(length(p.lb),1));
A(redundant,:) = [];
b(redundant) = [];
p_lp.F_struc(p_lp.K.f+redundant,:) = [];
p_lp.K.l = p_lp.K.l-length(redundant);
end
function p_lp = nodeFix(p,p_lp);
if p.options.cutsdp.nodefix
% Try to find variables to fix w.l.o.g
[fix_up,fix_down] = presolve_fixvariables(A,b,c,p_lp.lb,p_lp.ub,sdpmonotinicity);
p_lp.lb(fix_up) = p_lp.ub(fix_up);
p_lp.ub(fix_down) = p_lp.lb(fix_down);
while ~(isempty(fix_up) & isempty(fix_down))
[p_lp.lb,p_lp.ub,redundant,pss] = milppresolve(A,b,p_lp.lb,p_lp.ub,p.integer_variables,p.binary_variables,ones(length(p.lb),1));
A(redundant,:) = [];
b(redundant) = [];
p_lp.F_struc(p_lp.K.f+redundant,:) = [];
p_lp.K.l = p_lp.K.l-length(redundant);
fix_up = [];
fix_down = [];
% Try to find variables to fix w.l.o.g
[fix_up,fix_down] = presolve_fixvariables(A,b,c,p_lp.lb,p_lp.ub,sdpmonotinicity);
p_lp.lb(fix_up) = p_lp.ub(fix_up);
p_lp.ub(fix_down) = p_lp.lb(fix_down);
end
end
function p_lp = removeRedundant(p_lp);
F = unique(p_lp.F_struc(1+p_lp.K.f:end,:),'rows');
if size(F,1) < p_lp.K.l
p_lp.F_struc = [p_lp.F_struc(1:p_lp.K.f,:);F];
p_lp.K.l = size(F,1);
end
function plotP(p)
b = p.F_struc(1+p.K.f:p.K.f+p.K.l,1);
A = -p.F_struc(1+p.K.f:p.K.f+p.K.l,2:end);
x = sdpvar(size(A,2),1);
plot([A*x <= b, p.lb <= x <= p.ub],x,'b',[],sdpsettings('plot.shade',.2));
|
github
|
EnricoGiordano1992/LMI-Matlab-master
|
bnb_solvelower.m
|
.m
|
LMI-Matlab-master/yalmip/modules/global/bnb_solvelower.m
| 6,280 |
utf_8
|
2876e0b094d4dbbf73fc45f1454c259e
|
function output = bnb_solvelower(lowersolver,relaxed_p,upper,lower)
if all(relaxed_p.lb==relaxed_p.ub)
x = relaxed_p.lb;
if checkfeasiblefast(relaxed_p,relaxed_p.lb,relaxed_p.options.bnb.feastol)
output.problem = 0;
else
output.problem = 1;
end
output.Primal = x;
return
end
p = relaxed_p;
p.solver.tag = p.solver.lower.tag;
removethese = p.lb==p.ub;
if nnz(removethese)>0 & all(p.variabletype == 0) & isempty(p.evalMap)% ~isequal(lowersolver,'callfmincongp') & ~isequal(lowersolver,'callgpposy')
if ~isinf(upper) & nnz(p.Q)==0 & isequal(p.K.m,0)
p.F_struc = [p.F_struc(1:p.K.f,:);upper-p.f -p.c';p.F_struc(1+p.K.f:end,:)];
p.K.l=p.K.l+1;
end
if ~isempty(p.F_struc)
if ~isequal(p.K.l,0) & p.options.bnb.ineq2eq
affected = find(any(p.F_struc(:,1+find(removethese)),2));
end
p.F_struc(:,1)=p.F_struc(:,1)+p.F_struc(:,1+find(removethese))*p.lb(removethese);
p.F_struc(:,1+find(removethese))=[];
end
idx = find(removethese);
p.f = p.f + p.c(idx)'*p.lb(idx);
p.c(idx)=[];
if nnz(p.Q)>0
p.c = p.c + 2*p.Q(find(~removethese),idx)*p.lb(idx);
p.f = p.f + p.lb(idx)'*p.Q(idx,idx)*p.lb(idx);
p.Q(:,find(removethese))=[];
p.Q(find(removethese),:)=[];
else
p.Q = spalloc(length(p.c),length(p.c),0);
end
p.lb(removethese)=[];
p.ub(removethese)=[];
p.x0(removethese)=[];
p.monomtable(:,find(removethese))=[];
p.monomtable(find(removethese),:)=[];
% This is not necessarily correct!! x*y^2, fix y and we have a linear!
p.variabletype(removethese) = [];
% p.variabletype = []; % Reset, to messy to recompute
if ~isequal(p.K.l,0) & p.options.bnb.ineq2eq
Beq = p.F_struc(1:p.K.f,1);
Aeq = -p.F_struc(1:p.K.f,2:end);
B = p.F_struc(1+p.K.f:p.K.l+p.K.f,1);
A = -p.F_struc(1+p.K.f:p.K.l+p.K.f,2:end);
affected = affected(affected <= p.K.f + p.K.l);
affected = affected(affected > p.K.f) - p.K.f;
aaa = zeros(p.K.l,1);aaa(affected) = 1;
A1 = A(find(~aaa),:);
B1 = B(find(~aaa),:);
[A,B,Aeq2,Beq2,index] = ineq2eq(A(affected,:),B(affected));
if ~isempty(index)
actuallyused = find(any([Aeq2,Beq2],2));
Beq2 = Beq2(actuallyused);if size(Beq2,1)==0;Beq2 = [];end
Aeq2 = Aeq2(actuallyused,:);if size(Aeq2,1)==0;Aeq2 = [];end
p.F_struc = [Beq -Aeq;Beq2 -Aeq2;B1 -A1;B -A;p.F_struc(1+p.K.f + p.K.l:end,:)];
p.K.f = length(Beq) + length(Beq2);
p.K.l = length(B) + length(B1);
end
end
% Find completely empty rows
zero_row = find(~any(p.F_struc,2));
zero_row = zero_row(zero_row <= p.K.f + p.K.l);
if ~isempty(zero_row)
p.F_struc(zero_row,:) = [];
p.K.l = p.K.l - nnz(zero_row > p.K.f);
p.K.f = p.K.f - nnz(zero_row <= p.K.f);
end
if p.K.l > 0
zero_row = find(~any(p.F_struc(1+p.K.f:p.K.f+p.K.l,2:end),2));
if ~isempty(zero_row)
lhs = p.F_struc(p.K.f + zero_row,1);
zero_row_pos = find(lhs >= 0);
remove_these = zero_row(zero_row_pos);
p.F_struc(p.K.f + remove_these,:) = [];
p.K.l = p.K.l - length(remove_these);
end
end
if p.K.q> 0
top = p.K.f + p.K.l+1;
for i = 1:length(p.K.q)
if ~any(p.F_struc(top,:))
i
end
%nnz(Ff(2:end,:))
% 1
%end
end
end
% Derive bounds from this model, and if we fix more variables, apply
% recursively
if isempty(p.F_struc)
lb = p.lb;
ub = p.ub;
else
[lb,ub] = findulb(p.F_struc,p.K);
end
newub = min(ub,p.ub);
newlb = max(lb,p.lb);
if any(newub == newlb)
dummy = p;
dummy.lb = newlb;
dummy.ub = newub;
output = bnb_solvelower(lowersolver,dummy,upper,lower);
else
if any(p.lb>p.ub+0.1)
output.problem = 1;
output.Primal = zeros(length(p.lb),1);
else
p.solver.version = p.solver.lower.version;
p.solver.subversion = p.solver.lower.subversion;
output = feval(lowersolver,p);
end
end
x=relaxed_p.c*0;
x(removethese)=relaxed_p.lb(removethese);
x(~removethese)=output.Primal;
output.Primal=x;
else
p.solver = p.solver.lower;
output = feval(lowersolver,p);
end
function [A, B, Aeq, Beq, ind_eq] = ineq2eq(A, B)
% Copyright is with the following author(s):
%
% (C) 2006 Johan Loefberg, Automatic Control Laboratory, ETH Zurich,
% [email protected]
% (C) 2005 Michal Kvasnica, Automatic Control Laboratory, ETH Zurich,
% [email protected]
[ne, nx] = size(A);
Aeq = [];
Beq = [];
ind_eq = [];
if isempty(A)
return
end
sumM = sum(A, 2) + B;
for ii = 1:ne-1,
s = sumM(1);
% get matrix which contains all rows starting from ii+1
sumM = sumM(2:end,:);
% possible candidates are those rows whose sum is equal to the sum of the
% original row
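% Hedged example (not original code): the pair x + y <= 2 and -x - y <= -2
% has row sums 1+1+2 = 4 and -1-1-2 = -4, so the second row is flagged here
% and verified below to be the exact negation of the first, turning the
% pair into the equality x + y = 2.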
possible_eq = find(abs(sumM + s) < 1e-12);
if isempty(possible_eq),
continue
end
possible_eq = possible_eq + ii;
b1 = B(ii);
a1 = A(ii, :) ;
% now compare if the two inequalities (the second one with opposite
% sign) are really equal (hence they form an equality constraint)
for jj = possible_eq',
% first compare the B part, this is very cheap
if abs(b1 + B(jj)) < 1e-12,
% now compare the A parts as well
if norm(a1 + A(jj, :) , Inf) < 1e-12,
% jj-th inequality together with ii-th inequality forms an equality
% constraint
ind_eq = [ind_eq; ii jj];
break
end
end
end
end
if isempty(ind_eq),
% no equality constraints
return
else
% indices of remaining constraints which are inequalities
ind_ineq = setdiff(1:ne, ind_eq(:));
Aeq = A(ind_eq(:,1), :) ;
Beq = B(ind_eq(:,1));
A = A(ind_ineq, :) ;
B = B(ind_ineq);
end
|
github
|
EnricoGiordano1992/LMI-Matlab-master
|
addEvalVariableCuts.m
|
.m
|
LMI-Matlab-master/yalmip/modules/global/addEvalVariableCuts.m
| 4,555 |
utf_8
|
ab6e9b7499b292402a9904e71b95ce68
|
function pcut = addEvalVariableCuts(p)
pcut = p;
if ~isempty(p.evalMap)
pcut = emptyNumericalModel;
for i = 1:length(p.evalMap)
y = p.evalVariables(i);
x = p.evalMap{i}.variableIndex;
xL = p.lb(x);
xU = p.ub(x);
% Generate a convex hull polytope
if xL<xU
if ~isempty(p.evalMap{i}.properties.convexhull)
% A convex hull generator function is available!
% Might be able to reuse hull from last run node
if isfield(p.evalMap{i},'oldhull') && isequal(p.evalMap{i}.oldhull.xL,xL) && isequal(p.evalMap{i}.oldhull.xU,xU)
[Ax,Ay,b,K] = getOldHull(p,i);
else
[Ax,Ay,b,K,p] = updateHull(xL,xU,p,i);
end
else
if length(xL)>1
disp(['The ' p.evalMap{i}.fcn ' operator does not have a convex hull operator'])
disp('This is required for multi-input single output operators');
disp('Sampling approximation does not work in this case.');
error('Missing convex hull operator');
end
% sample function
z = linspace(xL,xU,100);
if isequal(p.evalMap{i}.fcn,'power_internal2')
% Special code for automatically converting sigmonial
% terms to be solvable with bmibnb
fz = feval(p.evalMap{i}.fcn,z,p.evalMap{i}.arg{2});
else
arg = p.evalMap{i}.arg;
arg{1} = z;
fz = real(feval(p.evalMap{i}.fcn,arg{1:end-1}));
% end
[minval,minpos] = min(fz);
[maxval,maxpos] = max(fz);
xtestmin = linspace(z(max([1 minpos-5])),z(min([100 minpos+5])),100);
xtestmax = linspace(z(max([1 maxpos-5])),z(min([100 maxpos+5])),100);
arg{1} = xtestmin;
fz1 = real(feval(p.evalMap{i}.fcn,arg{1:end-1}));
arg{1} = xtestmax;
fz2 = real(feval(p.evalMap{i}.fcn,arg{1:end-1}));
z = [z(:);xtestmin(:);xtestmax(:)];
fz = [fz(:);fz1(:);fz2(:)];
[z,sorter] = sort(z);
fz = fz(sorter);
[z,ii,jj]=unique(z);
fz = fz(ii);
end
% create 4 bounding planes
% f(z) < k1*(x-XL) + f(xL)
% f(z) > k2*(x-XL) + f(xL)
% f(z) < k3*(x-XU) + f(xU)
% f(z) > k4*(x-XU) + f(xU)
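% Hedged illustration (assumed f, not original code): for f(x) = x^2 on
% [xL,xU] = [0,1], the sampled secant slopes from xL lie in (0,1], so
% k1 ~ 1 and k2 ~ 0 give f <= x and f >= 0, while slopes from xU lie in
% [1,2), so k3 ~ 1 and k4 ~ 2 give f <= x and f >= 2*x - 1.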
k1 = max((fz(2:end)-fz(1))./(z(2:end)-xL))+1e-12;
k2 = min((fz(2:end)-fz(1))./(z(2:end)-xL))-1e-12;
k3 = min((fz(1:end-1)-fz(end))./(z(1:end-1)-xU))+1e-12;
k4 = max((fz(1:end-1)-fz(end))./(z(1:end-1)-xU))-1e-12;
Ax = [-k1;k2;-k3;k4];
Ay = [1;-1;1;-1];
b = [k1*(-z(1)) + fz(1);-(k2*(-z(1)) + fz(1));k3*(-z(end)) + fz(end);-(k4*(-z(end)) + fz(end))];
K = [];
end
if ~isempty(b)
if isempty(K)
% Compatibility with old code
K.f = 0;
K.l = length(b);
end
F_structemp = zeros(size(b,1),length(p.c)+1);
F_structemp(:,1+y) = -Ay;
F_structemp(:,1+x) = -Ax;
F_structemp(:,1) = b;
localModel = createNumericalModel(F_structemp,K);
pcut = mergeNumericalModels(pcut,localModel);
end
end
end
pcut = mergeNumericalModels(p,pcut);
end
function [Ax,Ay,b,K] = getOldHull(p,i);
Ax = p.evalMap{i}.oldhull.Ax;
Ay = p.evalMap{i}.oldhull.Ay;
b = p.evalMap{i}.oldhull.b;
K = p.evalMap{i}.oldhull.K;
function [Ax,Ay,b,K,p] = updateHull(xL,xU,p,i);
try
[Ax,Ay,b,K]=feval(p.evalMap{i}.properties.convexhull,xL,xU, p.evalMap{i}.arg{2:end-1});
catch
[Ax,Ay,b]=feval(p.evalMap{i}.properties.convexhull,xL,xU, p.evalMap{i}.arg{2:end-1});
K = [];
end
p = saveOldHull(xL,xU,Ax,Ay,b,K,p,i);
function p = saveOldHull(xL,xU,Ax,Ay,b,K,p,i)
p.evalMap{i}.oldhull.xL = xL;
p.evalMap{i}.oldhull.xU = xU;
p.evalMap{i}.oldhull.Ax = Ax;
p.evalMap{i}.oldhull.Ay = Ay;
p.evalMap{i}.oldhull.b = b;
p.evalMap{i}.oldhull.K = K;
|
github
|
EnricoGiordano1992/LMI-Matlab-master
|
branch_and_bound.m
|
.m
|
LMI-Matlab-master/yalmip/modules/global/branch_and_bound.m
| 27,802 |
utf_8
|
c2c7870f7780fbb463c2844fe548b6b5
|
function [x_min,solved_nodes,lower,upper,lower_hist,upper_hist,timing,counter] = branch_and_bound(p,x_min,upper,timing)
% *************************************************************************
% Create handles to solvers
% *************************************************************************
lowersolver = p.solver.lowersolver.call; % For relaxed lower bound problem
uppersolver = p.solver.uppersolver.call; % Local nonlinear upper bound
lpsolver = p.solver.lpsolver.call; % LP solver for bound propagation
% *************************************************************************
% GLOBAL PROBLEM DATA (these variables are the same in all nodes)
% *************************************************************************
c = p.c;
Q = p.Q;
f = p.f;
K = p.K;
options = p.options;
% *************************************************************************
% DEFINE UPPER BOUND PROBLEM. Basically just remove the cuts
% *************************************************************************
p_upper = cleanuppermodel(p);
% *************************************************************************
% Active constraints in main model
% 0 : Inactive constraint (i.e. a cut which unused)
% 1 : Active constraint
% inf : Removed constraint (found to be redundant)
% *************************************************************************
p.InequalityConstraintState = ones(p.K.l,1);
p.InequalityConstraintState(p.KCut.l,1) = 0;
p.EqualityConstraintState = ones(p.K.f,1);
% *************************************************************************
% LPs ARE USED IN BOX-REDUCTION
% *************************************************************************
p.lpcuts = p.F_struc(1+p.K.f:1:p.K.l+p.K.f,:);
p.cutState = ones(p.K.l,1);
p.cutState(p.KCut.l,1) = 0; % Don't use to begin with
% *************************************************************************
% INITIALIZATION
% *************************************************************************
p.depth = 0; % depth in search tree
p.dpos = 0; % used for debugging
p.lower = NaN;
lower = NaN;
gap = inf;
stack = [];
solved_nodes = 0;
numGlobalSolutions = 0;
% *************************************************************************
% Silly hack to speed up solver calls
% *************************************************************************
p.getsolvertime = 0;
counter = p.counter;
if options.bmibnb.verbose>0
disp('* Starting YALMIP global branch & bound.');
disp(['* Branch-variables : ' num2str(length(p.branch_variables))]);
disp(['* Upper solver : ' p.solver.uppersolver.tag]);
disp(['* Lower solver : ' p.solver.lowersolver.tag]);
if p.options.bmibnb.lpreduce
disp(['* LP solver : ' p.solver.lpsolver.tag]);
end
disp(' Node Upper Gap(%) Lower Open');
end
t_start = cputime;
go_on = 1;
reduction_result = [];
lower_hist = [];
upper_hist = [];
p.branchwidth = [];
pseudo_costgain=[];
pseudo_variable=[];
while go_on
% *********************************************************************
% ASSUME THAT WE WON'T FATHOME
% *********************************************************************
keep_digging = 1;
% *********************************************************************
% Strengthen variable bounds a couple of runs
% *********************************************************************
p.changedbounds = 1;
for i = 1:length(options.bmibnb.strengthscheme)
if ~p.feasible
break
end
switch options.bmibnb.strengthscheme(i)
case 1
p = updatebounds_recursive_evaluation(p);
case 2
p = updateboundsfromupper(p,upper,p.originalModel);
case 3
p = propagatequadratics(p);
case 4
p = propagate_bounds_from_complementary(p);
case 5
tstart = tic;
p = domain_reduction(p,upper,lower,lpsolver,x_min);
timing.domainreduce = timing.domainreduce + toc(tstart);
case 6
p = propagate_bounds_from_equalities(p);
otherwise
end
end
% *********************************************************************
% Detect redundant constraints
% *********************************************************************
p = remove_redundant(p);
% *********************************************************************
% SOLVE LOWER AND UPPER
% *********************************************************************
if p.feasible
[output,cost,p,timing] = solvelower(p,options,lowersolver,x_min,upper,timing);
if output.problem == -1
% We have no idea what happened.
% Behave as if it worked, so we can branch and see if things
% clean up nicely
cost = p.lower;
if isnan(cost)
cost = -inf;
end
output.problem = 3;
end
% Cplex sucks...
if output.problem == 12
pp = p;
pp.c = pp.c*0;
[output2,cost2] = solvelower(pp,options,lowersolver,[],[],timing);
if output2.problem == 0
output.problem = 2;
else
output.problem = 1;
end
end
% GLPK sucks in st_e06
if abs(p.lb(p.linears)-p.ub(p.linears)) <= 1e-3 & output.problem==1
x = (p.lb+p.ub)/2;
z = evaluate_nonlinear(p,x);
oldCount = numGlobalSolutions;
if numGlobalSolutions < p.options.bmibnb.numglobal
[upper,x_min,cost,info_text,numGlobalSolutions] = heuristics_from_relaxed(p_upper,x,upper,x_min,cost,numGlobalSolutions);
end
end
info_text = '';
switch output.problem
case {1,12} % Infeasible
info_text = 'Infeasible';
keep_digging = 0;
cost = inf;
feasible = 0;
case 2 % Unbounded (should not happen!)
cost = -inf;
x = output.Primal;
case {0,3,4} % No problems (disregard numerical problems)
if (output.problem == 3) | (output.problem == 4)
info_text = 'Numerical problems in lower bound solver';
end
x = output.Primal;
if ~isempty(p.branchwidth)
if ~isempty(p.lower)
pseudo_costgain = [pseudo_costgain (cost-p.lower)/p.branchwidth];
pseudo_variable = [pseudo_variable p.spliton];
end
end
% UPDATE THE LOWER BOUND
if isnan(lower)
lower = cost;
end
if ~isempty(stack)
lower = min(cost,min([stack.lower]));
else
lower = min(lower,cost);
end
relgap = 100*(upper-lower)/(1+abs(upper));
relgap_too_big = (isinf(lower) | isnan(relgap) | relgap>options.bmibnb.relgaptol);
if cost<upper-1e-5 & relgap_too_big
z = evaluate_nonlinear(p,x);
% Manage cuts etc
p = addsdpcut(p,z);
p = addlpcuts(p,x);
oldCount = numGlobalSolutions;
if numGlobalSolutions < p.options.bmibnb.numglobal
[upper,x_min,cost,info_text2,numGlobalSolutions] = heuristics_from_relaxed(p_upper,x,upper,x_min,cost,numGlobalSolutions);
if length(info_text)==0
info_text = info_text2;
elseif length(info_text2)>0
info_text = [info_text ' | ' info_text2];
else
info_text = info_text;
end
if ~isequal(p.solver.uppersolver.tag,'none')
if upper > p.options.bmibnb.target
if options.bmibnb.lowertarget > lower
[upper,x_min,info_text,numGlobalSolutions,timing] = solve_upper_in_node(p,p_upper,x,upper,x_min,uppersolver,info_text,numGlobalSolutions,timing);
p.counter.uppersolved = p.counter.uppersolved + 1;
end
end
end
end
else
keep_digging = 0;
info_text = 'Poor bound';
end
otherwise
cost = -inf;
x = (p.lb+p.ub)/2;
end
else
info_text = 'Infeasible';
keep_digging = 0;
cost = inf;
feasible = 0;
end
solved_nodes = solved_nodes+1;
% ************************************************
% PRUNE SUBOPTIMAL REGIONS BASED ON UPPER BOUND
% ************************************************
if ~isempty(stack)
[stack,lower] = prune(stack,upper,options,solved_nodes,p);
end
if isempty(stack)
if isinf(cost)
lower = upper;
else
lower = cost;
end
else
lower = min(lower,cost);
end
% ************************************************
% CONTINUE SPLITTING?
% ************************************************
if keep_digging & max(p.ub(p.branch_variables)-p.lb(p.branch_variables))>options.bmibnb.vartol
node = [];
% already_tested = []
% while ~isempty(setdiff(p.branch_variables,already_tested)) & isempty(node)
% temp = p.branch_variables;
% p.branch_variables=setdiff(p.branch_variables,already_tested);
spliton = branchvariable(p,options,x);
% p.branch_variables = union(p.branch_variables,already_tested);
% already_tested = [already_tested spliton];
if ismember(spliton,p.complementary)
i = find(p.complementary(:,1) == spliton);
if isempty(i)
i = find(p.complementary(:,2) == spliton);
end
% Either v1 or v2 is zero
v1 = p.complementary(i,1);
v2 = p.complementary(i,2);
gap_over_v1 = (p.lb(v1)<=0) & (p.ub(v1)>=0) & (p.ub(v1)-p.lb(v1))>0;
gap_over_v2 = (p.lb(v2)<=0) & (p.ub(v2)>=0) & (p.ub(v2)-p.lb(v2))>0;
if gap_over_v1
pp = p;
pp.complementary( find((pp.complementary(:,1)==v1) | (pp.complementary(:,2)==v1)),:)=[];
node = savetonode(pp,v1,0,0,-1,x,cost,p.EqualityConstraintState,p.InequalityConstraintState,p.cutState);
node.bilinears = p.bilinears;
node = updateonenonlinearbound(node,spliton);
if all(node.lb <= node.ub)
node.branchwidth=[];
stack = push(stack,node);
end
end
if gap_over_v2
pp = p;
%pp.complementary(i,:)=[];
pp.complementary( find((pp.complementary(:,1)==v2) | (pp.complementary(:,2)==v2)),:)=[];
node = savetonode(pp,v2,0,0,-1,x,cost,p.EqualityConstraintState,p.InequalityConstraintState,p.cutState);
node.bilinears = p.bilinears;
node = updateonenonlinearbound(node,spliton);
if all(node.lb <= node.ub)
node.branchwidth=[];
stack = push(stack,node);
end
end
end
if isempty(node)
bounds = partition(p,options,spliton,x);
if length(bounds)>3
error('REPORT BOUND LENGTH UNIMPLEMENTED BUG')
end
for i = 1:length(bounds)-1
if ismember(spliton,union(p.binary_variables,p.integer_variables)) & (i==2)
node = savetonode(p,spliton,bounds(i)+1,bounds(i+1),-1,x,cost,p.EqualityConstraintState,p.InequalityConstraintState,p.cutState);
else
node = savetonode(p,spliton,bounds(i),bounds(i+1),-1,x,cost,p.EqualityConstraintState,p.InequalityConstraintState,p.cutState);
end
node.bilinears = p.bilinears;
node = updateonenonlinearbound(node,spliton);
node.branchwidth = [p.ub(spliton)-p.lb(spliton)];
if all(node.lb <= node.ub)
stack = push(stack,node);
end
end
end
lower = min([stack.lower]);
end
if ~isempty(p)
counter = p.counter;
end
% ************************************************
% Pick and create a suitable node
% ************************************************
[p,stack] = selectbranch(p,options,stack,x_min,upper);
if isempty(p)
if ~isinf(upper)
relgap = 0;
end
if isinf(upper) & isinf(lower)
relgap = inf;
end
depth = 0;
else
relgap = 100*(upper-lower)/(1+max(abs(lower)+abs(upper))/2);
depth = p.depth;
end
if options.bmibnb.verbose>0
fprintf(' %4.0f : %12.3E %7.2f %12.3E %2.0f %s \n',solved_nodes,upper,relgap,lower,length(stack)+length(p),info_text);
end
absgap = upper-lower;
% ************************************************
% Continue?
% ************************************************
time_ok = cputime-t_start < options.bmibnb.maxtime;
iter_ok = solved_nodes < options.bmibnb.maxiter;
any_nodes = ~isempty(p);
relgap_too_big = (isinf(lower) | isnan(relgap) | relgap>100*options.bmibnb.relgaptol);
absgap_too_big = (isinf(lower) | isnan(absgap) | absgap>options.bmibnb.absgaptol);
uppertarget_not_met = upper > options.bmibnb.target;
lowertarget_not_met = lower < options.bmibnb.lowertarget;
go_on = uppertarget_not_met & lowertarget_not_met & time_ok & any_nodes & iter_ok & relgap_too_big & absgap_too_big;
lower_hist = [lower_hist lower];
upper_hist = [upper_hist upper];
end
if options.bmibnb.verbose>0
fprintf(['* Finished. Cost: ' num2str(upper) ' Gap: ' num2str(relgap) '\n']);
end
%save dummy x_min
% *************************************************************************
% Stack functionality
% *************************************************************************
function stack = push(stackin,p)
if ~isempty(stackin)
stack = [p;stackin];
else
stack(1)=p;
end
function [p,stack] = pull(stack,method,x_min,upper,branch_variables);
if ~isempty(stack)
switch method
case 'maxvol'
for i = 1:length(stack)
vol(i) = sum(stack(i).ub(branch_variables)-stack(i).lb(branch_variables));
end
[i,j] = max(vol);
p=stack(j);
stack = stack([1:1:j-1 j+1:1:end]);
case 'best'
[i,j]=min([stack.lower]);
p=stack(j);
stack = stack([1:1:j-1 j+1:1:end]);
otherwise
end
else
p =[];
end
function [stack,lower] = prune(stack,upper,options,solved_nodes,p)
if ~isempty(stack)
toolarge = find([stack.lower]>upper*(1+1e-4));
if ~isempty(toolarge)
stack(toolarge)=[];
end
if ~isempty(stack)
for j = 1:length(stack)
if nnz(p.c.*(stack(j).ub-stack(j).lb)) == 1 & nnz(p.Q)==0
i = find(p.c.*(stack(j).ub-stack(j).lb));
if p.c(i)>0
stack(j).ub(i) = min([stack(j).ub(i) upper]);
end
end
end
indPOS = find(p.c>0);
indNEG = find(p.c<0);
LB = [stack.lb];
UB = [stack.ub];
LOWER = p.c([indPOS(:);indNEG(:)])'*[LB(indPOS,:);UB(indNEG,:)];
toolarge = find(LOWER > upper*(1-1e-8));
stack(toolarge)=[];
end
end
if ~isempty(stack)
lower = min([stack.lower]);
else
lower = upper;
end
function node = savetonode(p,spliton,bounds1,bounds2,direction,x,cost,EqualityConstraintState,InequalityConstraintState,cutState);
node.lb = p.lb;
node.ub = p.ub;
node.lb(spliton) = bounds1;
node.ub(spliton) = bounds2;
node.lb(p.integer_variables) = ceil(node.lb(p.integer_variables));
node.ub(p.integer_variables) = floor(node.ub(p.integer_variables));
node.lb(p.binary_variables) = ceil(node.lb(p.binary_variables));
node.ub(p.binary_variables) = floor(node.ub(p.binary_variables));
node.complementary = p.complementary;
if direction == -1
node.dpos = p.dpos-1/(2^sqrt(p.depth));
else
node.dpos = p.dpos+1/(2^sqrt(p.depth));
end
node.spliton = spliton;
node.depth = p.depth+1;
node.x0 = x;
node.lpcuts = p.lpcuts;
node.lower = cost;
node.InequalityConstraintState = InequalityConstraintState;
node.EqualityConstraintState = EqualityConstraintState;
node.cutState = cutState;
% *************************************
% DERIVE LINEAR CUTS FROM SDPs
% *************************************
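% Illustrative sketch (not part of the original code): if d is an
% eigenvector of X(x) = F0 + x1*F1 + ... + xn*Fn associated with a
% negative eigenvalue, then d'*X(x)*d >= 0 is a valid linear cut in x:
%
%   cut_row = [d'*F0*d, d'*F1*d, ..., d'*Fn*d];  % hypothetical F0,...,Fn
%   % the cut reads cut_row*[1;x] >= 0 and is stored as a row of p.lpcuts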
function p = addsdpcut(p,x)
if p.K.s > 0
top = p.K.f+p.K.l+1;
newcuts = 1;
newF = [];
for i = 1:length(p.K.s)
n = p.K.s(i);
X = p.F_struc(top:top+n^2-1,:)*[1;x];
X = reshape(X,n,n);
[d,v] = eig(X);
for m = 1:length(v)
if v(m,m)<0
for j = 1:length(x)+1;
newF(newcuts,j)= d(:,m)'*reshape(p.F_struc(top:top+n^2-1,j),n,n)*d(:,m);
end
% max(abs(newF(:,2:end)),[],2)
newF(newcuts,1)=newF(newcuts,1)+1e-6;
newcuts = newcuts + 1;
if size(p.lpcuts,1)>0
dist = p.lpcuts*newF(newcuts-1,:)'/(newF(newcuts-1,:)*newF(newcuts-1,:)');
if any(abs(dist-1)<1e-3)
newF = newF(1:end-1,:);
newcuts = newcuts - 1;
end
end
end
end
top = top+n^2;
end
if ~isempty(newF)
% Don't keep all
m = size(newF,2);
% size(p.lpcuts)
p.lpcuts = [newF;p.lpcuts];
p.cutState = [ones(size(newF,1),1);p.cutState];
violations = p.lpcuts*[1;x];
p.lpcuts = p.lpcuts(violations<0.1,:);
if size(p.lpcuts,1)>15*m
disp('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!');
violations = p.lpcuts*[1;x];
[i,j] = sort(violations);
%p.lpcuts = p.lpcuts(j(1:15*m),:);
%p.cutState = lpcuts = p.lpcuts(j(1:15*m),:);
%p.lpcuts = p.lpcuts(end-15*m+1:end,:);
end
end
end
function p = addlpcuts(p,z)
inactiveCuts = find(~p.cutState);
violation = p.lpcuts(inactiveCuts,:)*[1;z];
need_to_add = find(violation < -1e-4);
if ~isempty(need_to_add)
p.cutState(inactiveCuts(need_to_add)) = 1;
end
inactiveCuts = find(p.InequalityConstraintState == 0 );
violation = p.F_struc(p.K.f+inactiveCuts,:)*[1;z];
need_to_add = find(violation < -1e-4);
if ~isempty(need_to_add)
p.InequalityConstraintState(inactiveCuts(need_to_add)) = 1;
end
% *************************************************************************
% Strategy for deciding which variable to branch on
% *************************************************************************
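% Illustrative sketch (assumption, not original code): for a bilinear model
% the residuals res = x_ij - x_i*x_j measure how far the current relaxation
% is from the true products, e.g.
%
%   res = x(p.bilinears(:,1)) - x(p.bilinears(:,2)).*x(p.bilinears(:,3));
%   [~,worst] = max(abs(res));     % branch on a variable in the worst term
%
% The function below follows this idea, accumulates residuals per variable,
% and falls back to the widest box when there are no bilinear terms.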
function spliton = branchvariable(p,options,x)
% Split if box is too narrow
width = abs(p.ub(p.branch_variables)-p.lb(p.branch_variables));
% if ~isempty(p.binary_variables)
% width_bin = min([abs(1-x(p.binary_variables)) abs(x(p.binary_variables))],[],2);
% end
if isempty(p.bilinears) | ~isempty(p.evalMap) | any(p.variabletype > 2)%(min(width)/max(width) < 0.1) | (size(p.bilinears,1)==0) %
[i,j] = max(width);%.*p.weight(p.branch_variables));
spliton = p.branch_variables(j);
else
res = x(p.bilinears(:,1))-x(p.bilinears(:,2)).*x(p.bilinears(:,3));
[ii,jj] = sort(abs(res));
v1 = p.bilinears(jj(end),2);
v2 = p.bilinears(jj(end),3);
acc_res1 = sum(abs(res(find((p.bilinears(:,2)==v1) | p.bilinears(:,3)==v1))));
acc_res2 = sum(abs(res(find((p.bilinears(:,2)==v2) | p.bilinears(:,3)==v2))));
if abs(acc_res1-acc_res2)<1e-3 & ismember(v2,p.branch_variables) & ismember(v1,p.branch_variables)
if abs(p.ub(v1)-p.lb(v1))>abs(p.ub(v2)-p.lb(v2))
spliton = v1;
elseif abs(p.ub(v1)-p.lb(v1))<abs(p.ub(v2)-p.lb(v2))
spliton = v2;
else
% Oops, two with the same impact. To avoid repeatedly branching on
% a variable that doesn't influence the bounds, we flip a coin to
% decide which one to branch on
if rand(1)>0.5
spliton = v1;
else
spliton = v2;
end
end
else
if (~ismember(v2,p.branch_variables) | (acc_res1>acc_res2)) & ismember(v1,p.branch_variables)
spliton = v1;
elseif ismember(v2,p.branch_variables)
spliton = v2;
else
[i,j] = max(width);
spliton = p.branch_variables(j);
end
end
end
% *************************************************************************
% Strategy for dividing the search space
% *************************************************************************
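% Illustrative sketch (not original code): the default 'bisect' rule splits
% the chosen interval at its midpoint, e.g. with p.lb(k) = -2 and
% p.ub(k) = 4 the returned vector is
%
%   bounds = [-2 1 4];   % children inherit [-2,1] and [1,4]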
function bounds = partition(p,options,spliton,x_min)
x = x_min;
if isinf(p.lb(spliton))
%bounds = [p.lb(spliton) x_min(spliton) p.ub(spliton)]
%return
p.lb(spliton) = -1e6;
end
if isinf(p.ub(spliton))
%bounds = [p.lb(spliton) x_min(spliton) p.ub(spliton)]
%return
p.ub(spliton) = 1e6;
end
switch options.bmibnb.branchrule
case 'omega'
if ~isempty(x_min)
U = p.ub(spliton);
L = p.lb(spliton);
x = x(spliton);
bounds = [p.lb(spliton) 0.5*max(p.lb(spliton),min(x_min(spliton),p.ub(spliton)))+0.5*(p.lb(spliton)+p.ub(spliton))/2 p.ub(spliton)];
else
bounds = [p.lb(spliton) (p.lb(spliton)+p.ub(spliton))/2 p.ub(spliton)];
end
case 'bisect'
bounds = [p.lb(spliton) (p.lb(spliton)+p.ub(spliton))/2 p.ub(spliton)];
otherwise
bounds = [p.lb(spliton) (p.lb(spliton)+p.ub(spliton))/2 p.ub(spliton)];
end
if isnan(bounds(2)) %FIX
if isinf(p.lb(spliton))
p.lb(spliton) = -1e6;
end
if isinf(p.ub(spliton))
p.ub(spliton) = 1e6;
end
bounds(2) = (p.lb(spliton)+p.ub(spliton))/2;
end
function [p,stack] = selectbranch(p,options,stack,x_min,upper,cost_improvements)
switch options.bmibnb.branchmethod
case 'maxvol'
[node,stack] = pull(stack,'maxvol',x_min,upper,p.branch_variables);
case 'best'
[node,stack] = pull(stack,'best',x_min,upper);
case 'best-estimate'
[node,stack] = pull(stack,'best-estimate',x_min,upper,[],cost_improvements);
otherwise
[node,stack] = pull(stack,'best',x_min,upper);
end
% Copy node data to p
if isempty(node)
p = [];
else
p.depth = node.depth;
p.dpos = node.dpos;
p.spliton = node.spliton;
p.lb = node.lb;
p.ub = node.ub;
p.lower = node.lower;
p.lpcuts = node.lpcuts;
p.x0 = node.x0;
p.InequalityConstraintState = node.InequalityConstraintState;
p.EqualityConstraintState = node.EqualityConstraintState;
p.complementary = node.complementary;
p.cutState = node.cutState;
p.feasible = 1;
p.branchwidth = node.branchwidth;
end
% *************************************************************************
% Heuristics from relaxed
% Basically nothing coded yet. Just check feasibility...
% *************************************************************************
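% Illustrative sketch (assumption): the heuristic rounds the discrete part
% of the relaxed solution and keeps it if it is feasible and improves the
% incumbent, roughly
%
%   xr = x; xr(p_upper.binary_variables) = round(xr(p_upper.binary_variables));
%   % accept xr as new x_min if constraint_residuals(p_upper,xr) is within
%   % the eqtol/pdtol tolerances and its cost beats the current upper bound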
function [upper,x_min,cost,info_text,numglobals] = heuristics_from_relaxed(p_upper,x,upper,x_min,cost,numglobals)
%load dummy;U = [x(1) x(2) x(4);0 x(3) x(5);0 0 x(6)];P=U'*U;i = find(triu(ones(length(A))-eye(length(A))));-log(det(U'*U))+trace(A*U'*U)+2*sum(invsathub(P(i),lambda))
x(p_upper.binary_variables) = round(x(p_upper.binary_variables));
x(p_upper.integer_variables) = round(x(p_upper.integer_variables));
z = apply_recursive_evaluation(p_upper,x(1:length(p_upper.c)));
%z = evaluate_nonlinear(p_upper,x);
relaxed_residual = constraint_residuals(p_upper,z);
eq_ok = all(relaxed_residual(1:p_upper.K.f)>=-p_upper.options.bmibnb.eqtol);
iq_ok = all(relaxed_residual(1+p_upper.K.f:end)>=p_upper.options.bmibnb.pdtol);
relaxed_feasible = eq_ok & iq_ok;
info_text = '';
if relaxed_feasible
this_upper = p_upper.f+p_upper.c'*z+z'*p_upper.Q*z;
if (this_upper < (1-1e-5)*upper) & (this_upper < upper - 1e-5)
x_min = x;
upper = this_upper;
info_text = 'Improved solution';
cost = cost-1e-10; % Otherwise we'll fathom!
numglobals = numglobals + 1;
end
end
% *************************************************************************
% Detect redundant constraints
% *************************************************************************
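% Illustrative sketch (not original code): a row a'*x <= b is redundant on
% the box [lb,ub] if even its largest possible left-hand side satisfies it:
%
%   worst = a(a>0)*ub(a>0) + a(a<0)*lb(a<0);   % sup of a'*x over the box
%   % the row is redundant if worst <= b (up to a tolerance)
%
% The vectorized tests below express this condition for all rows at once.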
function p = remove_redundant(p);
b = p.F_struc(1+p.K.f:p.K.l+p.K.f,1);
A = -p.F_struc(1+p.K.f:p.K.l+p.K.f,2:end);
redundant = find(((A>0).*A*(p.ub-p.lb) - (b-A*p.lb) <-1e-2));
if length(redundant)>1
p.InequalityConstraintState(redundant) = inf;
end
if p.options.bmibnb.lpreduce
b = p.lpcuts(:,1);
A = -p.lpcuts(:,2:end);
redundant = find(((A>0).*A*(p.ub-p.lb) - (b-A*p.lb) <-1e-2));
if length(redundant)>1
p.lpcuts(redundant,:) = [];
p.cutState(redundant) = [];
end
end
if p.K.f > 0
b = p.F_struc(1:p.K.f,1);
A = -p.F_struc(1:p.K.f,2:end);
s1 = ((A>0).*A*(p.ub-p.lb) - (b-A*p.lb) <1e-6);
s2 = ((-A>0).*(-A)*(p.ub-p.lb) - ((-b)-(-A)*p.lb) <1e-6);
redundant = find(s1 & s2);
if length(redundant)>1
p.EqualityConstraintState(redundant) = inf;
end
end
% *************************************************************************
% Clean the upper bound model
% Remove cut constraints, and
% possibly remove variables that are no longer used
% *************************************************************************
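% Illustrative sketch (assumption): a linear cost on an auxiliary bilinear
% variable x_k = x_i*x_j is moved into the quadratic term Q so that a
% quadratic-aware upper-bound solver sees the original objective, roughly
%
%   p.Q(i,j) = p.Q(i,j) + c_k/2;  p.Q(j,i) = p.Q(j,i) + c_k/2;  p.c(k) = 0;
%
% where i, j, k and c_k are hypothetical indices/coefficients used only for
% illustration.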
function p = cleanuppermodel(p);
% We might have created a bilinear model from an original polynomial model.
% We should use the original model when we solve the upper bound problem.
p_bilinear = p;
p = p.originalModel;
% Remove cuts
p.F_struc(p.K.f+p.KCut.l,:)=[];
p.K.l = p.K.l - length(p.KCut.l);
n_start = length(p.c);
% Quadratic mode, and quadratic aware solver?
bilinear_variables = find(p.variabletype == 1 | p.variabletype == 2);
if ~isempty(bilinear_variables)
used_in_c = find(p.c);
quadraticterms = used_in_c(find(ismember(used_in_c,bilinear_variables)));
if ~isempty(quadraticterms) & p.solver.uppersolver.objective.quadratic.nonconvex
usedinquadratic = zeros(1,length(p.c));
for i = 1:length(quadraticterms)
Qij = p.c(quadraticterms(i));
power_index = find(p.monomtable(quadraticterms(i),:));
if length(power_index) == 1
p.Q(power_index,power_index) = Qij;
else
p.Q(power_index(1),power_index(2)) = Qij/2;
p.Q(power_index(2),power_index(1)) = Qij/2;
end
p.c(quadraticterms(i)) = 0;
end
end
end
% Remove SDP cuts
if length(p.KCut.s)>0
starts = p.K.f+p.K.l + [1 1+cumsum((p.K.s).^2)];
remove_these = [];
for i = 1:length(p.KCut.s)
j = p.KCut.s(i);
remove_these = [remove_these;(starts(j):starts(j+1)-1)'];
end
p.F_struc(remove_these,:)=[];
p.K.s(p.KCut.s) = [];
end
p.lb = p_bilinear.lb(1:length(p.c));
p.ub = p_bilinear.ub(1:length(p.c));
p.bilinears = [];
|
github
|
EnricoGiordano1992/LMI-Matlab-master
|
propagate_bounds_from_equalities.m
|
.m
|
LMI-Matlab-master/yalmip/modules/global/propagate_bounds_from_equalities.m
| 10,554 |
utf_8
|
9d6b2fdfcaad8b64e3dc2691481565a7
|
function p = propagate_bounds_from_equalities(p)
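% Illustrative example (not from the original code): from an equality such
% as x1 + 2*x2 == 4 with 0 <= x2 <= 1 one may conclude 2 <= x1 <= 4, i.e.
% bounds on each variable follow from b - sum_{j~=i} a_j*x_j once the other
% variables are boxed. The code below applies this idea row by row, with
% special handling of bilinear terms and of simple x == y / x == -y rows.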
LU = [p.lb p.ub];
p_F_struc = p.F_struc;
n_p_F_struc_cols = size(p_F_struc,2);
fixedVars = find(p.lb == p.ub & p.variabletype(:) == 0);
if ~isempty(fixedVars)
p_F_struc_forbilin = p_F_struc;
p_F_struc_forbilin(:,1) = p_F_struc(:,1) + p_F_struc(:,1+fixedVars)*p.lb(fixedVars);
p_F_struc_forbilin(:,1+fixedVars) = 0;
else
p_F_struc_forbilin=p_F_struc;
end
usedVariables = find(any(p.F_struc(:,2:end)));
if all(isinf(p.lb(usedVariables))) & all(isinf(p.ub(usedVariables)))
return
end
if p.K.f >0
interestingRows = find(p_F_struc(1:p.K.f,1));
if ~isempty(interestingRows)
S = p_F_struc(interestingRows,:);
S(:,1)=0;
S = sum(S|S,2) - abs(sum(S,2));
interestingRows = interestingRows(find(S==0));
for j = interestingRows(:)'
thisrow = p_F_struc(j,:);
if thisrow(1)<0
thisrow = -thisrow;
end
[row,col,val] = find(thisrow);
% Find bounds from sum(xi) = 1, xi>0
if all(val(2:end) < 0)
usedVars = col(2:end)-1;
if all(p.lb(usedVars)>=0)
p.ub(usedVars) = min( p.ub(usedVars) , val(1)./abs(val(2:end)'));
end
end
end
end
% Presolve from bilinear x*y == k
if any(p.variabletype == 1)
for j = 1:p.K.f
if p_F_struc_forbilin(j,1)~=0
[row,col,val] = find(p_F_struc_forbilin(j,:));
% Bounds from a bilinear equality x_i*x_j == constant
if length(col)==2
val = val/val(2); % val(1) + x==0
var = col(2)-1;
if p.variabletype(var)==1
[ij] = find(p.monomtable(var,:));
if p.lb(ij(1))>=0 & p.lb(ij(2))>=0
% xi*xj == val(1)
if -val(1)<0
p.feasible = 0;
return
else
p.ub(ij(2)) = min( p.ub(ij(2)),-val(1)/p.lb(ij(1)));
p.ub(ij(1)) = min( p.ub(ij(1)),-val(1)/p.lb(ij(2)));
p.lb(ij(2)) = max( p.lb(ij(2)),-val(1)/p.ub(ij(1)));
p.lb(ij(1)) = max( p.lb(ij(1)),-val(1)/p.ub(ij(2)));
end
elseif -val(1)>0 & p.lb(ij(1))>=0
p.lb(ij(2)) = max(0,p.lb(ij(2)));
elseif -val(1)>0 & p.lb(ij(2))>=0
p.lb(ij(1)) = max(0,p.lb(ij(1)));
end
end
end
end
end
end
A = p.F_struc(1:p.K.f,2:end);
AT = A';
Ap = max(0,A);ApT = Ap';
Am = min(0,A);AmT = Am';
two_terms = sum(p.F_struc(1:p.K.f,2:end) | p.F_struc(1:p.K.f,2:end),2)==2;
for j = find(sum(p.F_struc(1:p.K.f,2:end) | p.F_struc(1:p.K.f,2:end),2)>1)'
% Simple x == y
done = 0;
b = full(p_F_struc(j,1));
if b==0 & two_terms(j)
[row,col,val] = find(p_F_struc(j,:));
if length(row) == 2
if val(1) == -val(2)
p.lb(col(1)-1) = max(p.lb(col(1)-1),p.lb(col(2)-1));
p.lb(col(2)-1) = max(p.lb(col(1)-1),p.lb(col(2)-1));
p.ub(col(1)-1) = min(p.ub(col(1)-1),p.ub(col(2)-1));
p.ub(col(2)-1) = min(p.ub(col(1)-1),p.ub(col(2)-1));
done = 1;
elseif val(1) == val(2)
p.lb(col(1)-1) = max(p.lb(col(1)-1),-p.ub(col(2)-1));
p.lb(col(2)-1) = max(-p.ub(col(1)-1),p.lb(col(2)-1));
p.ub(col(1)-1) = min(p.ub(col(1)-1),-p.lb(col(2)-1));
p.ub(col(2)-1) = min(-p.lb(col(1)-1),p.ub(col(2)-1));
done = 1;
end
end
end
if ~done
a = AT(:,j)';
ap = (ApT(:,j)');
am = (AmT(:,j)');
find_a = find(a);
p_ub = p.ub(find_a);
p_lb = p.lb(find_a);
inflb = isinf(p_lb);
infub = isinf(p_ub);
if ~all(inflb & infub)
if any(inflb) | any(infub)
[p_lb,p_ub] = propagatewINFreduced(full(a(find_a)),full(ap(find_a)),full(am(find_a)),p_lb,p_ub,b);
p.lb(find_a) = p_lb;
p.ub(find_a) = p_ub;
else
[p_lb,p_ub] = propagatewoINFreduced(full(a(find_a)),full(ap(find_a)),full(am(find_a)),p_lb,p_ub,b);
p.lb(find_a) = p_lb;
p.ub(find_a) = p_ub;
end
end
end
end
end
close = find(abs(p.lb - p.ub) < 1e-12);
p.lb(close) = (p.lb(close)+p.ub(close))/2;
p.ub(close) = p.lb(close);
p = update_integer_bounds(p);
if ~isequal(LU,[p.lb p.ub])
p.changedbounds = 1;
end
function [p_lb,p_ub] = propagatewINFreduced(a,ap,am,p_lb,p_ub,b);
%a = AT(:,j)';
%ap = (ApT(:,j)');
%am = (AmT(:,j)');
%p_ub = p.ub;
%p_lb = p.lb;
%find_a = find(a);
% find_a = find_a(min(find(isinf(p.lb(find_a)) | isinf(p.ub(find_a)))):end);
for k = 1:length(a)%find_a
p_ub_k = p_ub(k);
p_lb_k = p_lb(k);
if (p_ub_k-p_lb_k) > 1e-8
L = p_lb;
U = p_ub;
L(k) = 0;
U(k) = 0;
ak = a(k);
if ak < 0
ak = -ak;
aa = am;
am = -ap;
ap = -aa;
b = -b;
a = -a;
end
if ak > 0
use1 = find(ap'~=0);
use2 = find(am'~=0);
newlower = (-b - ap(use1)*U(use1) - am(use2)*L(use2))/ak;
newupper = (-b - am(use2)*U(use2) - ap(use1)*L(use1))/ak;
%newlower = (-b - ap*U - am*L)/ak;
%newupper = (-b - am*U - ap*L)/ak;
else
newlower = (-b - am*U - ap*L)/ak;
newupper = (-b - ap*U - am*L)/ak;
end
if p_ub_k>newupper
p_ub(k) = newupper;
end
if p_lb_k<newlower
p_lb(k) = newlower;
end
end
end
p.ub = p_ub;
p.lb = p_lb;
function [p_lb,p_ub] = propagatewoINFreduced(a,ap,am,p_lb,p_ub,b);
L = p_lb;
U = p_ub;
apU = ap*U;
amU = am*U;
apL = ap*L;
amL = am*L;
papU = ap.*U';
pamU = am.*U';
papL = ap.*L';
pamL = am.*L';
minusbminusapUminusamL = -b-apU-amL;
minusbminusamUminusapL = -b-amU-apL;
for k = 1:length(a)%find_a
p_ub_k = p_ub(k);
p_lb_k = p_lb(k);
if (p_ub_k-p_lb_k) > 1e-8
ak = a(k);
if ak > 0
%newlower = (-b-apU+papU(k)-amL+pamL(k) )/ak;
%newupper = (-b-amU+pamU(k)-apL+papL(k) )/ak;
newlower = -1e-15 + (minusbminusapUminusamL+papU(k)+pamL(k) )/ak;
newupper = 1e-15 + (minusbminusamUminusapL+pamU(k)+papL(k) )/ak;
else
newlower = -1e-15 + (minusbminusamUminusapL+pamU(k)+papL(k) )/ak;
newupper = 1e-15 + (minusbminusapUminusamL+papU(k)+pamL(k) )/ak;
end
if p_ub_k>newupper
p_ub(k) = newupper;
U(k) = newupper;
apU = ap*U;
amU = am*U;
papU = ap.*U';
pamU = am.*U';
minusbminusapUminusamL = -b-apU-amL;
minusbminusamUminusapL = -b-amU-apL;
end
if p_lb_k<newlower
p_lb(k) = newlower;
L(k) = newlower;
apL = ap*L;
amL = am*L;
papL = ap.*L';
pamL = am.*L';
minusbminusapUminusamL = -b-apU-amL;
minusbminusamUminusapL = -b-amU-apL;
end
end
end
%p.ub = p_ub;
%p.lb = p_lb;
function p = propagatewINF(p,AT,ApT,AmT,j,b);
a = AT(:,j)';
ap = (ApT(:,j)');
am = (AmT(:,j)');
p_ub = p.ub;
p_lb = p.lb;
find_a = find(a);
% find_a = find_a(min(find(isinf(p.lb(find_a)) | isinf(p.ub(find_a)))):end);
for k = find_a
p_ub_k = p_ub(k);
p_lb_k = p_lb(k);
if (p_ub_k-p_lb_k) > 1e-8
L = p_lb;
U = p_ub;
L(k) = 0;
U(k) = 0;
ak = a(k);
if ak > 0
newlower = (-b - ap*U - am*L)/ak;
newupper = (-b - am*U - ap*L)/ak;
else
newlower = (-b - am*U - ap*L)/ak;
newupper = (-b - ap*U - am*L)/ak;
end
% if isinf(newlower) | isinf(newupper)
% z = newlower;
% end
if p_ub_k>newupper
p_ub(k) = newupper;
end
if p_lb_k<newlower
p_lb(k) = newlower;
end
end
end
p.ub = p_ub;
p.lb = p_lb;
function p = propagatewoINF(p,AT,ApT,AmT,j,b);
a = full(AT(:,j)');
ap = full((ApT(:,j)'));
am = full((AmT(:,j)'));
p_ub = p.ub;
p_lb = p.lb;
find_a = find(a);
L = p_lb;
U = p_ub;
apU = ap*U;
amU = am*U;
apL = ap*L;
amL = am*L;
papU = ap.*U';
pamU = am.*U';
papL = ap.*L';
pamL = am.*L';
minusbminusapUminusamL = -b-apU-amL;
minusbminusamUminusapL = -b-amU-apL;
for k = find_a
p_ub_k = p_ub(k);
p_lb_k = p_lb(k);
if (p_ub_k-p_lb_k) > 1e-8
ak = a(k);
if ak > 0
%newlower = (-b-apU+papU(k)-amL+pamL(k) )/ak;
%newupper = (-b-amU+pamU(k)-apL+papL(k) )/ak;
newlower = (minusbminusapUminusamL+papU(k)+pamL(k) )/ak;
newupper = (minusbminusamUminusapL+pamU(k)+papL(k) )/ak;
else
newlower = (minusbminusamUminusapL+pamU(k)+papL(k) )/ak;
newupper = (minusbminusapUminusamL+papU(k)+pamL(k) )/ak;
end
if p_ub_k>newupper
p_ub(k) = newupper;
U(k) = newupper;
apU = ap*U;
amU = am*U;
papU = ap.*U';
pamU = am.*U';
minusbminusapUminusamL = -b-apU-amL;
minusbminusamUminusapL = -b-amU-apL;
end
if p_lb_k<newlower
p_lb(k) = newlower;
L(k) = newlower;
apL = ap*L;
amL = am*L;
papL = ap.*L';
pamL = am.*L';
minusbminusapUminusamL = -b-apU-amL;
minusbminusamUminusapL = -b-amU-apL;
end
end
end
p.ub = p_ub;
p.lb = p_lb;
|
github
|
EnricoGiordano1992/LMI-Matlab-master
|
bnb.m
|
.m
|
LMI-Matlab-master/yalmip/modules/global/bnb.m
| 43,995 |
utf_8
|
03312500b49d6a12f8d2d4a75db4c1cc
|
function output = bnb(p)
%BNB General branch-and-bound scheme for conic programs
%
% BNB applies a branch-and-bound scheme to solve mixed integer
% conic programs (LP, QP, SOCP, SDP) and mixed integer geometric programs.
%
% BNB is never called by the user directly, but is called by
% YALMIP from SOLVESDP, by choosing the solver tag 'bnb' in sdpsettings.
%
% BNB is used if no other mixed integer solver is found, and
% is only useful for very small problems, due to its simple
% and naive implementation.
%
% The behaviour of BNB can be altered using the fields
% in the field 'bnb' in SDPSETTINGS
%
% bnb.branchrule   Decides which variable to branch on
% 'max' : Variable furthest away from being integer
% 'min' : Variable closest to be being integer
% 'first' : First variable (lowest variable index in YALMIP)
% 'last' : Last variable (highest variable index in YALMIP)
% 'weight' : See manual
%
% bnb.method Branching strategy
% 'depth' : Depth first
% 'breadth' : Breadth first
% 'best' : Expand branch with lowest lower bound
% 'depthX' : Depth until integer solution found, then X (e.g. 'depthbest')
%
% solver Solver for the relaxed problems (standard solver tag, see SDPSETTINGS)
%
% maxiter Maximum number of nodes explored
%
% inttol Tolerance for declaring a variable as integer
%
% feastol Tolerance for declaring constraints as feasible
%
% gaptol Exit when (upper bound-lower bound)/(1e-3+abs(lower bound)) < gaptol
%
% round Round variables smaller than bnb.inttol
%
%
% See also SOLVESDP, BINVAR, INTVAR, BINARY, INTEGER
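%
% A usage sketch (illustrative only; A, b and c are assumed to be given
% problem data, and the option values are just examples):
%
%   x = intvar(5,1);
%   Constraints = [A*x <= b, 0 <= x <= 10];
%   ops = sdpsettings('solver','bnb','bnb.branchrule','max','bnb.method','best');
%   solvesdp(Constraints,c'*x,ops);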
% ********************************
%% INITIALIZE DIAGNOSTICS IN YALMIP
% ********************************
bnbsolvertime = clock;
showprogress('Branch and bound started',p.options.showprogress);
% ********************************
%% We might have a GP : pre-calc
% ********************************
p.nonlinear = find(~(sum(p.monomtable~=0,2)==1 & sum(p.monomtable,2)==1));
p.nonlinear = union(p.nonlinear,p.evalVariables);
% ********************************
% This field is only used in bmibnb, which uses the same sub-functions as
% bnb
% ********************************
p.high_monom_model = [];
% ********************************
%% Define infinite bounds
% ********************************
if isempty(p.ub)
p.ub = repmat(inf,length(p.c),1);
end
if isempty(p.lb)
p.lb = repmat(-inf,length(p.c),1);
end
% ********************************
%% Extract bounds from model
% ********************************
if ~isempty(p.F_struc)
[lb,ub,used_rows_eq,used_rows_lp] = findulb(p.F_struc,p.K);
if ~isempty(used_rows_lp)
used_rows_lp = used_rows_lp(~any(full(p.F_struc(p.K.f + used_rows_lp,1+p.nonlinear)),2));
if ~isempty(used_rows_lp)
lower_defined = find(~isinf(lb));
if ~isempty(lower_defined)
p.lb(lower_defined) = max(p.lb(lower_defined),lb(lower_defined));
end
upper_defined = find(~isinf(ub));
if ~isempty(upper_defined)
p.ub(upper_defined) = min(p.ub(upper_defined),ub(upper_defined));
end
p.F_struc(p.K.f + used_rows_lp,:)=[];
p.K.l = p.K.l - length(used_rows_lp);
end
end
if ~isempty(used_rows_eq)
used_rows_eq = used_rows_eq(~any(full(p.F_struc(used_rows_eq,1+p.nonlinear)),2));
if ~isempty(used_rows_eq)
lower_defined = find(~isinf(lb));
if ~isempty(lower_defined)
p.lb(lower_defined) = max(p.lb(lower_defined),lb(lower_defined));
end
upper_defined = find(~isinf(ub));
if ~isempty(upper_defined)
p.ub(upper_defined) = min(p.ub(upper_defined),ub(upper_defined));
end
p.F_struc(used_rows_eq,:)=[];
p.K.f = p.K.f - length(used_rows_eq);
end
end
end
% ********************************
%% ADD CONSTRAINTS 0<x<1 FOR BINARY
% ********************************
if ~isempty(p.binary_variables)
p.ub(p.binary_variables) = min(p.ub(p.binary_variables),1);
p.lb(p.binary_variables) = max(p.lb(p.binary_variables),0);
% godown = find(p.ub(p.binary_variables) < 1);
% goup = find(p.lb(p.binary_variables) > 0);
% p.ub(p.binary_variables(godown)) = 0;
% p.lb(p.binary_variables(goup)) = 1;
end
%p.lb(p.integer_variables) = ceil(p.lb(p.integer_variables));
%p.ub(p.integer_variables) = floor(p.ub(p.integer_variables));
p = update_integer_bounds(p);
if ~isempty(p.semicont_variables)
redundant = find(p.lb<=0 & p.ub>=0);
p.semicont_variables = setdiff(p.semicont_variables,redundant);
% Now relax the model and generate hull including 0
p.semibounds.lb = p.lb(p.semicont_variables);
p.semibounds.ub = p.ub(p.semicont_variables);
p.lb(p.semicont_variables) = min(p.lb(p.semicont_variables),0);
p.ub(p.semicont_variables) = max(p.ub(p.semicont_variables),0);
end
% Could be some nonlinear terms (although these problems are recommended
% to be solved using BMIBNB)
p = compile_nonlinear_table(p);
p = updatemonomialbounds(p);
% *******************************
%% PRE-SOLVE (nothing fancy coded)
% *******************************
pss=[];
p = propagate_bounds_from_equalities(p);
if p.K.f > 0
pp = p;
r = find(p.lb == p.ub);
pp.F_struc(:,1) = pp.F_struc(:,1) + pp.F_struc(:,r+1)*p.lb(r);
pp.F_struc(:,r+1)=[];
pp.lb(r)=[];
pp.ub(r)=[];
pp.variabletype(r)=[];
% FIXME: This is lazy, should update new list
pp.binary_variables = [];
pp.integer_variables = [];
pp = propagate_bounds_from_equalities(pp);
other = setdiff(1:length(p.lb),r);
p.lb(other) = pp.lb;
p.ub(other) = pp.ub;
p = update_integer_bounds(p);
redundant = find(~any(pp.F_struc(1:p.K.f,2:end),2));
if any(p.F_struc(redundant,1)<0)
p.feasible = 0;
else
p.F_struc(redundant,:)=[];
p.K.f = p.K.f - length(redundant);
end
end
if isempty(p.nonlinear)
if p.K.f>0
Aeq = -p.F_struc(1:p.K.f,2:end);
beq = p.F_struc(1:p.K.f,1);
A = [Aeq;-Aeq];
b = [beq;-beq];
[p.lb,p.ub,redundant,pss] = tightenbounds(A,b,p.lb,p.ub,p.integer_variables,p.binary_variables,ones(length(p.lb),1));
end
pss=[];
if p.K.l>0
A = -p.F_struc(1+p.K.f:p.K.f+p.K.l,2:end);
b = p.F_struc(1+p.K.f:p.K.f+p.K.l,1);
[p.lb,p.ub,redundant,pss] = tightenbounds(A,b,p.lb,p.ub,p.integer_variables,p.binary_variables,ones(length(p.lb),1));
if length(redundant)>0
pss.AL0A(redundant,:)=[];
pss.AG0A(redundant,:)=[];
p.F_struc(p.K.f+redundant,:)=[];
p.K.l = p.K.l - length(redundant);
end
end
end
% Silly redundancy
p = updatemonomialbounds(p);
p = propagate_bounds_from_equalities(p);
if p.K.l > 0
b = p.F_struc(1+p.K.f:p.K.l+p.K.f,1);
A = -p.F_struc(1+p.K.f:p.K.l+p.K.f,2:end);
redundant = find(((A>0).*A*(p.ub-p.lb) - (b-A*p.lb) <= 0));
if ~isempty(redundant)
p.F_struc(p.K.f + redundant,:) = [];
p.K.l = p.K.l - length(redundant);
end
end
% *******************************
%% PERTURBATION OF LINEAR COST
% *******************************
p.corig = p.c;
if nnz(p.Q)==0 & isequal(p.K.m,0)
% g = randn('seed');
% randn('state',1253); %For my testing, I keep this the same...
% % This perturbation has to be better. Crucial for many real LP problems
% p.c = (p.c).*(1+randn(length(p.c),1)*1e-4);
% randn('seed',g);
end
% *******************************
%% Display logics
% 0 : Silent
% 1 : Display branching
% 2 : Display node solver prints
% *******************************
switch max(min(p.options.verbose,3),0)
case 0
p.options.bnb.verbose = 0;
case 1
p.options.bnb.verbose = 1;
p.options.verbose = 0;
case 2
p.options.bnb.verbose = 2;
p.options.verbose = 0;
case 3
p.options.bnb.verbose = 2;
p.options.verbose = 1;
otherwise
p.options.bnb.verbose = 0;
p.options.verbose = 0;
end
% *******************************
%% Figure out the weights if any
% *******************************
try % Probably buggy first version...
if ~isempty(p.options.bnb.weight)
weightvar = p.options.bnb.weight;
if isa(weightvar,'sdpvar')
if (prod(size(weightvar)) == 1)
weight = ones(length(p.c),1);
for i = 1:length(p.c)
weight(i,1) = full(getbasematrix(weightvar,p.used_variables(i)));
end
p.weight = weight;
else
error('Weight should be an SDPVAR scalar');
end
else
error('Weight should be an SDPVAR scalar');
end
else
p.weight = ones(length(p.c),1);
end
catch
disp('Something wrong with weights. Please report bug');
p.weight = ones(length(p.c),1);
end
% *******************************
%% START BRANCHING
% *******************************
setuptime = etime(clock,bnbsolvertime);
bnbsolvertime = clock;
[x_min,solved_nodes,lower,upper,profile,diagnostics] = branch_and_bound(p,pss);
bnbsolvertime = etime(clock,bnbsolvertime);
output.solvertime = setuptime + bnbsolvertime;
% **********************************
%% CREATE SOLUTION
% **********************************
if diagnostics == -4
output.problem = -4;
else
output.problem = 0;
if isinf(upper)
output.problem = 1;
end
if isinf(-lower)
output.problem = 2;
end
if solved_nodes == p.options.bnb.maxiter
output.problem = 3;
end
end
output.solved_nodes = solved_nodes;
output.Primal = x_min;
output.Dual = [];
output.Slack = [];
if output.problem == -4
output.infostr = yalmiperror(output.problem,[p.solver.lower.tag '-' p.solver.lower.version]);
else
output.infostr = yalmiperror(output.problem,'BNB');
end
output.solverinput = 0;
if p.options.savesolveroutput
output.solveroutput.setuptime = setuptime;
output.solveroutput.localsolvertime = profile.local_solver_time;
output.solveroutput.branchingtime = bnbsolvertime;
output.solveroutput.solved_nodes = solved_nodes;
output.solveroutput.lower = lower;
output.solveroutput.upper = upper;
else
output.solveroutput =[];
end
%% --
function [x_min,solved_nodes,lower,upper,profile,diagnostics] = branch_and_bound(p,pss)
% *******************************
% We don't need this
% *******************************
p.options.savesolveroutput = 0;
p.options.saveduals = 0;
p.options.dimacs = 0;
diagnostics = 0;
% *******************************
% Tracking performance etc
% *******************************
profile.local_solver_time = 0;
% *************************************************************************
% We save this to re-use some stuff in fmincon
% *************************************************************************
p.options.savesolverinput = 1;
% *******************************
%% SET-UP ROOT PROBLEM
% *******************************
p.depth = 0;
p.lower = NaN;
% Does the user want to create his own initial guess
if p.options.usex0
[x_min,upper] = initializesolution(p);
if isinf(upper)
% Try to initialize to the midpoint of the lower and upper bounds. fmincon
% really doesn't like a zero initial guess, despite having bounds available
x_min = zeros(length(p.c),1);
violates_finite_bounds = ((x_min < p.lb) | (x_min > p.ub));
violates_finite_bounds = find(violates_finite_bounds & ~isinf(p.lb) & ~isinf(p.ub));
x_min(violates_finite_bounds) = (p.lb(violates_finite_bounds) + p.ub(violates_finite_bounds))/2;
x_min = setnonlinearvariables(p,x_min);
end
p.x0 = x_min;
else
upper = inf;
x_min = zeros(length(p.c),1);
violates_finite_bounds = ((x_min < p.lb) | (x_min > p.ub));
violates_finite_bounds = find(violates_finite_bounds & ~isinf(p.lb) & ~isinf(p.ub));
x_min(violates_finite_bounds) = (p.lb(violates_finite_bounds) + p.ub(violates_finite_bounds))/2;
x_min = setnonlinearvariables(p,x_min);
p.x0 = x_min;
end
% *******************************
%% Global stuff
% *******************************
lower = NaN;
stack = [];
% *******************************
%% Create function handle to solver
% *******************************
lowersolver = p.solver.lower.call;
uppersolver = p.options.bnb.uppersolver;
% *******************************
%% INVARIANT PROBLEM DATA
% *******************************
c = p.corig;
Q = p.Q;
f = p.f;
integer_variables = p.integer_variables;
solved_nodes = 0;
semicont_variables = p.semicont_variables;
gap = inf;
node = 1;
if p.options.bnb.presolve
savec = p.c;
saveQ = p.Q;
p.Q = p.Q*0;
n = length(p.c);
saveBinary = p.binary_variables;
saveInteger = p.integer_variables;
p.binary_variables = [];
p.integer_variables = [];
for i = 1:length(c)
p.c = eyev(n,i);
output = feval(lowersolver,p);
if output.problem == 0
p.lb(i) = max(p.lb(i),output.Primal(i));
end
p.c = -eyev(n,i);
output = feval(lowersolver,p);
if output.problem == 0
p.ub(i) = min(p.ub(i),output.Primal(i));
end
p.lb(saveBinary) = ceil(p.lb(saveBinary)-1e-3);
p.ub(saveBinary) = floor(p.ub(saveBinary)+1e-3);
end
p.binary_variables = saveBinary;
p.integer_variables = saveInteger;
p.Q = saveQ;
p.c = savec;
end
% ************************************************
% Some hacks to speed up solver calls
% Only track solver-time if user wants profile
% ************************************************
p.getsolvertime = p.options.bnb.profile;
% *******************************
%% DISPLAY HEADER
% *******************************
originalDiscrete = [p.integer_variables(:);p.binary_variables(:)];
originalBinary = p.binary_variables(:);
if nnz(Q)==0 & (nnz(p.c-fix(p.c))==0) & isequal(p.K.m,0)
can_use_ceil_lower = all(ismember(find(p.c),originalDiscrete));
else
can_use_ceil_lower = 0;
end
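% Illustrative note (assumption): when the objective is linear with integer
% coefficients over discrete variables only, every attainable objective
% value is an integer, so a fractional relaxation bound can be rounded up:
%
%   lower = 3.2   ->   ceil(lower - 1e-8) = 4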
if p.options.bnb.verbose
pc = p.problemclass;
non_convex_obj = pc.objective.quadratic.nonconvex | pc.objective.polynomial;
non_convex_constraint = pc.constraint.equalities.quadratic | pc.constraint.inequalities.elementwise.quadratic.nonconvex;
non_convex_constraint = non_convex_constraint | pc.constraint.equalities.polynomial | pc.constraint.inequalities.elementwise.polynomial;
possiblynonconvex = non_convex_obj | non_convex_constraint;
if ~isequal(p.solver.lower.version,'')
p.solver.lower.tag = [p.solver.lower.tag '-' p.solver.lower.version];
end
disp('* Starting YALMIP integer branch & bound.');
disp(['* Lower solver : ' p.solver.lower.tag]);
disp(['* Upper solver : ' p.options.bnb.uppersolver]);
disp(['* Max iterations : ' num2str(p.options.bnb.maxiter)]);
if possiblynonconvex & p.options.warning
disp(' ');
disp('Warning : The continuous relaxation may be nonconvex. This means ');
disp('that the branching process is not guaranteed to find a');
disp('globally optimal solution, since the lower bound can be');
disp('invalid. Hence, do not trust the bound or the gap...')
end
end
if p.options.bnb.verbose; disp(' Node Upper Gap(%) Lower Open');end;
if nnz(Q)==0 & nnz(c)==1 & isequal(p.K.m,0)
p.simplecost = 1;
else
p.simplecost = 0;
end
poriginal = p;
p.cuts = [];
%% MAIN LOOP
% p.options.rounding = [1 1 1 1];
if p.options.bnb.nodefix & (p.K.s(1)>0)
top=1+p.K.f+p.K.l+sum(p.K.q);
for i=1:length(p.K.s)
n=p.K.s(i);
for j=1:size(p.F_struc,2)-1;
X=full(reshape(p.F_struc(top:top+n^2-1,j+1),p.K.s(i),p.K.s(i)));
X=(X+X')/2;
v=real(eig(X+sqrt(eps)*eye(length(X))));
if all(v>=0)
sdpmonotinicity(i,j)=-1;
elseif all(v<=0)
sdpmonotinicity(i,j)=1;
else
sdpmonotinicity(i,j)=nan;
end
end
top=top+n^2;
end
else
sdpmonotinicity=[];
end
% Try to find sum(d_i) = 1
sosgroups = {};
sosvariables = [];
if p.K.f > 0 & ~isempty(p.binary_variables)
nbin = length(p.binary_variables);
Aeq = -p.F_struc(1:p.K.f,2:end);
beq = p.F_struc(1:p.K.f,1);
notbinary_var_index = setdiff(1:length(p.lb),p.binary_variables);
only_binary = ~any(Aeq(:,notbinary_var_index),2);
Aeq_bin = Aeq(find(only_binary),p.binary_variables);
beq_bin = beq(find(only_binary),:);
% Detect groups with constraints sum(d_i) == 1
sosgroups = {};
for i = 1:size(Aeq_bin,1)
if beq_bin(i) == 1
[ix,jx,sx] = find(Aeq_bin(i,:));
if all(sx == 1)
sosgroups{end+1} = p.binary_variables(jx);
sosvariables = [sosvariables p.binary_variables(jx)];
end
end
end
end
pid = 0;
lowerhist = [];
upperhist = [];
p.fixedvariable = [];
p.fixdir = '';
p.sosgroups = sosgroups;
p.sosvariables = sosvariables;
while ~isempty(node) & (solved_nodes < p.options.bnb.maxiter) & (isinf(lower) | gap>p.options.bnb.gaptol)
% ********************************************
% Adjust variable bound based on upper bound
% ********************************************
% This code typically never runs but can be turned on
% using options.bnb.nodetight and bnb.nodefix.
if ~isinf(upper) & ~isnan(lower)
[p,poriginal,stack] = pruneglobally(p,poriginal,upper,lower,stack,x);
[p,poriginal,stack] = fixvariables(p,poriginal,upper,lower,stack,x_min,sdpmonotinicity);
stack = prunecardinality(p,poriginal,stack,lower,upper);
end
% ********************************************
% BINARY VARIABLES ARE FIXED ALONG THE PROCESS
% ********************************************
binary_variables = p.binary_variables;
% ********************************************
% SO ARE SEMI VARIABLES
% ********************************************
semicont_variables = p.semicont_variables;
% ********************************************
% ASSUME THAT WE WON'T FATHOME
% ********************************************
keep_digging = 1;
message = '';
% *************************************
% SOLVE NODE PROBLEM
% *************************************
if any(p.ub<p.lb - 1e-12)
x = zeros(length(p.c),1);
output.Primal = x;
output.problem=1;
else
p.x_min = x_min;
relaxed_p = p;
relaxed_p.integer_variables = [];
relaxed_p.binary_variables = [];
relaxed_p.semicont_variables = [];
relaxed_p.ub(p.ub<p.lb) = relaxed_p.lb(p.ub<p.lb);
if upper<inf & length(poriginal.binary_variables)==length(poriginal.c) & p.K.f == 0
% Cut away current best
% FIXME: Generalize
positive = find(x_min==1);
zero = find(x_min==0);
% Add cut c'*x < c*xmin,
cc = poriginal.c;
cc(positive) = ceil(cc(positive));
cc(zero) = floor(cc(zero));
relaxed_p.K.l = relaxed_p.K.l+1;
relaxed_p.F_struc = [-1+sum(cc(positive)) -cc';relaxed_p.F_struc];
end
output = bnb_solvelower(lowersolver,relaxed_p,upper+abs(upper)*1e-2+1e-4,lower);
if p.options.bnb.profile
profile.local_solver_time = profile.local_solver_time + output.solvertime;
end
% A bit crappy code to exploit computations that were done in the
% call to fmincon...
if isfield(output,'solverinput')
if isfield(output.solverinput,'model')
if isfield(output.solverinput.model,'fastdiff')
p.fastdiff = output.solverinput.model.fastdiff;
end
end
end
if output.problem == -4
diagnostics = -4;
x = nan+zeros(length(p.lb),1);
else
if isempty(output.Primal)
output.Primal = zeros(length(p.c),1);
end
try
x = setnonlinearvariables(p,output.Primal);
catch
1
end
if(p.K.l>0) & any(p.F_struc(p.K.f+1:p.K.f+p.K.l,:)*[1;x]<-1e-5)
output.problem = 1;
elseif output.problem == 5 & ~checkfeasiblefast(p,x,p.options.bnb.feastol)
output.problem = 1;
end
end
end
solved_nodes = solved_nodes+1;
% **************************************
% THIS WILL BE INITIAL GUESS FOR CHILDREN
% **************************************
p.x0 = x;
% *************************************
% ANY INTEGERS? ROUND?
% *************************************
non_integer_binary = abs(x(binary_variables)-round(x(binary_variables)))>p.options.bnb.inttol;
non_integer_integer = abs(x(integer_variables)-round(x(integer_variables)))>p.options.bnb.inttol;
if p.options.bnb.round
x(binary_variables(~non_integer_binary)) = round(x(binary_variables(~non_integer_binary)));
x(integer_variables(~non_integer_integer)) = round(x(integer_variables(~non_integer_integer)));
end
non_integer_binary = find(non_integer_binary);
non_integer_integer = find(non_integer_integer);
if isempty(p.semicont_variables)
non_semivar_semivar=[];
else
non_semivar_semivar = find(~(abs(x(p.semicont_variables))<p.options.bnb.inttol | (x(p.semicont_variables)>p.semibounds.lb & x(p.semicont_variables)<=p.semibounds.ub)));
end
try
x = setnonlinearvariables(p,x);
catch
end
TotalIntegerInfeas = sum(abs(round(x(non_integer_integer))-x(non_integer_integer)));
TotalBinaryInfeas = sum(abs(round(x(non_integer_binary))-x(non_integer_binary)));
% *************************************
% NODE HEURISTICS (NOTHING CODED)
% *************************************
should_be_tight = find([p.lb == p.ub]);
if ~isempty(should_be_tight)
% FIX for problems that only report numerical problems but violate
% binary
if max(abs(p.lb(should_be_tight)-x(should_be_tight)))>p.options.bnb.inttol
output.problem = 1;
end
end
if output.problem==0 | output.problem==3 | output.problem==4
cost = computecost(f,c,Q,x,p);
if output.problem~=1
if isnan(lower)
lower = cost;
end
if isfield(p.options,'plottruss')
if p.options.plottruss
plottruss(1,'Relaxed node',poriginal,x);
end
end
if cost <= upper & ~(isempty(non_integer_binary) & isempty(non_integer_integer) & isempty(non_semivar_semivar))
poriginal.upper = upper;
poriginal.lower = lower;
[upper1,x_min1] = feval(uppersolver,poriginal,output,p);
if upper1 < upper
if isfield(p.options,'plottruss')
if p.options.plottruss
plottruss(3,'Best binary solution',poriginal,x_min1);
end
end
x_min = x_min1;
upper = upper1;
[stack,stacklower] = prune(stack,upper,p.options,solved_nodes,p);
lower = min(lower,stacklower);
[p,poriginal,stack] = pruneglobally(p,poriginal,upper,lower,stack,x_min);
[p,poriginal,stack] = fixvariables(p,poriginal,upper,lower,stack,x_min,sdpmonotinicity);
end
end
end
end
p = adaptivestrategy(p,upper,solved_nodes);
% *************************************
% CHECK FATHOMING POSSIBILITIES
% *************************************
feasible = 1;
switch output.problem
case 0
if can_use_ceil_lower
lower = ceil(lower-1e-8);
end
case {1,12,-4}
keep_digging = 0;
cost = inf;
feasible = 0;
case 2
cost = -inf;
otherwise
% This part has to be much more robust
cost = f+c'*x+x'*Q*x;
end
% **************************************
% YAHOO! INTEGER SOLUTION FOUND
% **************************************
if isempty(non_integer_binary) & isempty(non_integer_integer) & isempty(non_semivar_semivar)
if (cost<upper) & feasible
x_min = x;
upper = cost;
[stack,lower] = prune(stack,upper,p.options,solved_nodes,p);
if isfield(p.options,'plottruss')
if p.options.plottruss
plottruss(3,'Best binary solution',poriginal,x_min);
end
end
end
p = adaptivestrategy(p,upper,solved_nodes);
keep_digging = 0;
end
% **************************************
% Stop digging if it won't give sufficient improvement anyway
% **************************************
if cost>upper*(1-1e-6)
keep_digging = 0;
end
% **********************************
% CONTINUE SPLITTING?
% **********************************
if keep_digging & (cost<upper)
if solved_nodes == 1
RootNodeInfeas = TotalIntegerInfeas+TotalBinaryInfeas;
RootNodeCost = cost;
end
% **********************************
% BRANCH VARIABLE
% **********************************
[index,whatsplit,globalindex] = branchvariable(x,integer_variables,binary_variables,p.options,x_min,[],p);
% **********************************
% CREATE NEW PROBLEMS
% **********************************
p0_feasible = 1;
p1_feasible = 1;
switch whatsplit
case 'binary'
[p0,p1,index] = binarysplit(p,x,index,cost,[],sosgroups,sosvariables);
case 'integer'
[p0,p1] = integersplit(p,x,index,cost,x_min);
case 'semi'
[p0,p1] = semisplit(p,x,index,cost,x_min);
case 'sos1'
[p0,p1] = sos1split(p,x,index,cost,x_min);
otherwise
end
node1.lb = p1.lb;
node1.ub = p1.ub;
node1.depth = p1.depth;
node1.lower = p1.lower;
node1.fixedvariable = globalindex;
node1.fixdir = 'up';
node1.TotalIntegerInfeas = TotalIntegerInfeas;
node1.TotalBinaryInfeas = TotalBinaryInfeas;
node1.IntInfeas = 1-(x(globalindex)-floor(x(globalindex)));
node1.x0 = p1.x0;
node1.binary_variables = p1.binary_variables;
node1.semicont_variables = p1.semicont_variables;
node1.semibounds = p1.semibounds;
node1.pid = pid;pid = pid + 1;
node1.sosgroups = p1.sosgroups;
node1.sosvariables = p1.sosvariables;
node0.lb = p0.lb;
node0.ub = p0.ub;
node0.depth = p0.depth;
node0.lower = p0.lower;
node0.fixedvariable = index;
node0.fixdir = 'down';
node0.TotalIntegerInfeas = TotalIntegerInfeas;
node0.TotalBinaryInfeas = TotalBinaryInfeas;
node0.IntInfeas = x(globalindex)-floor(x(globalindex));
node0.x0 = p0.x0;
node0.binary_variables = p0.binary_variables;
node0.semicont_variables = p0.semicont_variables;
node0.semibounds = p0.semibounds;
node0.pid = pid;pid = pid + 1;
node0.sosgroups = p0.sosgroups;
node0.sosvariables = p0.sosvariables;
if p0_feasible
stack = push(stack,node0);
end
if p1_feasible
stack = push(stack,node1);
end
end
% Lowest cost in any open node
if ~isempty(stack)
lower = min([stack.lower]);
if can_use_ceil_lower
lower = ceil(lower);
end
end
% **********************************
% Get a new node to solve
% **********************************
[node,stack] = pull(stack,p.options.bnb.method,x_min,upper);
if ~isempty(node)
p.lb = node.lb;
p.ub = node.ub;
p.depth = node.depth;
p.lower = node.lower;
p.fixedvariable = node.fixedvariable;
p.fixdir = node.fixdir;
p.TotalIntegerInfeas = node.TotalIntegerInfeas;
p.TotalBinaryInfeas = node.TotalBinaryInfeas;
p.IntInfeas = node.IntInfeas;
p.x0 = node.x0;
p.binary_variables = node.binary_variables;
p.semicont_variables = node.semicont_variables;
p.semibounds = node.semibounds;
% p.Musts = node.Musts;
p.pid = node.pid;
p.sosgroups = node.sosgroups;
p.sosvariables = node.sosvariables;
end
gap = abs((upper-lower)/(1e-3+abs(upper)+abs(lower)));
if isnan(gap)
gap = inf;
end
if p.options.bnb.plotbounds
lowerhist = [lowerhist lower];
upperhist = [upperhist upper];
hold off
plot([lowerhist' upperhist']);
drawnow
end
%DEBUG if p.options.bnb.verbose;fprintf(' %4.0f : %12.3E %7.2f %12.3E %2.0f %2.0f %2.0f %2.0f %2.0f\n',solved_nodes,upper,100*gap,lower,length(stack)+length(node),sedd);end
if p.options.bnb.verbose;fprintf(' %4.0f : %12.3E %7.2f %12.3E %2.0f %s\n',solved_nodes,upper,100*gap,lower,length(stack)+length(node),yalmiperror(output.problem));end
end
if p.options.bnb.verbose;showprogress([num2str2(solved_nodes,3) ' Finishing. Cost: ' num2str(upper) ],p.options.bnb.verbose);end
function stack = push(stackin,p)
if ~isempty(stackin)
stack = [p;stackin];
else
stack(1)=p;
end
%%
function [p,stack] = pull(stack,method,x_min,upper);
if ~isempty(stack)
switch method
case {'depth','depthfirst','depthbreadth','depthproject','depthbest'}
[i,j]=max([stack.depth]);
p=stack(j);
stack = stack([1:1:j-1 j+1:1:end]);
case 'project'
[i,j]=min([stack.projection]);
p=stack(j);
stack = stack([1:1:j-1 j+1:1:end]);
case 'breadth'
[i,j]=min([stack.depth]);
p=stack(j);
stack = stack([1:1:j-1 j+1:1:end]);
case 'best'
[i,j]=min([stack.lower]);
% candidates = find([stack.lower] == stack(j).lower);
% [i,j] = min([stack(candidates).IntInfeas]);
% j = candidates(j);
p=stack(j);
stack = stack([1:1:j-1 j+1:1:end]);
otherwise
end
else
p = [];
end
% **********************************
%% BRANCH VARIABLE
% **********************************
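% Illustrative sketch (not original code): the default 'max' rule picks the
% discrete variable whose relaxed value is furthest from an integer:
%
%   frac = abs(x(all_variables) - round(x(all_variables)));
%   [val,index] = max(frac);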
function [index,whatsplit,globalindex] = branchvariable(x,integer_variables,binary_variables,options,x_min,Weight,p)
all_variables = [integer_variables(:);binary_variables(:)];
if isempty(setdiff(all_variables,p.sosvariables)) & strcmp(options.bnb.branchrule,'sos')
% All variables are in SOS1 constraints
for i = 1:length(p.sosgroups)
dist(i) = (sum(x(p.sosgroups{i}))-max(x(p.sosgroups{i})))/length(p.sosgroups{i});
end
% Which SOS to branch on
[val,index] = max(dist);
whatsplit = 'sos1';
globalindex = index;
return
end
switch options.bnb.branchrule
case 'weight'
interror = abs(x(all_variables)-round(x(all_variables)));
[val,index] = max(abs(p.weight(all_variables)).*interror);
case 'first'
index = min(find(abs(x(all_variables)-round(x(all_variables)))>options.bnb.inttol));
case 'last'
index = max(find(abs(x(all_variables)-round(x(all_variables)))>options.bnb.inttol));
case 'min'
nint = find(abs(x(all_variables)-round(x(all_variables)))>options.bnb.inttol);
[val,index] = min(abs(x(nint)));
index = nint(index);
case 'max'
[val,index] = max((abs(x(all_variables)-round(x(all_variables)))));
%[val,index] = max(abs(p.c(all_variables)).^2.*(abs(x(all_variables)-round(x(all_variables)))));
otherwise
error('Branch-rule not supported')
end
if index<=length(integer_variables)
whatsplit = 'integer';
globalindex = integer_variables(index);
else
index = index-length(integer_variables);
whatsplit = 'binary';
globalindex = binary_variables(index);
end
if isempty(index) | ~isempty(p.semicont_variables)
for i = 1:length(p.semicont_variables)
j = p.semicont_variables(i);
if x(j)>= p.semibounds.lb(i) & x(j)<= p.semibounds.ub(i)
s(i) = 0;
elseif x(j)==0
s(i) = 0;
else
s(i) = min([abs(x(j)-0); abs(x(j)-p.semibounds.lb(i));abs(x(j)-p.semibounds.ub(i))]);
end
end
[val2,index2] = max(s);
if isempty(val)
whatsplit = 'semi';
index = index2;
elseif val2>val
% index = p.semicont_variables(index);
whatsplit = 'semi';
index = index2;
end
end
% **********************************
% SPLIT PROBLEM
% **********************************
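% Illustrative sketch (assumption): fixing a binary variable k produces two
% child nodes that differ only in the bounds on that variable:
%
%   p0.lb(k) = 0; p0.ub(k) = 0;   % "down" child
%   p1.lb(k) = 1; p1.ub(k) = 1;   % "up" child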
function [p0,p1,variable] = binarysplit(p,x,index,lower,options,sosgroups,sosvariables)
p0 = p;
p1 = p;
variable = p.binary_variables(index);
tf = ~(ismembcYALMIP(p0.binary_variables,variable));
new_binary = p0.binary_variables(tf);
friends = [];
if ~isempty(sosvariables)
if ismember(variable,sosvariables)
i = 1;
while i<=length(sosgroups)
if ismember(variable,sosgroups{i})
friends = setdiff(sosgroups{i},variable);
break
else
i = i + 1;
end
end
end
end
p0.ub(variable)=0;
p0.lb(variable)=0;
if length(friends) == 1
p0.ub(friends) = 1;
p0.lb(friends) = 1;
end
p0.lower = lower;
p0.depth = p.depth+1;
p0.binary_variables = new_binary;%setdiff1D(p0.binary_variables,variable);
%p0.binary_variables = setdiff(p0.binary_variables,friends);
p1.ub(variable)=1;
p1.lb(variable)=1;
if length(friends) > 1
p1.ub(friends)=0;
p1.lb(friends)=0;
end
p1.binary_variables = new_binary;%p0.binary_variables;%setdiff1D(p1.binary_variables,variable);
%p1.binary_variables = setdiff(p1.binary_variables,friends);
p1.lower = lower;
p1.depth = p.depth+1;
% % *****************************
% % PROCESS MOST PROMISING FIRST
% % (p0 in top of stack)
% % *****************************
if x(variable)>0.5
pt=p1;
p1=p0;
p0=pt;
end
function [p0,p1] = integersplit(p,x,index,lower,options,x_min)
variable = p.integer_variables(index);
current = x(p.integer_variables(index));
lb = floor(current)+1;
ub = floor(current);
% xi<ub
p0 = p;
p0.lower = lower;
p0.depth = p.depth+1;
p0.x0(variable) = ub;
p0.ub(variable)=min(p0.ub(variable),ub);
% xi>lb
p1 = p;
p1.lower = lower;
p1.depth = p.depth+1;
p1.x0(variable) = lb;
p1.lb(variable)=max(p1.lb(variable),lb);
% *****************************
% PROCESS MOST PROMISING FIRST
% *****************************
if lb-current<0.5
pt=p1;
p1=p0;
p0=pt;
end
function [p0,p1] = sos1split(p,x,index,lower,options,x_min)
v = p.sosgroups{index};
n = ceil(length(v)/2);
v1 = v(randperm(length(v),n));
v2 = setdiff(v,v1);
%v1 = v(1:n);
%v2 = v(n+1:end);
% In first node, set v2 to 0 and v1 to sosgroup
p0 = p;p0.lower = lower;
p0.sosgroups{index} = v1;
p0.ub(v2) = 0;
% In second node, set v1 to 0 and v2 to sosgroup
p1 = p;p1.lower = lower;
p1.sosgroups{index} = v2;
p1.ub(v1) = 0;
function [p0,p1] = semisplit(p,x,index,lower,options,x_min)
variable = p.semicont_variables(index);
current = x(p.semicont_variables(index));
p0 = p;
p0.lower = lower;
p0.depth = p.depth+1;
p0.x0(variable) = 0;
p0.lb(variable)=0;
p0.ub(variable)=0;
p1 = p;
p1.lower = lower;
p1.depth = p.depth+1;
p1.x0(variable) = p.semibounds.lb(index);
p1.lb(variable) = p.semibounds.lb(index);
p1.ub(variable) = p.semibounds.ub(index);
p0.semicont_variables = setdiff(p.semicont_variables,variable);
p1.semicont_variables = setdiff(p.semicont_variables,variable);
p0.semibounds.lb(index)=[];
p0.semibounds.ub(index)=[];
p1.semibounds.lb(index)=[];
p1.semibounds.ub(index)=[];
function s = num2str2(x,d,c);
if nargin==3
s = num2str(x,c);
else
s = num2str(x);
end
s = [repmat(' ',1,d-length(s)) s];
function [stack,lower] = prune(stack,upper,options,solved_nodes,p)
% *********************************
% PRUNE STACK W.R.T NEW UPPER BOUND
% *********************************
if ~isempty(stack)
% toolarge = find([stack.lower]>upper*(1-1e-4));
toolarge = find([stack.lower]>upper*(1-options.bnb.prunetol));
if ~isempty(toolarge)
stack(toolarge)=[];
end
end
if ~isempty(stack)
lower = min([stack.lower]);
else
lower = upper;
end
function p = adaptivestrategy(p,upper,solved_nodes)
% **********************************'
% SWITCH NODE SELECTION STRATEGY?
% **********************************'
if strcmp(p.options.bnb.method,'depthproject') & (upper<inf)
p.options.bnb.method = 'project';
end
if strcmp(p.options.bnb.method,'depthbest') & (upper<inf)
p.options.bnb.method = 'best';
end
if strcmp(p.options.bnb.method,'depthprojection') & (upper<inf)
p.options.bnb.method = 'projection';
end
if strcmp(p.options.bnb.method,'depthbreadth') & (upper<inf)
p.options.bnb.method = 'breadth';
end
if strcmp(p.options.bnb.method,'depthest') & (upper<inf)
p.options.bnb.method = 'est';
end
function res = resids(p,x)
res= [];
if p.K.f>0
res = -abs(p.F_struc(1:p.K.f,:)*[1;x]);
end
if p.K.l>0
res = [res;p.F_struc(p.K.f+1:p.K.f+p.K.l,:)*[1;x]];
end
if (length(p.K.s)>1) | p.K.s>0
top = 1+p.K.f+p.K.l;
for i = 1:length(p.K.s)
n = p.K.s(i);
X = p.F_struc(top:top+n^2-1,:)*[1;x];top = top+n^2;
X = reshape(X,n,n);
res = [res;min(eig(X))];
end
end
res = [res;min([p.ub-x;x-p.lb])];
function p = Updatecostbound(p,upper,lower);
if p.simplecost
if ~isinf(upper)
ind = find(p.c);
if p.c(ind)>0
p.ub(ind) = min(p.ub(ind),(upper-p.f)/p.c(ind));
else
p.lb(ind) = max(p.lb(ind),(p.f-upper)/abs(p.c(ind)));
end
end
end
function [x_min,upper] = initializesolution(p);
x_min = zeros(length(p.c),1);
upper = inf;
if p.options.usex0
z = p.x0;
residual = resids(p,z);
relaxed_feasible = all(residual(1:p.K.f)>=-1e-12) & all(residual(1+p.K.f:end)>=-1e-6);
if relaxed_feasible & all(z(p.integer_variables)==fix(z(p.integer_variables))) & all(z(p.binary_variables)==fix(z(p.binary_variables)))
upper = computecost(p.f,p.corig,p.Q,z,p);%upper = p.f+p.c'*z+z'*p.Q*z;
x_min = z;
end
else
p.x0 = zeros(length(p.c),1);
x = p.x0;
z = evaluate_nonlinear(p,x);
residual = resids(p,z);
relaxed_feasible = all(residual(1:p.K.f)>=-p.options.bmibnb.eqtol) & all(residual(1+p.K.f:end)>=p.options.bmibnb.pdtol);
if relaxed_feasible
upper = computecost(p.f,p.corig,p.Q,z,p);%upper = p.f+p.c'*z+z'*p.Q*z;
x_min = x;
end
end
function [p,poriginal,stack] = pruneglobally(p,poriginal,upper,lower,stack,x);
if isempty(p.nonlinear) & (nnz(p.Q)==0) & p.options.bnb.nodetight
pp = poriginal;
if p.K.l > 0
A = -pp.F_struc(1+pp.K.f:pp.K.f+pp.K.l,2:end);
b = pp.F_struc(1+p.K.f:p.K.f+p.K.l,1);
else
A = [];
b = [];
end
if (nnz(p.Q)==0) & ~isinf(upper)
A = [pp.c';-pp.c';A];
b = [upper;-(lower-0.0001);b];
else
% c = p.c;
% Q = p.Q;
% A = [c'+2*x'*Q;A];
% b = [2*x'*Q*x+c'*x;b];
end
[lb,ub,redundant,pss] = milppresolve(A,b,pp.lb,pp.ub,pp.integer_variables,pp.binary_variables,ones(length(pp.lb),1));
if ~isempty(redundant)
if (nnz(p.Q)==0) & ~isinf(upper)
redundant = redundant(redundant>2)-2;
else
% redundant = redundant(redundant>1)-1;
end
if length(redundant)>0
poriginal.K.l=poriginal.K.l-length(redundant);
poriginal.F_struc(poriginal.K.f+redundant,:)=[];
p.K.l=p.K.l-length(redundant);
p.F_struc(p.K.f+redundant,:)=[];
end
end
if ~isempty(stack)
keep = ones(length(stack),1);
for i = 1:length(stack)
stack(i).lb = max([stack(i).lb lb]')';
stack(i).ub = min([stack(i).ub ub]')';
if any(stack(i).lb>stack(i).ub)
keep(i) = 0;
end
end
stack = stack(find(keep));
end
poriginal.lb = max([poriginal.lb lb]')';
poriginal.ub = min([poriginal.ub ub]')';
p.lb = max([p.lb lb]')';
p.ub = min([p.ub ub]')';
end
function [p,poriginal,stack] = fixvariables(p,poriginal,upper,lower,stack,x_min,monotinicity)
% Fix variables
if p.options.bnb.nodefix & (p.K.f == 0) & (nnz(p.Q)==0) & isempty(p.nonlinear)
A = -poriginal.F_struc(poriginal.K.f + (1:poriginal.K.l),2:end);
b = poriginal.F_struc(poriginal.K.f + (1:poriginal.K.l),1);
c = poriginal.c;
[fix_up,fix_down] = presolve_fixvariables(A,b,c,poriginal.lb,poriginal.ub,monotinicity);
%
poriginal.lb(fix_up) = 1;
p.lb(fix_up) = 1;
% not_in_obj = find(p.c==0);
% constrained_blow = all(poriginal.F_struc(1:poriginal.K.l,1+not_in_obj)>=0,1);
% sdp_positive = sdpmonotinicity(not_in_obj) == -1;
% can_fix = not_in_obj(find(constrained_blow & sdp_positive));
%
% still_on = find(p.lb==0 & p.ub==1);
% p.lb(intersect(can_fix,still_on)) = 1;
% still_on = find(poriginal.lb==0 & poriginal.ub==1);
% poriginal.lb(intersect(can_fix,still_on)) = 1;
if ~isempty(stack) & ~(isempty(fix_up) & isempty(fix_down))
keep = ones(length(stack),1);
for i = 1:length(stack)
stack(i).lb = max([stack(i).lb poriginal.lb]')';
stack(i).ub = min([stack(i).ub poriginal.ub]')';
if any(stack(i).lb>stack(i).ub)
keep(i) = 0;
end
end
stack = stack(find(keep));
end
end
function [feasible,p] = checkmusts(p)
feasible = 1;
if ~isempty(p.Musts)
% for i = 1:size(p.Musts,1)
% if all(p.ub(find(p.Musts(i,:)))==0)
% 1
% end
% end
%
% TurnedOff = find(p.ub == 0);
% if ~isempty(TurnedOff)
% p.Musts(:,TurnedOff) = 0;
% Failure = find(sum(p.Musts,2)==0);
% if ~isempty(Failure)
% 1;%feasible = 0;
% return
% end
% TurnedOn = find(sum(p.Musts,2)==1);
% if ~isempty(TurnedOn)
% % p.lb(TurnedOn) = 1;
% end
% end
end
function stack = prunecardinality(p,poriginal,stack,lower,upper)
% if length(poriginal.binary_variables)==length(poriginal.c) & nnz(poriginal.Q)==0 & all(poriginal.c)>0
% card_max_low = lower/max(p.c);
% card_max_high = upper/min(p.c);
% [card_max_low card_max_high]
% keep = ones(1,length(stack));
% for i = 1:length(stack)
% fixed_zero = nnz(stack(i).ub==0);
% fixed_one = nnz(stack(i).lb==1);
% if fixed_one>= card_max_high
% 1
% end
% end
% end
|
github
|
EnricoGiordano1992/LMI-Matlab-master
|
update_monomial_bounds.m
|
.m
|
LMI-Matlab-master/yalmip/modules/global/update_monomial_bounds.m
| 2,544 |
utf_8
|
d5ad776f01d587f1e7c5b70478d633e6
|
function model = update_monomial_bounds(model,these)
if nargin == 1 & all(model.variabletype<=2) & any(model.variabletype)
% Fast code for purely quadratic case
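% Illustrative example (not original code): for z = x*y with -1 <= x <= 2
% and 1 <= y <= 3 the four box corners give
%   corners = [-1*1  2*1  -1*3  2*3] = [-1 2 -3 6]
% so the monomial bound is -3 <= z <= 6.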
x = model.bilinears(:,2);
y = model.bilinears(:,3);
z = model.bilinears(:,1);
corners = [model.lb(x).*model.lb(y) model.ub(x).*model.lb(y) model.lb(x).*model.ub(y) model.ub(x).*model.ub(y)];
maxz = max(corners,[],2);
minz = min(corners,[],2);
model.lb(z) = max(model.lb(z),minz);
model.ub(z) = min(model.ub(z),maxz);
return
end
if nargin == 1
polynomials = find((model.variabletype ~= 0));
else
polynomials = find((model.variabletype ~= 0));
polynomials = polynomials(find(ismember(polynomials,these)));
end
for i = 1:length(polynomials)
j = polynomials(i);
if j<=length(model.lb)
monomials = model.monomtable(j,:);
bound = powerbound(model.lb,model.ub,monomials);
model.lb(j) = max(model.lb(j),bound(1));
model.ub(j) = min(model.ub(j),bound(2));
[inversebound,var] = inversepowerbound(model.lb,model.ub,monomials, polynomials(i));
if ~isempty(var)
model.lb(var) = max(model.lb(var),inversebound(1));
model.ub(var) = min(model.ub(var),inversebound(2));
end
end
end
function [inversebound,var] = inversepowerbound(lb,ub,monomials,polynomial);
inversebound = [];
var = [];
[i,var,val] = find(monomials);
if all(val == fix(val)) & all(val >= 0)
if length(var) == 1
if even(val)
if val > 2
inversebound = [-inf inf];
aux = inf;
if ~isinf(lb(polynomial))
if lb(polynomial) >= 0
aux = lb(polynomial)^(1/val);
end
end
if ~isinf(ub(polynomial))
if ub(polynomial) >= 0
aux = max(aux,ub(polynomial)^(1/val));
end
else
aux = inf;
end
inversebound = [-aux aux];
else
var = [];
end
elseif val >= 3
inversebound = [-inf inf];
if ~isinf(lb(polynomial))
inversebound(1,1) = sign(lb(polynomial))*abs(lb(polynomial))^(1/val);
end
if ~isinf(ub(polynomial))
inversebound(1,2) = sign(ub(polynomial))*abs(ub(polynomial))^(1/val);
end
end
else
var = [];
end
else
var = [];
end
|
github
|
EnricoGiordano1992/LMI-Matlab-master
|
updatebounds_recursive_evaluation.m
|
.m
|
LMI-Matlab-master/yalmip/modules/global/updatebounds_recursive_evaluation.m
| 1,128 |
utf_8
|
ea7474325c03f63f6c8e286a9ab08de8
|
function p = updatebounds_recursive_evaluation(p)
if p.changedbounds
if isempty(p.evalMap) & all(p.variabletype <= 2)
% Bilinear/quadratic case can be done much faster
p = updatemonomialbounds(p);
else
for i = 1:length(p.evaluation_scheme)
switch p.evaluation_scheme{i}.group
case 'eval'
for j = 1:length(p.evaluation_scheme{i}.variables)
p = update_one_eval_bound(p,j);
p = update_one_inverseeval_bound(p,j);
end
case 'monom'
for j = 1:length(p.evaluation_scheme{i}.variables)
p = update_one_monomial_bound(p,j);
end
otherwise
end
end
end
% This flag is turned on if a bound tightening function manages to
% tighten the bounds
p.changedbounds = 0;
end
function p = update_one_monomial_bound(p,indicies);
j = p.monomials(indicies);
bound = powerbound(p.lb,p.ub,p.monomtable(j,:));
p.lb(j) = max(p.lb(j),bound(1));
p.ub(j) = min(p.ub(j),bound(2));
|