plateform | repo_name | name | ext | path | size | source_encoding | md5 | text
---|---|---|---|---|---|---|---|---
github | lcnbeapp/beapp-master | lik_inputdependentweibull.m | .m | beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/lik_inputdependentweibull.m | 17,398 | windows_1250 | c288cecc00237a7f8b7ae149561644c4 |
function lik = lik_inputdependentweibull(varargin)
%LIK_INPUTDEPENDENTWEIBULL Create a right censored input dependent Weibull likelihood structure
%
% Description
% LIK = LIK_INPUTDEPENDENTWEIBULL('PARAM1',VALUE1,'PARAM2',VALUE2,...)
% creates a likelihood structure for right censored input dependent
% Weibull survival model in which the named parameters have the
% specified values. Any unspecified parameters are set to default
% values.
%
% LIK = LIK_INPUTDEPENDENTWEIBULL(LIK,'PARAM1',VALUE1,'PARAM2',VALUE2,...)
% modifies a likelihood structure with the named parameters
% altered with the specified values.
%
% Parameters for Weibull likelihood [default]
% shape - shape parameter r [1]
% shape_prior - prior for shape [prior_logunif]
%
% Note! If the prior is 'prior_fixed' then the parameter in
% question is considered fixed and it is not handled in
% optimization, grid integration, MCMC etc.
%
% The likelihood is defined as follows:
% p(y|f1,f2,z) = prod_{i=1}^{n} [ (r*exp(f2_i))^(1-z_i)
%                * exp( (1-z_i)*(-f1_i)
%                       + (1-z_i)*(r*exp(f2_i)-1)*log(y_i)
%                       - exp(-f1_i)*y_i^(r*exp(f2_i)) ) ]
%
% where r is the shape parameter of Weibull distribution.
% z is a vector of censoring indicators with z = 0 for uncensored event
% and z = 1 for right censored event. Here the second latent variable f2
% implies the input dependence of the shape parameter in the original
% Weibull likelihood.
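%
% Written out as a log likelihood (matching the implementation in
% lik_inputdependentweibull_ll below), with a = r this is
%
%   log p(y|f1,f2,z) = sum_i [ (1-z_i)*( log(a*exp(f2_i))
%                      + (a*exp(f2_i)-1)*log(y_i) - f1_i )
%                      - exp(-f1_i)*y_i^(a*exp(f2_i)) ]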
%
% When using the Weibull likelihood you need to give the vector z
% as an extra parameter to each function that requires also y.
% For example, you should call gpla_e as follows: gpla_e(w, gp,
% x, y, 'z', z)
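%
% A minimal usage sketch, assuming inputs x, survival times y and
% censoring indicators z already exist in the workspace; GPCF_SEXP is
% used here only as an example covariance function, and the exact
% GP_SET configuration needed for the two latent processes f1 and f2
% is an assumption that may need adjusting:
%
%   lik = lik_inputdependentweibull('shape', 1.2);
%   gp  = gp_set('lik', lik, 'cf', {gpcf_sexp() gpcf_sexp()}, ...
%                'latent_method', 'Laplace');
%   e   = gpla_e(gp_pak(gp), gp, x, y, 'z', z);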
%
% See also
% GP_SET, LIK_*, PRIOR_*
%
% Copyright (c) 2011 Jaakko Riihimäki
% Copyright (c) 2011 Aki Vehtari
% Copyright (c) 2012 Ville Tolvanen
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'LIK_INPUTDEPENDENTWEIBULL';
ip.addOptional('lik', [], @isstruct);
ip.addParamValue('shape',1, @(x) isscalar(x) && x>0);
ip.addParamValue('shape_prior',prior_logunif(), @(x) isstruct(x) || isempty(x));
ip.parse(varargin{:});
lik=ip.Results.lik;
if isempty(lik)
init=true;
lik.nondiagW=true;
lik.type = 'Inputdependent-Weibull';
else
if ~isfield(lik,'type') || ~isequal(lik.type,'Inputdependent-Weibull')
error('First argument does not seem to be a valid likelihood function structure')
end
init=false;
end
% Initialize parameters
if init || ~ismember('shape',ip.UsingDefaults)
lik.shape = ip.Results.shape;
end
% Initialize prior structure
if init
lik.p=[];
end
if init || ~ismember('shape_prior',ip.UsingDefaults)
lik.p.shape=ip.Results.shape_prior;
end
if init
% Set the function handles to the subfunctions
lik.fh.pak = @lik_inputdependentweibull_pak;
lik.fh.unpak = @lik_inputdependentweibull_unpak;
lik.fh.lp = @lik_inputdependentweibull_lp;
lik.fh.lpg = @lik_inputdependentweibull_lpg;
lik.fh.ll = @lik_inputdependentweibull_ll;
lik.fh.llg = @lik_inputdependentweibull_llg;
lik.fh.llg2 = @lik_inputdependentweibull_llg2;
lik.fh.llg3 = @lik_inputdependentweibull_llg3;
lik.fh.invlink = @lik_inputdependentweibull_invlink;
lik.fh.predy = @lik_inputdependentweibull_predy;
lik.fh.recappend = @lik_inputdependentweibull_recappend;
end
end
function [w,s] = lik_inputdependentweibull_pak(lik)
%LIK_INPUTDEPENDENTWEIBULL_PAK Combine likelihood parameters into one vector.
%
% Description
% W = LIK_INPUTDEPENDENTWEIBULL_PAK(LIK) takes a likelihood structure LIK and
% combines the parameters into a single row vector W. This is a
% mandatory subfunction used for example in energy and gradient
% computations.
%
% w = log(lik.shape)
%
% See also
% LIK_INPUTDEPENDENTWEIBULL_UNPAK, GP_PAK
w=[];s={};
if ~isempty(lik.p.shape)
w = log(lik.shape);
s = [s; 'log(weibull.shape)'];
[wh sh] = lik.p.shape.fh.pak(lik.p.shape);
w = [w wh];
s = [s; sh];
end
end
function [lik, w] = lik_inputdependentweibull_unpak(lik, w)
%LIK_INPUTDEPENDENTWEIBULL_UNPAK Extract likelihood parameters from the vector.
%
% Description
% [LIK, W] = LIK_INPUTDEPENDENTWEIBULL_UNPAK(LIK, W) takes a likelihood
% structure LIK and extracts the parameters from the vector W
% to the LIK structure. This is a mandatory subfunction used
% for example in energy and gradient computations.
%
% Assignment is inverse of
% w = log(lik.shape)
%
% See also
% LIK_INPUTDEPENDENTWEIBULL_PAK, GP_UNPAK
if ~isempty(lik.p.shape)
lik.shape = exp(w(1));
w = w(2:end);
[p, w] = lik.p.shape.fh.unpak(lik.p.shape, w);
lik.p.shape = p;
end
end
function lp = lik_inputdependentweibull_lp(lik, varargin)
%LIK_INPUTDEPENDENTWEIBULL_LP log(prior) of the likelihood parameters
%
% Description
% LP = LIK_INPUTDEPENDENTWEIBULL_LP(LIK) takes a likelihood structure LIK and
% returns log(p(th)), where th collects the parameters. This
% subfunction is needed when there are likelihood parameters.
%
% See also
% LIK_INPUTDEPENDENTWEIBULL_LLG, LIK_INPUTDEPENDENTWEIBULL_LLG3, LIK_INPUTDEPENDENTWEIBULL_LLG2, GPLA_E
% If prior for shape parameter, add its contribution
lp=0;
if ~isempty(lik.p.shape)
lp = lik.p.shape.fh.lp(lik.shape, lik.p.shape) +log(lik.shape);
end
end
function lpg = lik_inputdependentweibull_lpg(lik)
%LIK_INPUTDEPENDENTWEIBULL_LPG d log(prior)/dth of the likelihood
% parameters th
%
% Description
% LPG = LIK_INPUTDEPENDENTWEIBULL_LPG(LIK) takes a likelihood structure LIK and
% returns d log(p(th))/dth, where th collects the parameters.
% This subfunction is needed when there are likelihood parameters.
%
% See also
% LIK_INPUTDEPENDENTWEIBULL_LLG, LIK_INPUTDEPENDENTWEIBULL_LLG3, LIK_INPUTDEPENDENTWEIBULL_LLG2, GPLA_G
lpg=[];
if ~isempty(lik.p.shape)
% Evaluate the gprior with respect to shape
ggs = lik.p.shape.fh.lpg(lik.shape, lik.p.shape);
lpg = ggs(1).*lik.shape + 1;
if length(ggs) > 1
lpg = [lpg ggs(2:end)];
end
end
end
function ll = lik_inputdependentweibull_ll(lik, y, ff, z)
%LIK_INPUTDEPENDENTWEIBULL_LL Log likelihood
%
% Description
% LL = LIK_INPUTDEPENDENTWEIBULL_LL(LIK, Y, F, Z) takes a likelihood
% structure LIK, survival times Y, censoring indicators Z, and
% latent values F. Returns the log likelihood, log p(y|f,z).
% This subfunction is needed when using Laplace approximation
% or MCMC for inference with non-Gaussian likelihoods. This
% subfunction is also used in information criteria (DIC, WAIC)
% computations.
%
% See also
% LIK_INPUTDEPENDENTWEIBULL_LLG, LIK_INPUTDEPENDENTWEIBULL_LLG3, LIK_INPUTDEPENDENTWEIBULL_LLG2, GPLA_E
if isempty(z)
error(['lik_inputdependentweibull -> lik_inputdependentweibull_ll: missing z! '...
'Weibull likelihood needs the censoring '...
'indicators as an extra input z. See, for '...
'example, lik_inputdependentweibull and gpla_e. ']);
end
f=ff(:);
n=size(y,1);
f1=f(1:n);
f2=f((n+1):2*n);
expf2=exp(f2);
expf2(isinf(expf2))=realmax;
a = lik.shape;
ll = sum((1-z).*(log(a*expf2) + (a*expf2-1).*log(y)-f1) - exp(-f1).*y.^(a*expf2));
end
function llg = lik_inputdependentweibull_llg(lik, y, ff, param, z)
%LIK_INPUTDEPENDENTWEIBULL_LLG Gradient of the log likelihood
%
% Description
% LLG = LIK_INPUTDEPENDENTWEIBULL_LLG(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, survival times Y, censoring indicators Z and
% latent values F. Returns the gradient of the log likelihood
% with respect to PARAM. At the moment PARAM can be 'param' or
% 'latent'. This subfunction is needed when using Laplace
% approximation or MCMC for inference with non-Gaussian likelihoods.
%
% See also
% LIK_INPUTDEPENDENTWEIBULL_LL, LIK_INPUTDEPENDENTWEIBULL_LLG2, LIK_INPUTDEPENDENTWEIBULL_LLG3, GPLA_E
if isempty(z)
error(['lik_inputdependentweibull -> lik_inputdependentweibull_llg: missing z! '...
'Weibull likelihood needs the censoring '...
'indicators as an extra input z. See, for '...
'example, lik_inputdependentweibull and gpla_e. ']);
end
f=ff(:);
n=size(y,1);
f1=f(1:n);
f2=f((n+1):2*n);
expf2=exp(f2);
expf2(isinf(expf2))=realmax;
a = lik.shape;
switch param
case 'param'
llg = sum((1-z).*(1./a + expf2.*log(y)) - exp(-f1).*y.^(a.*expf2).*log(y).*expf2);
% correction for the log transformation
llg = llg.*lik.shape;
case 'latent'
llg1 = -(1-z) + exp(-f1).*y.^(a.*expf2);
llg2 = (1-z).*(1 + a.*expf2.*log(y)) - exp(-f1).*y.^(a.*expf2).*log(y).*a.*expf2;
llg = [llg1; llg2];
end
end
function llg2 = lik_inputdependentweibull_llg2(lik, y, ff, param, z)
%LIK_INPUTDEPENDENTWEIBULL_LLG2 Second gradients of the log likelihood
%
% Description
% LLG2 = LIK_INPUTDEPENDENTWEIBULL_LLG2(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, survival times Y, censoring indicators Z, and
% latent values F. Returns the hessian of the log likelihood
% with respect to PARAM. At the moment PARAM can be only
% 'latent'. LLG2 is a vector with diagonal elements of the
% Hessian matrix (off diagonals are zero). This subfunction
% is needed when using Laplace approximation or EP for
% inference with non-Gaussian likelihoods.
%
% See also
% LIK_INPUTDEPENDENTWEIBULL_LL, LIK_INPUTDEPENDENTWEIBULL_LLG, LIK_INPUTDEPENDENTWEIBULL_LLG3, GPLA_E
if isempty(z)
error(['lik_inputdependentweibull -> lik_inputdependentweibull_llg2: missing z! '...
'Weibull likelihood needs the censoring '...
'indicators as an extra input z. See, for '...
'example, lik_inputdependentweibull and gpla_e. ']);
end
a = lik.shape;
f=ff(:);
n=size(y,1);
f1=f(1:n);
f2=f((n+1):2*n);
expf2=exp(f2);
expf2(isinf(expf2))=realmax;
switch param
case 'param'
case 'latent'
t1=exp(-f1).*y.^(a.*expf2);
t2=log(y).*a.*expf2;
t3=t1.*t2;
llg2_11 = -t1;
llg2_12 = t3;
llg2_22 = (1-z).*t2 - (t2 + 1).*t3;
llg2 = [llg2_11 llg2_12; llg2_12 llg2_22];
case 'latent+param'
t1=expf2.*log(y);
t2=exp(-f1).*y.^(a.*expf2);
t3=t1.*t2;
llg2_1 = t3;
llg2_2 = (1-z).*t1 - (t1.*a + 1).*t3;
llg2 = [llg2_1; llg2_2];
% correction due to the log transformation
llg2 = llg2.*lik.shape;
end
end
function llg3 = lik_inputdependentweibull_llg3(lik, y, ff, param, z)
%LIK_INPUTDEPENDENTWEIBULL_LLG3 Third gradients of the log likelihood
%
% Description
% LLG3 = LIK_INPUTDEPENDENTWEIBULL_LLG3(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, survival times Y, censoring indicators Z and
% latent values F and returns the third gradients of the log
% likelihood with respect to PARAM. At the moment PARAM can be
% only 'latent'. LLG3 is a vector with third gradients. This
% subfunction is needed when using Laplace approximation for
% inference with non-Gaussian likelihoods.
%
% See also
% LIK_INPUTDEPENDENTWEIBULL_LL, LIK_INPUTDEPENDENTWEIBULL_LLG, LIK_INPUTDEPENDENTWEIBULL_LLG2, GPLA_E, GPLA_G
if isempty(z)
error(['lik_inputdependentweibull -> lik_inputdependentweibull_llg3: missing z! '...
'Weibull likelihood needs the censoring '...
'indicators as an extra input z. See, for '...
'example, lik_inputdependentweibull and gpla_e. ']);
end
a = lik.shape;
f=ff(:);
n=size(y,1);
f1=f(1:n);
f2=f((n+1):2*n);
expf2=exp(f2);
expf2(isinf(expf2))=realmax;
switch param
case 'param'
case 'latent'
t1=a.*expf2.*log(y);
t2=exp(-f1).*y.^(a.*expf2);
t3=t2.*t1;
t4=t3.*t1;
nl=2;
llg3=zeros(nl,nl,nl,n);
llg3(1,1,1,:) = t2;
llg3(2,2,1,:) = t4 + t3;
llg3(2,1,2,:) = llg3(2,2,1,:);
llg3(1,2,2,:) = llg3(2,2,1,:);
llg3(2,1,1,:) = -t3;
llg3(1,2,1,:) = llg3(2,1,1,:);
llg3(1,1,2,:) = llg3(2,1,1,:);
llg3(2,2,2,:) = (1-z).*t1 - t4.*t1 - 3.*t4 - t3;
case 'latent2+param'
t1 = log(y).*expf2;
t2 = exp(-f1).*y.^(a*expf2);
t3 = t2.*t1;
t4 = t3.*t1;
llg3_11 = -t3;
llg3_12 = a.*t4 + t3;
llg3_22 = (1-z).*t1 - a.^2.*t4.*t1 - 3.*a.*t4 - t3;
llg3 = [diag(llg3_11) diag(llg3_12); diag(llg3_12) diag(llg3_22)];
% correction due to the log transformation
llg3 = llg3.*lik.shape;
end
end
function [lpy, Ey, Vary] = lik_inputdependentweibull_predy(lik, Ef, Varf, yt, zt)
%LIK_INPUTDEPENDENTWEIBULL_PREDY Returns the predictive mean, variance and density of y
%
% Description
% LPY = LIK_INPUTDEPENDENTWEIBULL_PREDY(LIK, EF, VARF, YT, ZT)
% Returns logarithm of the predictive density PY of YT, that is
% p(yt | zt) = \int p(yt | f, zt) p(f|y) df.
% This requires also the survival times YT, censoring indicators ZT.
% This subfunction is needed when computing posterior predictive
% distributions for future observations.
%
% [LPY, EY, VARY] = LIK_INPUTDEPENDENTWEIBULL_PREDY(LIK, EF, VARF) takes a
% likelihood structure LIK, posterior mean EF and posterior
% Variance VARF of the latent variable and returns the
% posterior predictive mean EY and variance VARY of the
% observations related to the latent variables. This subfunction
% is needed when computing posterior predictive distributions for
% future observations.
%
%
% See also
% GPLA_PRED, GPEP_PRED, GPMC_PRED
if isempty(zt)
error(['lik_inputdependentweibull -> lik_inputdependentweibull_predy: missing zt!'...
'Weibull likelihood needs the censoring '...
'indicators as an extra input zt. See, for '...
'example, lik_inputdependentweibull and gpla_e. ']);
end
yc = 1-zt;
r = lik.shape;
Ef = Ef(:);
ntest = 0.5*size(Ef,1);
Ef1=Ef(1:ntest); Ef2=Ef(ntest+1:end);
% Varf1=squeeze(Varf(1,1,:)); Varf2=squeeze(Varf(2,2,:));
if size(Varf,2) == size(Varf,1)
Varf1=diag(Varf(1:ntest,1:ntest));Varf2=diag(Varf(ntest+1:end,ntest+1:end));
else
Varf1=Varf(:,1); Varf2=Varf(:,2);
end
Ey=[];
Vary=[];
% Evaluate the posterior predictive densities of the given observations
lpy = zeros(length(yt),1);
for i2=1:ntest
m1=Ef1(i2); m2=Ef2(i2);
s1=sqrt(Varf1(i2)); s2=sqrt(Varf2(i2));
% Function handle for Weibull * Gaussian_f1 * Gaussian_f2
pd=@(f1,f2) exp(yc(i2).*((log(r) + f2) + (r.*exp(f2)-1).*log(yt(i2))-f1) - exp(-f1).*yt(i2).^(r*exp(f2))) ...
.*norm_pdf(f1,Ef1(i2),sqrt(Varf1(i2))).*norm_pdf(f2,Ef2(i2),sqrt(Varf2(i2)));
% Integrate over latent variables
lpy(i2) = log(dblquad(pd, m1-6.*s1, m1+6.*s1, m2-6.*s2, m2+6.*s2));
end
end
function p = lik_inputdependentweibull_invlink(lik, f)
%LIK_INPUTDEPENDENTWEIBULL_INVLINK Returns values of inverse link function
%
% Description
% P = LIK_INPUTDEPENDENTWEIBULL_INVLINK(LIK, F) takes a likelihood structure LIK and
% latent values F and returns the values of inverse link function P.
% This subfunction is needed when using function gp_predprctmu.
%
% See also
% LIK_INPUTDEPENDENTWEIBULL_LL, LIK_INPUTDEPENDENTWEIBULL_PREDY
p = exp(f);
end
function reclik = lik_inputdependentweibull_recappend(reclik, ri, lik)
%RECAPPEND Append the parameters to the record
%
% Description
% RECLIK = LIK_INPUTDEPENDENTWEIBULL_RECAPPEND(RECLIK, RI, LIK) takes a
% likelihood record structure RECLIK, record index RI and
% likelihood structure LIK with the current MCMC samples of
% the parameters. Returns RECLIK which contains all the old
% samples and the current samples from LIK. This subfunction
% is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC
if nargin == 2
% Initialize the record
reclik.type = 'Inputdependent-Weibull';
reclik.nondiagW=true;
% Initialize parameter
% reclik.shape = [];
% Set the function handles
reclik.fh.pak = @lik_inputdependentweibull_pak;
reclik.fh.unpak = @lik_inputdependentweibull_unpak;
reclik.fh.lp = @lik_inputdependentweibull_lp;
reclik.fh.lpg = @lik_inputdependentweibull_lpg;
reclik.fh.ll = @lik_inputdependentweibull_ll;
reclik.fh.llg = @lik_inputdependentweibull_llg;
reclik.fh.llg2 = @lik_inputdependentweibull_llg2;
reclik.fh.llg3 = @lik_inputdependentweibull_llg3;
reclik.fh.invlink = @lik_inputdependentweibull_invlink;
reclik.fh.predy = @lik_inputdependentweibull_predy;
reclik.fh.recappend = @lik_inputdependentweibull_recappend;
reclik.p=[];
reclik.p.shape=[];
if ~isempty(ri.p.shape)
reclik.p.shape = ri.p.shape;
end
else
% Append to the record
reclik.shape(ri,:)=lik.shape;
if ~isempty(lik.p.shape)
reclik.p.shape = lik.p.shape.fh.recappend(reclik.p.shape, ri, lik.p.shape);
end
end
end
github | lcnbeapp/beapp-master | gpep_pred.m | .m | beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gpep_pred.m | 22,353 | UNKNOWN | 9fdd2594b62565a6cf43df3eb051355a |
function [Eft, Varft, lpyt, Eyt, Varyt] = gpep_pred(gp, x, y, varargin)
%GPEP_PRED Predictions with Gaussian Process EP approximation
%
% Description
% [EFT, VARFT] = GPEP_PRED(GP, X, Y, XT, OPTIONS)
% takes a GP structure together with matrix X of training
% inputs and vector Y of training targets, and evaluates the
% predictive distribution at test inputs XT. Returns a posterior
% mean EFT and variance VARFT of latent variables.
%
% [EFT, VARFT, LPYT] = GPEP_PRED(GP, X, Y, XT, 'yt', YT, OPTIONS)
% returns also logarithm of the predictive density LPYT of the
% observations YT at test input locations XT. This can be used
% for example in cross-validation. Here Y has to be a vector.
%
% [EFT, VARFT, LPYT, EYT, VARYT] = GPEP_PRED(GP, X, Y, XT, OPTIONS)
% returns also the posterior predictive mean EYT and variance VARYT.
%
% [EF, VARF, LPY, EY, VARY] = GPEP_PRED(GP, X, Y, OPTIONS)
% evaluates the predictive distribution at training inputs X
% and logarithm of the predictive density LPY of the training
% observations Y.
%
% OPTIONS is optional parameter-value pair
% predcf - an index vector telling which covariance functions are
% used for prediction. Default is all (1:gpcfn).
% See additional information below.
% tstind - a vector/cell array defining, which rows of X belong
% to which training block in *IC type sparse models.
% Default is []. In case of PIC, a cell array
% containing index vectors specifying the blocking
% structure for test data. IN FIC and CS+FIC a
% vector of length n that points out the test inputs
% that are also in the training set (if none, set
% TSTIND = [])
% yt - optional observed yt in test points (see below)
% z - optional observed quantity in triplet (x_i,y_i,z_i)
% Some likelihoods may use this. For example, in case of
% Poisson likelihood we have z_i=E_i, that is, expected value
% for ith case.
% zt - optional observed quantity in triplet (xt_i,yt_i,zt_i)
% Some likelihoods may use this. For example, in case of
% Poisson likelihood we have z_i=E_i, that is, the expected
% value for the ith case.
%
% NOTE! In case of FIC and PIC sparse approximation the
% prediction for only some PREDCF covariance functions is just
% an approximation since the covariance functions are coupled in
% the approximation and are not strictly speaking additive
% anymore.
%
% For example, if you use covariance such as K = K1 + K2 your
% predictions Eft1 = gpep_pred(GP, X, Y, X, 'predcf', 1) and
% Eft2 = gpep_pred(gp, x, y, x, 'predcf', 2) should sum up to
% Eft = gpep_pred(gp, x, y, x). That is Eft = Eft1 + Eft2. With
% FULL model this is true but with FIC and PIC this is true only
% approximately. That is Eft \approx Eft1 + Eft2.
%
% With CS+FIC the predictions are exact if the PREDCF covariance
% functions are all in the FIC part or if they are CS
% covariances.
%
% NOTE! When making predictions with a subset of covariance
% functions with FIC approximation the predictive variance can
% in some cases be ill-behaved i.e. negative or unrealistically
% small. This may happen because of the approximative nature of
% the prediction.
%
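% A minimal usage sketch, assuming training data x, y and test inputs
% xt (and optionally test targets yt) exist in the workspace;
% LIK_PROBIT and GPCF_SEXP are used only as example likelihood and
% covariance functions, and GP_OPTIM is assumed available for
% hyperparameter optimisation:
%
%   gp = gp_set('lik', lik_probit(), 'cf', gpcf_sexp(), ...
%               'latent_method', 'EP');
%   gp = gp_optim(gp, x, y);
%   [Eft, Varft, lpyt] = gpep_pred(gp, x, y, xt, 'yt', yt);
%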
% See also
% GPEP_E, GPEP_G, GP_PRED, DEMO_SPATIAL, DEMO_CLASSIFIC
% Copyright (c) 2007-2010 Jarno Vanhatalo
% Copyright (c) 2010 Heikki Peura
% Copyright (c) 2011 Pasi Jylänki
% Copyright (c) 2012 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'GPEP_PRED';
ip.addRequired('gp', @isstruct);
ip.addRequired('x', @(x) ~isempty(x) && isreal(x) && all(isfinite(x(:))))
ip.addRequired('y', @(x) ~isempty(x) && isreal(x) && all(isfinite(x(:))))
ip.addOptional('xt', [], @(x) isempty(x) || (isreal(x) && all(isfinite(x(:)))))
ip.addParamValue('yt', [], @(x) isreal(x) && all(isfinite(x(:))))
ip.addParamValue('z', [], @(x) isreal(x) && all(isfinite(x(:))))
ip.addParamValue('zt', [], @(x) isreal(x) && all(isfinite(x(:))))
ip.addParamValue('predcf', [], @(x) isempty(x) || ...
isvector(x) && isreal(x) && all(isfinite(x)&x>0))
ip.addParamValue('tstind', [], @(x) isempty(x) || iscell(x) ||...
(isvector(x) && isreal(x) && all(isfinite(x)&x>0)))
if numel(varargin)==0 || isnumeric(varargin{1})
% inputParser should handle this, but it doesn't
ip.parse(gp, x, y, varargin{:});
else
ip.parse(gp, x, y, [], varargin{:});
end
xt=ip.Results.xt;
yt=ip.Results.yt;
z=ip.Results.z;
zt=ip.Results.zt;
predcf=ip.Results.predcf;
tstind=ip.Results.tstind;
if isempty(xt)
xt=x;
if isempty(tstind)
if iscell(gp)
gptype=gp{1}.type;
else
gptype=gp.type;
end
switch gptype
case {'FULL' 'VAR' 'DTC' 'SOR'}
tstind = [];
case {'FIC' 'CS+FIC'}
tstind = 1:size(x,1);
case 'PIC'
if iscell(gp)
tstind = gp{1}.tr_index;
else
tstind = gp.tr_index;
end
end
end
if isempty(yt)
yt=y;
end
if isempty(zt)
zt=z;
end
end
[tn, tnin] = size(x);
switch gp.type
% ============================================================
% FULL
% ============================================================
case 'FULL' % Predictions with FULL GP model
[e, edata, eprior, tautilde, nutilde, L] = gpep_e(gp_pak(gp), gp, x, y, 'z', z);
[K, C]=gp_trcov(gp,x);
kstarstar = gp_trvar(gp, xt, predcf);
ntest=size(xt,1);
K_nf=gp_cov(gp,xt,x,predcf);
[n,nin] = size(x);
if all(tautilde > 0) && ~isequal(gp.latent_opt.optim_method, 'robust-EP')
% This is the usual case where likelihood is log concave
% for example, Poisson and probit
sqrttautilde = sqrt(tautilde);
Stildesqroot = sparse(1:n, 1:n, sqrttautilde, n, n);
if ~isfield(gp,'meanf')
if issparse(L) % If compact support covariance functions are used
% the covariance matrix will be sparse
z=Stildesqroot*ldlsolve(L,Stildesqroot*(C*nutilde));
else
z=Stildesqroot*(L'\(L\(Stildesqroot*(C*nutilde))));
end
Eft=K_nf*(nutilde-z); % The mean, zero mean GP
else
z = Stildesqroot*(L'\(L\(Stildesqroot*(C))));
Eft_zm=K_nf*(nutilde-z*nutilde); % The mean, zero mean GP
Ks = eye(size(z)) - z; % inv(K + S^-1)*S^-1
Ksy = Ks*nutilde;
[RB RAR] = mean_predf(gp,x,xt,K_nf',Ks,Ksy,'EP',Stildesqroot.^2);
Eft = Eft_zm + RB; % The mean
end
% Compute variance
if nargout > 1
if issparse(L)
V = ldlsolve(L, Stildesqroot*K_nf');
Varft = kstarstar - sum(K_nf.*(Stildesqroot*V)',2);
else
V = (L\Stildesqroot)*K_nf';
Varft = kstarstar - sum(V.^2)';
end
if isfield(gp,'meanf')
Varft = Varft + RAR;
end
end
else
% We might end up here if the likelihood is not log concave
% For example Student-t likelihood.
%{
z=tautilde.*(L'*(L*nutilde));
Eft=K_nf*(nutilde-z);
if nargout > 1
S = diag(tautilde);
V = K_nf*S*L';
Varft = kstarstar - sum((K_nf*S).*K_nf,2) + sum(V.^2,2);
end
%}
% An alternative implementation for avoiding negative variances
[Eft,V]=pred_var(tautilde,K,K_nf,nutilde);
Varft=kstarstar-V;
end
% ============================================================
% FIC
% ============================================================
case 'FIC' % Predictions with FIC sparse approximation for GP
[e, edata, eprior, tautilde, nutilde, L, La, b] = gpep_e(gp_pak(gp), gp, x, y, 'z', z);
% Here tstind = 1 if the prediction is made for the training set
if nargin > 6
if ~isempty(tstind) && length(tstind) ~= size(x,1)
error('tstind (if provided) has to be of same length as x.')
end
else
tstind = [];
end
u = gp.X_u;
m = size(u,1);
K_fu = gp_cov(gp,x,u,predcf); % f x u
K_nu=gp_cov(gp,xt,u,predcf);
K_uu = gp_trcov(gp,u,predcf); % u x u, noiseless covariance K_uu
K_uu = (K_uu+K_uu')./2; % ensure the symmetry of K_uu
kstarstar=gp_trvar(gp,xt,predcf);
if all(tautilde > 0) && ~isequal(gp.latent_opt.optim_method, 'robust-EP')
% From this on evaluate the prediction
% See Snelson and Ghahramani (2007) for details
% p=iLaKfu*(A\(iLaKfu'*mutilde));
p = b';
ntest=size(xt,1);
Eft = K_nu*(K_uu\(K_fu'*p));
% if the prediction is made for training set, evaluate Lav also for prediction points
if ~isempty(tstind)
[Kv_ff, Cv_ff] = gp_trvar(gp, xt(tstind,:), predcf);
Luu = chol(K_uu)';
B=Luu\(K_fu');
Qv_ff=sum(B.^2)';
Lav = Kv_ff-Qv_ff;
Eft(tstind) = Eft(tstind) + Lav.*p;
end
% Compute variance
if nargout > 1
%Varft(i1,1)=kstarstar(i1) - (sum(Knf(i1,:).^2./La') - sum((Knf(i1,:)*L).^2));
Luu = chol(K_uu)';
B=Luu\(K_fu');
B2=Luu\(K_nu');
Varft = kstarstar - sum(B2'.*(B*(repmat(La,1,m).\B')*B2)',2) + sum((K_nu*(K_uu\(K_fu'*L))).^2, 2);
% if the prediction is made for training set, evaluate Lav also for prediction points
if ~isempty(tstind)
Varft(tstind) = Varft(tstind) - 2.*sum( B2(:,tstind)'.*(repmat((La.\Lav),1,m).*B'),2) ...
+ 2.*sum( B2(:,tstind)'*(B*L).*(repmat(Lav,1,m).*L), 2) ...
- Lav./La.*Lav + sum((repmat(Lav,1,m).*L).^2,2);
end
end
else
% Robust-EP
[Eft,V]=pred_var2(tautilde,nutilde,L,K_uu,K_fu,b,K_nu);
Varft=kstarstar-V;
end
% ============================================================
% PIC
% ============================================================
case {'PIC' 'PIC_BLOCK'} % Predictions with PIC sparse approximation for GP
% Calculate some help matrices
u = gp.X_u;
ind = gp.tr_index;
[e, edata, eprior, tautilde, nutilde, L, La, b] = gpep_e(gp_pak(gp), gp, x, y, 'z', z);
K_fu = gp_cov(gp, x, u, predcf); % f x u
K_nu = gp_cov(gp, xt, u, predcf); % n x u
K_uu = gp_trcov(gp, u, predcf); % u x u, noiseless covariance K_uu
% From this on evaluate the prediction
% See Snelson and Ghahramani (2007) for details
% p=iLaKfu*(A\(iLaKfu'*mutilde));
p = b';
iKuuKuf = K_uu\K_fu';
w_bu=zeros(length(xt),length(u));
w_n=zeros(length(xt),1);
for i=1:length(ind)
w_bu(tstind{i},:) = repmat((iKuuKuf(:,ind{i})*p(ind{i},:))', length(tstind{i}),1);
K_nf = gp_cov(gp, xt(tstind{i},:), x(ind{i},:), predcf); % n x u
w_n(tstind{i},:) = K_nf*p(ind{i},:);
end
Eft = K_nu*(iKuuKuf*p) - sum(K_nu.*w_bu,2) + w_n;
% Compute variance
if nargout > 1
kstarstar = gp_trvar(gp, xt, predcf);
KnfL = K_nu*(iKuuKuf*L);
Varft = zeros(length(xt),1);
for i=1:length(ind)
v_n = gp_cov(gp, xt(tstind{i},:), x(ind{i},:), predcf); % n x u
v_bu = K_nu(tstind{i},:)*iKuuKuf(:,ind{i});
KnfLa = K_nu*(iKuuKuf(:,ind{i})/chol(La{i}));
KnfLa(tstind{i},:) = KnfLa(tstind{i},:) - (v_bu + v_n)/chol(La{i});
Varft = Varft + sum((KnfLa).^2,2);
KnfL(tstind{i},:) = KnfL(tstind{i},:) - v_bu*L(ind{i},:) + v_n*L(ind{i},:);
end
Varft = kstarstar - (Varft - sum((KnfL).^2,2));
end
% ============================================================
% CS+FIC
% ============================================================
case 'CS+FIC' % Predictions with CS+FIC sparse approximation for GP
% Here tstind = 1 if the prediction is made for the training set
if nargin > 6
if ~isempty(tstind) && length(tstind) ~= size(x,1)
error('tstind (if provided) has to be of same length as x.')
end
else
tstind = [];
end
u = gp.X_u;
m = length(u);
n = size(x,1);
n2 = size(xt,1);
[e, edata, eprior, tautilde, nutilde, L, La, b] = gpep_e(gp_pak(gp), gp, x, y, 'z', z);
% Indexes to all non-compact support and compact support covariances.
cf1 = [];
cf2 = [];
% Indexes to non-CS and CS covariances, which are used for predictions
predcf1 = [];
predcf2 = [];
ncf = length(gp.cf);
% Loop through all covariance functions
for i = 1:ncf
% Non-CS covariances
if ~isfield(gp.cf{i},'cs')
cf1 = [cf1 i];
% If used for prediction
if ~isempty(find(predcf==i))
predcf1 = [predcf1 i];
end
% CS-covariances
else
cf2 = [cf2 i];
% If used for prediction
if ~isempty(find(predcf==i))
predcf2 = [predcf2 i];
end
end
end
if isempty(predcf1) && isempty(predcf2)
predcf1 = cf1;
predcf2 = cf2;
end
% Determine the types of the covariance functions used
% in making the prediction.
if ~isempty(predcf1) && isempty(predcf2) % Only non-CS covariances
ptype = 1;
predcf2 = cf2;
elseif isempty(predcf1) && ~isempty(predcf2) % Only CS covariances
ptype = 2;
predcf1 = cf1;
else % Both non-CS and CS covariances
ptype = 3;
end
K_fu = gp_cov(gp,x,u,predcf1); % f x u
K_uu = gp_trcov(gp,u,predcf1); % u x u, noiseless covariance K_uu
K_uu = (K_uu+K_uu')./2; % ensure the symmetry of K_uu
K_nu=gp_cov(gp,xt,u,predcf1);
Kcs_nf = gp_cov(gp, xt, x, predcf2);
p = b';
ntest=size(xt,1);
% Calculate the predictive mean according to the type of
% covariance functions used for making the prediction
if ptype == 1
Eft = K_nu*(K_uu\(K_fu'*p));
elseif ptype == 2
Eft = Kcs_nf*p;
else
Eft = K_nu*(K_uu\(K_fu'*p)) + Kcs_nf*p;
end
% evaluate also Lav if the prediction is made for training set
if ~isempty(tstind)
[Kv_ff, Cv_ff] = gp_trvar(gp, xt(tstind,:), predcf1);
Luu = chol(K_uu)';
B=Luu\(K_fu');
Qv_ff=sum(B.^2)';
Lav = Kv_ff-Qv_ff;
end
% Add also Lav if the prediction is made for training set
% and non-CS covariance function is used for prediction
if ~isempty(tstind) && (ptype == 1 || ptype == 3)
Eft(tstind) = Eft(tstind) + Lav.*p;
end
% Evaluate the variance
if nargout > 1
Knn_v = gp_trvar(gp,xt,predcf);
Luu = chol(K_uu)';
B=Luu\(K_fu');
B2=Luu\(K_nu');
p = amd(La);
iLaKfu = La\K_fu;
% Calculate the predictive variance according to the type
% covariance functions used for making the prediction
if ptype == 1 || ptype == 3
% FIC part of the covariance
Varft = Knn_v - sum(B2'.*(B*(La\B')*B2)',2) + sum((K_nu*(K_uu\(K_fu'*L))).^2, 2);
% Add Lav2 if the prediction is made for the training set
if ~isempty(tstind)
% Non-CS covariance
if ptype == 1
Kcs_nf = sparse(tstind,1:n,Lav,n2,n);
% Non-CS and CS covariances
else
Kcs_nf = Kcs_nf + sparse(tstind,1:n,Lav,n2,n);
end
% Add Lav2 inside Kcs_nf
Varft = Varft - sum((Kcs_nf(:,p)/chol(La(p,p))).^2,2) + sum((Kcs_nf*L).^2, 2) ...
- 2.*sum((Kcs_nf*iLaKfu).*(K_uu\K_nu')',2) + 2.*sum((Kcs_nf*L).*(L'*K_fu*(K_uu\K_nu'))' ,2);
% In case of both non-CS and CS prediction covariances add
% only Kcs_nf if the prediction is not done for the training set
elseif ptype == 3
Varft = Varft - sum((Kcs_nf(:,p)/chol(La(p,p))).^2,2) + sum((Kcs_nf*L).^2, 2) ...
- 2.*sum((Kcs_nf*iLaKfu).*(K_uu\K_nu')',2) + 2.*sum((Kcs_nf*L).*(L'*K_fu*(K_uu\K_nu'))' ,2);
end
% Prediction with only CS covariance
elseif ptype == 2
Varft = Knn_v - sum((Kcs_nf(:,p)/chol(La(p,p))).^2,2) + sum((Kcs_nf*L).^2, 2) ;
end
end
% ============================================================
% DTC/(VAR)
% ============================================================
case {'DTC' 'VAR' 'SOR'} % Predictions with DTC or variational sparse approximation for GP
[e, edata, eprior, tautilde, nutilde, L, La, b] = gpep_e(gp_pak(gp), gp, x, y, 'z', z);
% Here tstind = 1 if the prediction is made for the training set
if nargin > 6
if ~isempty(tstind) && length(tstind) ~= size(x,1)
error('tstind (if provided) has to be of same length as x.')
end
else
tstind = [];
end
u = gp.X_u;
m = size(u,1);
K_fu = gp_cov(gp,x,u,predcf); % f x u
K_nu=gp_cov(gp,xt,u,predcf);
K_uu = gp_trcov(gp,u,predcf); % u x u, noiseless covariance K_uu
K_uu = (K_uu+K_uu')./2; % ensure the symmetry of K_uu
kstarstar=gp_trvar(gp,xt,predcf);
% From this on evaluate the prediction
p = b';
ntest=size(xt,1);
Eft = K_nu*(K_uu\(K_fu'*p));
% if the prediction is made for training set, evaluate Lav also for prediction points
if ~isempty(tstind)
[Kv_ff, Cv_ff] = gp_trvar(gp, xt(tstind,:), predcf);
Luu = chol(K_uu)';
B=Luu\(K_fu');
Qv_ff=sum(B.^2)';
Lav = Kv_ff-Cv_ff;
Eft(tstind) = Eft(tstind);% + Lav.*p;
end
if nargout > 1
% Compute variances of predictions
%Varft(i1,1)=kstarstar(i1) - (sum(Knf(i1,:).^2./La') - sum((Knf(i1,:)*L).^2));
Luu = chol(K_uu)';
B=Luu\(K_fu');
B2=Luu\(K_nu');
Varft = sum(B2'.*(B*(repmat(La,1,m).\B')*B2)',2) + sum((K_nu*(K_uu\(K_fu'*L))).^2, 2);
switch gp.type
case {'VAR' 'DTC'}
Varft = kstarstar - Varft;
case 'SOR'
Varft = sum(B2.^2,1)' - Varft;
end
end
% ============================================================
% SSGP
% ============================================================
case 'SSGP' % Predictions with sparse spectral sampling approximation for GP
% The approximation is proposed by M. Lazaro-Gredilla, J. Quinonero-Candela and A. Figueiras-Vidal
% in Microsoft Research technical report MSR-TR-2007-152 (November 2007)
% NOTE! This does not work at the moment.
[e, edata, eprior, tautilde, nutilde, L, S, b] = gpep_e(gp_pak(gp), gp, x, y, 'z', z);
%param = varargin{1};
Phi_f = gp_trcov(gp, x);
Phi_a = gp_trcov(gp, xt);
m = size(Phi_f,2);
ntest=size(xt,1);
Eft = Phi_a*(Phi_f'*b');
if nargout > 1
% Compute variances of predictions
%Varft(i1,1)=kstarstar(i1) - (sum(Knf(i1,:).^2./La') - sum((Knf(i1,:)*L).^2));
Varft = sum(Phi_a.^2,2) - sum(Phi_a.*((Phi_f'*(repmat(S,1,m).*Phi_f))*Phi_a')',2) + sum((Phi_a*(Phi_f'*L)).^2,2);
for i1=1:ntest
switch gp.lik.type
case 'Probit'
p1(i1,1)=norm_cdf(Eft(i1,1)/sqrt(1+Varft(i1))); % Probability p(y_new=1)
case 'Poisson'
p1 = NaN;
end
end
end
end
% ============================================================
% Evaluate also the predictive mean and variance of new observation(s)
% ============================================================
if nargout == 3
if isempty(yt)
lpyt=[];
else
lpyt = gp.lik.fh.predy(gp.lik, Eft, Varft, yt, zt);
end
elseif nargout > 3
[lpyt, Eyt, Varyt] = gp.lik.fh.predy(gp.lik, Eft, Varft, yt, zt);
end
end
function [m,S]=pred_var(tau_q,K,A,b)
% helper function for determining
%
% m = A * inv( K+ inv(diag(tau_q)) ) * inv(diag(tau_q)) *b
% S = diag( A * inv( K+ inv(diag(tau_q)) ) * A)
%
% when the site variances tau_q may be negative
%
ii1=find(tau_q>0); n1=length(ii1); W1=sqrt(tau_q(ii1));
ii2=find(tau_q<0); n2=length(ii2); W2=sqrt(abs(tau_q(ii2)));
m=A*b;
b=K*b;
S=zeros(size(A,1),1);
u=0;
U=0;
if ~isempty(ii1)
% Cholesky decomposition for the positive sites
L1=(W1*W1').*K(ii1,ii1);
L1(1:n1+1:end)=L1(1:n1+1:end)+1;
L1=chol(L1);
U = bsxfun(@times,A(:,ii1),W1')/L1;
u = L1'\(W1.*b(ii1));
m = m-U*u;
S = S+sum(U.^2,2);
end
if ~isempty(ii2)
% Cholesky decomposition for the negative sites
V=bsxfun(@times,K(ii2,ii1),W1')/L1;
L2=(W2*W2').*(V*V'-K(ii2,ii2));
L2(1:n2+1:end)=L2(1:n2+1:end)+1;
[L2,pd]=chol(L2);
if pd==0
U = bsxfun(@times,A(:,ii2),W2')/L2 -U*(bsxfun(@times,V,W2)'/L2);
u = L2'\(W2.*b(ii2)) -L2'\(bsxfun(@times,V,W2)*u);
m = m+U*u;
S = S-sum(U.^2,2);
else
fprintf('Posterior covariance is negative definite.\n')
end
end
end
function [m_q,S_q]=pred_var2(tautilde,nutilde,L,K_uu,K_fu,D,K_nu)
% function for determining the parameters of the q-distribution
% when site variances tau_q may be negative
%
% q(f) = N(f|0,K)*exp( -0.5*f'*diag(tau_q)*f + nu_q'*f )/Z_q = N(f|m_q,S_q)
%
% S_q = inv(inv(K)+diag(tau_q)) where K is sparse approximation for prior
% covariance
% m_q = S_q*nu_q;
%
% det(eye(n)+K*diag(tau_q))) = det(L1)^2 * det(L2)^2
% where L1 and L2 are upper triangular
%
% see Expectation consistent approximate inference (Opper & Winther, 2005)
n=length(nutilde);
U = K_fu;
S = 1+tautilde.*D;
B = tautilde./S;
BUiL = bsxfun(@times, B, U)/L';
% iKS = diag(B) - BUiL*BUiL';
Ktnu = D.*nutilde + U*(K_uu\(U'*nutilde));
m_q = nutilde - B.*Ktnu + BUiL*(BUiL'*Ktnu);
kstar = K_nu*(K_uu\K_fu');
m_q = kstar*m_q;
S_q = sum(bsxfun(@times,B',kstar.^2),2) - sum((kstar*BUiL).^2,2);
% S_q = kstar*iKS*kstar';
end
github | lcnbeapp/beapp-master | gpcf_constant.m | .m | beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gpcf_constant.m | 14,290 | utf_8 | a923f90b02c067d618848207c02b8980 |
function gpcf = gpcf_constant(varargin)
%GPCF_CONSTANT Create a constant covariance function
%
% Description
% GPCF = GPCF_CONSTANT('PARAM1',VALUE1,'PARAM2',VALUE2,...)
% creates a constant covariance function structure in
% which the named parameters have the specified values. Any
% unspecified parameters are set to default values.
%
% GPCF = GPCF_CONSTANT(GPCF,'PARAM1',VALUE1,'PARAM2',VALUE2,...)
% modifies a covariance function structure with the named
% parameters altered with the specified values.
%
% Parameters for constant covariance function [default]
% constSigma2 - magnitude (squared) [0.1]
% constSigma2_prior - prior for constSigma2 [prior_logunif]
%
% Note! If the prior is 'prior_fixed' then the parameter in
% question is considered fixed and it is not handled in
% optimization, grid integration, MCMC etc.
%
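% A minimal usage sketch, assuming training data x, y exist in the
% workspace; here the constant term is combined with GPCF_SEXP and a
% Gaussian likelihood as an example (other choices work equally well):
%
%   gpcf1 = gpcf_constant('constSigma2', 1);
%   gpcf2 = gpcf_sexp();
%   gp    = gp_set('lik', lik_gaussian(), 'cf', {gpcf1 gpcf2});
%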
% See also
% GP_SET, GPCF_*, PRIOR_*, MEAN_*
% Copyright (c) 2007-2010 Jarno Vanhatalo
% Copyright (c) 2010 Jaakko Riihimaki, Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'GPCF_CONSTANT';
ip.addOptional('gpcf', [], @isstruct);
ip.addParamValue('constSigma2',0.1, @(x) isscalar(x) && x>0);
ip.addParamValue('constSigma2_prior',prior_logunif, @(x) isstruct(x) || isempty(x));
ip.parse(varargin{:});
gpcf=ip.Results.gpcf;
if isempty(gpcf)
init=true;
gpcf.type = 'gpcf_constant';
else
if ~isfield(gpcf,'type') || ~isequal(gpcf.type,'gpcf_constant')
error('First argument does not seem to be a valid covariance function structure')
end
init=false;
end
% Initialize parameter
if init || ~ismember('constSigma2',ip.UsingDefaults)
gpcf.constSigma2=ip.Results.constSigma2;
end
% Initialize prior structure
if init
gpcf.p=[];
end
if init || ~ismember('constSigma2_prior',ip.UsingDefaults)
gpcf.p.constSigma2=ip.Results.constSigma2_prior;
end
if init
% Set the function handles to the subfunctions
gpcf.fh.pak = @gpcf_constant_pak;
gpcf.fh.unpak = @gpcf_constant_unpak;
gpcf.fh.lp = @gpcf_constant_lp;
gpcf.fh.lpg = @gpcf_constant_lpg;
gpcf.fh.cfg = @gpcf_constant_cfg;
gpcf.fh.ginput = @gpcf_constant_ginput;
gpcf.fh.cov = @gpcf_constant_cov;
gpcf.fh.trcov = @gpcf_constant_trcov;
gpcf.fh.trvar = @gpcf_constant_trvar;
gpcf.fh.recappend = @gpcf_constant_recappend;
end
end
function [w, s] = gpcf_constant_pak(gpcf, w)
%GPCF_CONSTANT_PAK Combine GP covariance function parameters into
% one vector.
%
% Description
% W = GPCF_CONSTANT_PAK(GPCF) takes a covariance function
% structure GPCF and combines the covariance function
% parameters and their hyperparameters into a single row
% vector W. This is a mandatory subfunction used for example
% in energy and gradient computations.
%
% w = [ log(gpcf.constSigma2)
% (hyperparameters of gpcf.constSigma2)]'
%
% See also
% GPCF_CONSTANT_UNPAK
w = []; s = {};
if ~isempty(gpcf.p.constSigma2)
w = log(gpcf.constSigma2);
s = [s 'log(constant.constSigma2)'];
% Hyperparameters of constSigma2
[wh sh] = gpcf.p.constSigma2.fh.pak(gpcf.p.constSigma2);
w = [w wh];
s = [s sh];
end
end
function [gpcf, w] = gpcf_constant_unpak(gpcf, w)
%GPCF_CONSTANT_UNPAK Sets the covariance function parameters
% into the structure
%
% Description
% [GPCF, W] = GPCF_CONSTANT_UNPAK(GPCF, W) takes a covariance
% function structure GPCF and a parameter vector W, and
% returns a covariance function structure identical to the
% input, except that the covariance parameters have been set
% to the values in W. Deletes the values set to GPCF from W
% and returns the modified W. This is a mandatory subfunction
% used for example in energy and gradient computations.
%
% Assignment is inverse of
% w = [ log(gpcf.constSigma2)
% (hyperparameters of gpcf.constSigma2)]'
%
% See also
% GPCF_CONSTANT_PAK
gpp=gpcf.p;
if ~isempty(gpp.constSigma2)
gpcf.constSigma2 = exp(w(1));
w = w(2:end);
% Hyperparameters of constSigma2
[p, w] = gpcf.p.constSigma2.fh.unpak(gpcf.p.constSigma2, w);
gpcf.p.constSigma2 = p;
end
end
function lp = gpcf_constant_lp(gpcf)
%GPCF_CONSTANT_LP Evaluate the log prior of covariance function parameters
%
% Description
% LP = GPCF_CONSTANT_LP(GPCF) takes a covariance function
% structure GPCF and returns log(p(th)), where th collects the
% parameters. This is a mandatory subfunction used for example
% in energy computations.
%
% See also
% GPCF_CONSTANT_PAK, GPCF_CONSTANT_UNPAK, GPCF_CONSTANT_LPG, GP_E
% Evaluate the prior contribution to the error. The parameters that
% are sampled are from space W = log(w) where w is all the
% "real" samples. On the other hand errors are evaluated in the
% W-space so we need take into account also the Jacobian of
% transformation W -> w = exp(W). See Gelman et.al., 2004,
% Bayesian data Analysis, second edition, p24.
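%
% For example, with s2 = constSigma2 and W = log(s2), the change of
% variables gives p_W(W) = p_s2(exp(W))*exp(W), so the log prior in
% the W-space gains an extra log(s2) term, which is added below.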
lp = 0;
gpp=gpcf.p;
if ~isempty(gpp.constSigma2)
lp = gpp.constSigma2.fh.lp(gpcf.constSigma2, gpp.constSigma2) +log(gpcf.constSigma2);
end
end
function lpg = gpcf_constant_lpg(gpcf)
%GPCF_CONSTANT_LPG Evaluate gradient of the log prior with respect
% to the parameters.
%
% Description
% LPG = GPCF_CONSTANT_LPG(GPCF) takes a covariance function
% structure GPCF and returns LPG = d log (p(th))/dth, where th
% is the vector of parameters. This is a mandatory subfunction
% used for example in gradient computations.
%
% See also
% GPCF_CONSTANT_PAK, GPCF_CONSTANT_UNPAK, GPCF_CONSTANT_LP, GP_G
lpg = [];
gpp=gpcf.p;
if ~isempty(gpcf.p.constSigma2)
lpgs = gpp.constSigma2.fh.lpg(gpcf.constSigma2, gpp.constSigma2);
lpg = [lpg lpgs(1).*gpcf.constSigma2+1 lpgs(2:end)];
end
end
function DKff = gpcf_constant_cfg(gpcf, x, x2, mask, i1)
%GPCF_CONSTANT_CFG Evaluate gradient of covariance function
% with respect to the parameters
%
% Description
% DKff = GPCF_CONSTANT_CFG(GPCF, X) takes a
% covariance function structure GPCF, a matrix X of input
% vectors and returns DKff, the gradients of covariance matrix
% Kff = k(X,X) with respect to th (cell array with matrix
% elements). This is a mandatory subfunction used in gradient
% computations.
%
% DKff = GPCF_CONSTANT_CFG(GPCF, X, X2) takes a
% covariance function structure GPCF, a matrix X of input
% vectors and returns DKff, the gradients of covariance matrix
% Kff = k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using sparse
% approximations (e.g. FIC).
%
% DKff = GPCF_CONSTANT_CFG(GPCF, X, [], MASK)
% takes a covariance function structure GPCF, a matrix X of
% input vectors and returns DKff, the diagonal of gradients of
% covariance matrix Kff = k(X,X2) with respect to th (cell
% array with matrix elements). This subfunction is needed when
% using sparse approximations (e.g. FIC).
%
% See also
% GPCF_CONSTANT_PAK, GPCF_CONSTANT_UNPAK, GPCF_CONSTANT_LP, GP_G
[n, m] =size(x);
DKff = {};
if nargin==5
% Use memory save option
if i1==0
% Return number of hyperparameters
if ~isempty(gpcf.p.constSigma2)
DKff=1;
else
DKff=0;
end
return
end
end
% Evaluate: DKff{1} = d Kff / d constSigma2
% NOTE! Here we have already taken into account that the parameters are transformed
% through log() and thus dK/dlog(p) = p * dK/dp
% evaluate the gradient for training covariance
if nargin == 2 || (isempty(x2) && isempty(mask))
if ~isempty(gpcf.p.constSigma2)
DKff{1}=ones(n)*gpcf.constSigma2;
end
% Evaluate the gradient of non-symmetric covariance (e.g. K_fu)
elseif nargin == 3 || isempty(mask)
if size(x,2) ~= size(x2,2)
error('gpcf_constant -> _cfg: The number of columns in x and x2 has to be the same. ')
end
if ~isempty(gpcf.p.constSigma2)
DKff{1}=ones([n size(x2,1)])*gpcf.constSigma2;
end
% Evaluate: DKff{1} = d mask(Kff,I) / d constSigma2
elseif nargin == 4 || nargin == 5
if ~isempty(gpcf.p.constSigma2)
DKff{1}=ones(n,1)*gpcf.constSigma2; % d mask(Kff,I) / d constSigma2
end
end
if nargin==5
DKff=DKff{1};
end
end
function DKff = gpcf_constant_ginput(gpcf, x, x2, i1)
%GPCF_CONSTANT_GINPUT Evaluate gradient of covariance function with
% respect to x.
%
% Description
% DKff = GPCF_CONSTANT_GINPUT(GPCF, X) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% DKff = GPCF_CONSTANT_GINPUT(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% DKff = GPCF_CONSTANT_GINPUT(GPCF, X, X2, i) takes a covariance
% function structure GPCF, a matrix X of input vectors
% and returns DKff, the gradients of covariance matrix Kff =
% k(X,X2), or k(X,X) if X2 is empty, with respect to ith
% covariate in X. This subfunction is needed when using
% memory save option in gp_set.
%
% See also
% GPCF_CONSTANT_PAK, GPCF_CONSTANT_UNPAK, GPCF_CONSTANT_LP, GP_G
[n, m] =size(x);
if nargin==4
% Use memory save option
if i1==0
% Return number of covariates
if isfield(gpcf,'selectedVariables')
DKff=length(gpcf.selectedVariables);
else
DKff=m;
end
return
end
end
if nargin == 2 || isempty(x2)
ii1 = 0;
for i=1:m
for j = 1:n
ii1 = ii1 + 1;
DKff{ii1} = zeros(n);
end
end
elseif nargin == 3 || nargin == 4
%K = feval(gpcf.fh.cov, gpcf, x, x2);
ii1 = 0;
for i=1:m
for j = 1:n
ii1 = ii1 + 1;
DKff{ii1} = zeros(n, size(x2,1));
gprior(ii1) = 0;
end
end
end
if nargin==5
DKff=DKff{1};
end
end
function C = gpcf_constant_cov(gpcf, x1, x2, varargin)
%GP_CONSTANT_COV Evaluate covariance matrix between two input vectors
%
% Description
% C = GP_CONSTANT_COV(GP, TX, X) takes in covariance function
% of a Gaussian process GP and two matrixes TX and X that
% contain input vectors to GP. Returns covariance matrix C.
% Every element ij of C contains covariance between inputs i
% in TX and j in X. This is a mandatory subfunction used for
% example in prediction and energy computations.
%
% See also
% GPCF_CONSTANT_TRCOV, GPCF_CONSTANT_TRVAR, GP_COV, GP_TRCOV
if isempty(x2)
x2=x1;
end
[n1,m1]=size(x1);
[n2,m2]=size(x2);
if m1~=m2
error('the number of columns of X1 and X2 has to be same')
end
C = ones(n1,n2)*gpcf.constSigma2;
end
function C = gpcf_constant_trcov(gpcf, x)
%GP_CONSTANT_TRCOV Evaluate training covariance matrix of inputs
%
% Description
% C = GP_CONSTANT_TRCOV(GP, TX) takes in covariance function
% of a Gaussian process GP and matrix TX that contains
% training input vectors. Returns covariance matrix C. Every
% element ij of C contains covariance between inputs i and j
% in TX. This is a mandatory subfunction used for example in
% prediction and energy computations.
%
% See also
% GPCF_CONSTANT_COV, GPCF_CONSTANT_TRVAR, GP_COV, GP_TRCOV
n =size(x,1);
C = ones(n,n)*gpcf.constSigma2;
end
function C = gpcf_constant_trvar(gpcf, x)
%GP_CONSTANT_TRVAR Evaluate training variance vector
%
% Description
% C = GP_CONSTANT_TRVAR(GPCF, TX) takes in covariance function
% of a Gaussian process GPCF and matrix TX that contains
% training inputs. Returns variance vector C. Every
% element i of C contains variance of input i in TX. This is
% a mandatory subfunction used for example in prediction and
% energy computations.
%
% See also
% GPCF_CONSTANT_COV, GP_COV, GP_TRCOV
n =size(x,1);
C = ones(n,1)*gpcf.constSigma2;
end
function reccf = gpcf_constant_recappend(reccf, ri, gpcf)
%RECAPPEND Record append
%
% Description
% RECCF = GPCF_CONSTANT_RECAPPEND(RECCF, RI, GPCF) takes a
% covariance function record structure RECCF, record index RI
% and covariance function structure GPCF with the current MCMC
% samples of the parameters. Returns RECCF which contains all
% the old samples and the current samples from GPCF. This
% subfunction is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC and GP_MC -> RECAPPEND
if nargin == 2
% Initialize the record
reccf.type = 'gpcf_constant';
% Initialize parameters
reccf.constSigma2 = [];
% Set the function handles
reccf.fh.pak = @gpcf_constant_pak;
reccf.fh.unpak = @gpcf_constant_unpak;
reccf.fh.lp = @gpcf_constant_lp;
reccf.fh.lpg = @gpcf_constant_lpg;
reccf.fh.cfg = @gpcf_constant_cfg;
reccf.fh.cov = @gpcf_constant_cov;
reccf.fh.trcov = @gpcf_constant_trcov;
reccf.fh.trvar = @gpcf_constant_trvar;
reccf.fh.recappend = @gpcf_constant_recappend;
reccf.p=[];
reccf.p.constSigma2=[];
if ~isempty(ri.p.constSigma2)
reccf.p.constSigma2 = ri.p.constSigma2;
end
else
% Append to the record
gpp = gpcf.p;
% record constSigma2
reccf.constSigma2(ri,:)=gpcf.constSigma2;
if isfield(gpp,'constSigma2') && ~isempty(gpp.constSigma2)
reccf.p.constSigma2 = gpp.constSigma2.fh.recappend(reccf.p.constSigma2, ri, gpcf.p.constSigma2);
end
end
end
github | lcnbeapp/beapp-master | lik_gaussiansmt.m | .m | beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/lik_gaussiansmt.m | 11,228 | utf_8 | e1313f56ba16306bf8fd48d805556b9d |
function lik = lik_gaussiansmt(varargin)
%LIK_GAUSSIANSMT Create a Gaussian scale mixture likelihood structure
% with priors producing approximation of the Student's t
%
% Description
% LIK = LIK_GAUSSIANSMT('ndata',N,'PARAM1',VALUE1,'PARAM2',VALUE2,...)
% creates a Gaussian scale mixture likelihood structure
% (with priors producing approximation of the Student's t) in
% which the named parameters have the specified values. Any
% unspecified parameters are set to default values. Obligatory
% parameter is 'ndata', which tells the number of data points,
% that is, number of mixture components.
%
% LIK = LIK_GAUSSIANSMT(LIK,'PARAM1',VALUE1,'PARAM2',VALUE2,...)
% modifies a likelihood structure with the named
% parameters altered with the specified values.
%
% Parameters for the Gaussian scale mixture approximation of the
% Student's t
% sigma2 - Variances of the mixture components.
% The default is 1 x ndata vector of 0.1s.
% U - Part of the parameter expansion, see below.
% The default is 1 x ndata vector of 1s.
% tau2 - Part of the parameter expansion, see below.
% The default is 0.1.
% alpha - Part of the parameter expansion, see below.
% The default is 0.5.
% nu - Degrees of freedom. The default is 4.
% nu_prior - Prior for nu. The default is prior_fixed().
% gibbs - Whether Gibbs sampling is 'on' (default) or 'off'.
%
% Parametrisation and non-informative priors for alpha and tau
% are the same as in Gelman et al. (2004), pages 304-305:
% y-E[y] ~ N(0, alpha^2 * U),
% where U = diag(u_1, u_2, ..., u_n)
% u_i ~ Inv-Chi^2(nu, tau^2)
%
% The parameters of this likelihood can be inferred only by
% Gibbs sampling by calling GP_MC.
%
% If degrees of freedom nu is given a prior (other than
% prior_fixed), it is sampled using slice sampling within Gibbs
% sampling with limits [0,128].
%
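% A minimal usage sketch, assuming training data x, y exist in the
% workspace; GPCF_SEXP is used only as an example covariance function
% and the number of samples is arbitrary:
%
%   n   = size(x,1);
%   lik = lik_gaussiansmt('ndata', n, 'nu', 4);
%   gp  = gp_set('lik', lik, 'cf', gpcf_sexp());
%   rgp = gp_mc(gp, x, y, 'nsamples', 200);
%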
% See also
% GP_SET, PRIOR_*, LIK_*
% Copyright (c) 1998,1999,2010 Aki Vehtari
% Copyright (c) 2007-2010 Jarno Vanhatalo
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'LIK_GAUSSIANSMT';
ip.addOptional('lik', [], @isstruct);
ip.addParamValue('ndata',[], @(x) isscalar(x) && x>0 && mod(x,1)==0);
ip.addParamValue('sigma2',[], @(x) isvector(x) && all(x>0));
ip.addParamValue('U',[], @isvector);
ip.addParamValue('tau2',0.1, @isscalar);
ip.addParamValue('alpha',0.5, @isscalar);
ip.addParamValue('nu',4, @isscalar);
ip.addParamValue('nu_prior',[], @(x) isstruct(x) || isempty(x));
ip.addParamValue('censored',[], @(x) isempty(x) || iscell(x));
ip.addParamValue('gibbs','on', @(x) ismember(x,{'on' 'off'}));
ip.parse(varargin{:});
lik=ip.Results.lik;
if isempty(lik)
init=true;
lik.type = 'Gaussian-smt';
else
if ~isfield(lik,'type') || ~isequal(lik.type,'Gaussian-smt')
error('First argument does not seem to be a valid likelihood function structure')
end
init=false;
end
% Initialize parameters
if init || ~ismember('ndata',ip.UsingDefaults)
ndata = ip.Results.ndata;
lik.ndata=ndata;
lik.r = zeros(ndata,1);
else
ndata = lik.ndata;
end
if isempty(ndata)
error('NDATA has to be defined')
end
if init || ~ismember('sigma2',ip.UsingDefaults)
sigma2=ip.Results.sigma2;
if isempty(sigma2)
lik.sigma2 = repmat(0.1,ndata,1);
else
if (size(sigma2,1) == lik.ndata && size(sigma2,2) == 1)
lik.sigma2 = sigma2;
else
error('The size of sigma2 has to be NDATAx1')
end
end
end
if init || ~ismember('U',ip.UsingDefaults)
U=ip.Results.U;
if isempty(U)
lik.U = ones(ndata,1);
else
if size(U,1) == lik.ndata
lik.U = U;
else
error('the size of U has to be NDATAx1')
end
end
end
if init || ~ismember('tau2',ip.UsingDefaults)
lik.tau2=ip.Results.tau2;
end
if init || ~ismember('alpha',ip.UsingDefaults)
lik.alpha=ip.Results.alpha;
end
if init || ~ismember('nu',ip.UsingDefaults)
lik.nu=ip.Results.nu;
end
if init || ~ismember('censored',ip.UsingDefaults)
censored=ip.Results.censored;
if ~isempty(censored)
lik.censored = censored{1};
yy = censored{2};
if lik.censored(1) >= lik.censored(2)
error('lik_gaussiansmt -> if censored model is used, the limits must be given in increasing order.')
end
imis1 = [];
imis2 = [];
if lik.censored(1) > -inf
imis1 = find(yy<=lik.censored(1));
end
if lik.censored(2) < inf
imis2 = find(yy>=lik.censored(2));
end
lik.cy = yy([imis1 ; imis2])';
lik.imis = [imis1 ; imis2];
end
end
% Initialize prior structure
lik.p=[];
lik.p.sigma=[];
if init || ~ismember('nu_prior',ip.UsingDefaults)
lik.p.nu=ip.Results.nu_prior;
end
% using Gibbs or not
if init || ~ismember('gibbs',ip.UsingDefaults)
lik.gibbs = ip.Results.gibbs;
end
if init
% Set the function handles to the subfunctions
lik.fh.pak = @lik_gaussiansmt_pak;
lik.fh.unpak = @lik_gaussiansmt_unpak;
lik.fh.lp = @lik_gaussiansmt_lp;
lik.fh.lpg = @lik_gaussiansmt_lpg;
lik.fh.cfg = @lik_gaussiansmt_cfg;
lik.fh.trcov = @lik_gaussiansmt_trcov;
lik.fh.trvar = @lik_gaussiansmt_trvar;
lik.fh.gibbs = @lik_gaussiansmt_gibbs;
lik.fh.recappend = @lik_gaussiansmt_recappend;
end
end
function [w,s] = lik_gaussiansmt_pak(lik)
w = []; s = {};
end
function [lik, w] = lik_gaussiansmt_unpak(lik, w)
end
function lp =lik_gaussiansmt_lp(lik)
lp = 0;
end
function lpg = lik_gaussiansmt_lpg(lik)
lpg = [];
end
function DKff = lik_gaussiansmt_cfg(lik, x, x2)
DKff = [];
end
function C = lik_gaussiansmt_trcov(lik, x)
%LIK_GAUSSIANSMT_TRCOV Evaluate training covariance matrix
% corresponding to Gaussian noise
% Description
% C = LIK_GAUSSIANSMT_TRCOV(GP, TX) takes in covariance function
% of a Gaussian process GP and matrix TX that contains
% training input vectors. Returns covariance matrix C. Every
% element ij of C contains covariance between inputs i and j
% in TX. This subfunction is needed only in Gaussian likelihoods.
%
% See also
% LIK_GAUSSIANSMT_COV, LIK_GAUSSIANSMT_TRVAR, GP_COV, GP_TRCOV
[n, m] =size(x);
n1=n+1;
if n ~= lik.ndata
error(['lik_gaussiansmt -> _trvar: The training variance can be evaluated'...
' only for training data. '])
end
C = sparse(1:n, 1:n, lik.sigma2, n, n);
end
function C = lik_gaussiansmt_trvar(lik, x)
%LIK_GAUSSIANSMT_TRVAR Evaluate training variance vector
% corresponding to Gaussian noise
%
% Description
% C = LIK_GAUSSIANSMT_TRVAR(LIK, TX) takes in covariance function
% of a Gaussian process LIK and matrix TX that contains
% training inputs. Returns variance vector C. Every
% element i of C contains variance of input i in TX. This
% subfunction is needed only in Gaussian likelihoods.
%
%
% See also
% LIK_GAUSSIANSMT_COV, GP_COV, GP_TRCOV
[n, m] =size(x);
if n ~= lik.ndata
error(['lik_gaussiansmt -> _trvar: The training variance can be evaluated'...
' only for training data. '])
end
C = lik.sigma2;
end
function [lik, y] = lik_gaussiansmt_gibbs(gp, lik, x, y)
%LIK_GAUSSIANSMT_GIBBS Function for sampling the sigma2's
%
% Description
% Perform Gibbs sampling for the scale mixture variances. This
% function is likelihood specific.
[n,m] = size(x);
% Draw a sample of the mean of y. Its distribution is
% f ~ N(K*inv(C)*y, K - K*inv(C)*K')
switch gp.type
case 'FULL'
sampy = gp_rnd(gp, x, y, x);
case 'FIC'
sampy = gp_rnd(gp, x, y, x, 'tstind', 1:n);
case {'PIC' 'PIC_BLOCK'}
sampy = gp_rnd(gp, x, y, x, 'tstind', gp.tr_index);
end
% Calculate the residual
r = y-sampy;
U = lik.U;
t2 = lik.tau2;
alpha = lik.alpha;
nu = lik.nu;
rss2=alpha.^2.*U;
% Perform the gibbs sampling (Gelman et.al. (2004) page 304-305)
% Notice that 'sinvchi2rand' is parameterized as in Gelman et. al.
U=sinvchi2rand(nu+1, (nu.*t2+(r./alpha).^2)./(nu+1));
shape = n*nu./2; % These are parameters...
invscale = nu.*sum(1./U)./2; % used in Gelman et al
t2=gamrnd(shape, 1./invscale); % Notice! The matlab parameterization is different
alpha2=sinvchi2rand(n,mean(r.^2./U));
rss2=alpha2.*U;
if ~isempty(lik.p.nu)
% Sample nu using Gibbs sampling
pp = lik.p.nu;
opt=struct('nomit',4,'display',0,'method','doubling', ...
'wsize',4,'plimit',5,'unimodal',1,'mmlimits',[0; 128]);
nu=sls(@(nu) (-sum(sinvchi2_lpdf(U,nu,t2))-pp.fh.lp(nu, pp)),nu,opt);
end
lik.sigma2 = rss2;
lik.U = U;
lik.tau2 = t2;
lik.alpha = sqrt(alpha2);
lik.nu = nu;
lik.r = r;
if isfield(lik, 'censored')
imis1 = [];
imis2 = [];
if lik.censored(1) > -inf
imis1 = find(y<=lik.censored(1));
y(imis1)=normrtrand(sampy(imis1),alpha2*U(imis1),lik.censored(1));
end
if lik.censored(2) < inf
imis2 = find(y>=lik.censored(2));
y(imis2)=normltrand(sampy(imis2),alpha2*U(imis2),lik.censored(2));
end
lik.cy = y([imis1 ; imis2]);
end
end
function reccf = lik_gaussiansmt_recappend(reccf, ri, lik)
%RECAPPEND Record append
%
% Description
% RECCF = LIK_GAUSSIANSMT_RECAPPEND(RECCF, RI, LIK)
% takes a likelihood record structure RECCF, record
% index RI and likelihood structure LIK with the
% current MCMC samples of the parameters. Returns
% RECCF which contains all the old samples and the
% current samples from LIK . This subfunction is
% needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC and GP_MC -> RECAPPEND
if nargin == 2
% Initialize the record
reccf.type = 'Gaussian-smt';
    reccf.ndata = [];
% Initialize parameters
reccf.sigma2 = [];
% Set the function handles
reccf.fh.pak = @lik_gaussiansmt_pak;
reccf.fh.unpak = @lik_gaussiansmt_unpak;
reccf.fh.lp = @lik_gaussiansmt_lp;
reccf.fh.lpg = @lik_gaussiansmt_lpg;
reccf.fh.cfg = @lik_gaussiansmt_cfg;
reccf.fh.cov = @lik_gaussiansmt_cov;
reccf.fh.trcov = @lik_gaussiansmt_trcov;
reccf.fh.trvar = @lik_gaussiansmt_trvar;
reccf.fh.gibbs = @lik_gaussiansmt_gibbs;
reccf.fh.recappend = @lik_gaussiansmt_recappend;
else
% Append to the record
reccf.ndata = lik.ndata;
gpp = lik.p;
% record noiseSigma
reccf.sigma2(ri,:)=lik.sigma2;
if ~isempty(lik.nu)
reccf.nu(ri,:)=lik.nu;
reccf.U(ri,:) = lik.U;
reccf.tau2(ri,:) = lik.tau2;
reccf.alpha(ri,:) = lik.alpha;
reccf.r(ri,:) = lik.r;
end
if isfield(lik, 'censored')
reccf.cy(ri,:) = lik.cy';
end
end
end
| github | lcnbeapp/beapp-master | gp_avpredcomp.m | .m | beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gp_avpredcomp.m | 9,333 | windows_1250 | 048f8facc21b0b145997ed98f7e991c9 |
function [apcs,apcss]=gp_avpredcomp(gp, x, y, varargin)
%GP_AVPREDCOMP Average predictive comparison for Gaussian process model
%
% Description
% APCS=GP_AVPREDCOMP(GP, X, Y, OPTIONS) Takes a Gaussian process
% structure GP together with a matrix X of training inputs and
% vector Y of training targets, and returns average predictive
% comparison (APC) estimates for each input in a structure APCS.
% APCS contains following fields
% ps - the probability of knowing the sign of the APC
% in the latent outcome for each input variable.
% fs - the samples from the APC in the latent outcome for each
% input variable
% fsa - the samples from the absolute APC in the latent outcome
% for each input variable
% fsrms - the samples from the root mean squared APC in the latent
% outcome for each input variable
% ys - the samples from the APC in the target outcome for each
% input variable
% ysa - the samples from the absolute APC in the target outcome
% for each input variable
% ysrms - the samples from the root mean squared APC in the target
% outcome for each input variable
%
% [APCS,APCSS]=GP_AVPREDCOMP(GP, X, Y, OPTIONS) returns also APCSS
% which contains APCS components for each data point. These can
% be used to form conditional average predictive comparisons (CAPC).
% APCSS contains following fields
% numfs - the samples from the numerator of APC in the latent
% outcome for each input variable
% numfsa - the samples from the numerator of absolute APC in
% the latent outcome for each input variable
% numfsrms - the samples from the numerator of RMS APC in
% the latent outcome for each input variable
%      numys    - the samples from the numerator of APC in the target
%                 outcome for each input variable
%      numysa   - the samples from the numerator of absolute APC in
%                 the target outcome for each input variable
%      numysrms - the samples from the numerator of RMS APC in
%                 the target outcome for each input variable
% dens - the samples from the denominator of APC in the latent
% outcome for each input variable
% densa - the samples from the denominator of absolute APC in
% the latent outcome for each input variable
% densrms - the samples from the denominator of RMS APC in
% the latent outcome for each input variable
%
% OPTIONS is optional parameter-value pair
% z - optional observed quantity in triplet (x_i,y_i,z_i)
% Some likelihoods may use this. For example, in
% case of Poisson likelihood we have z_i=E_i, that
% is, expected value for ith case.
% nsamp - determines the number of samples used (default=500).
% deltadist - indicator vector telling which component sets
% are handled using the delta distance (0 if x=x',
% and 1 otherwise). Default is found by examining
% the covariance and metric functions used.
%
% See also
% GP_PRED
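%
%  Example (an illustrative sketch; GP, X and Y stand for an already
%  constructed GP structure and its training data):
%
%    apcs = gp_avpredcomp(gp, x, y, 'nsamp', 200);
%    apcs.ps         % probability of knowing the sign of each APC
%    mean(apcs.fs)   % posterior mean APC in the latent outcome per input
%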
% Copyright (c) 2011 Jaakko Riihimäki
% Copyright (c) 2011 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'GP_AVPREDCOMP';
ip.addRequired('gp',@isstruct);
ip.addRequired('x', @(x) ~isempty(x) && isreal(x) && all(isfinite(x(:))))
ip.addRequired('y', @(x) ~isempty(x) && isreal(x) && all(isfinite(x(:))))
ip.addParamValue('z', [], @(x) isreal(x) && all(isfinite(x(:))))
ip.addParamValue('predcf', [], @(x) isempty(x) || ...
isvector(x) && isreal(x) && all(isfinite(x)&x>0))
ip.addParamValue('tstind', [], @(x) isempty(x) || iscell(x) ||...
(isvector(x) && isreal(x) && all(isfinite(x)&x>0)))
ip.addParamValue('nsamp', 500, @(x) isreal(x) && isscalar(x))
ip.addParamValue('deltadist',[], @(x) isvector(x));
ip.parse(gp, x, y, varargin{:});
options=struct();
options.predcf=ip.Results.predcf;
options.tstind=ip.Results.tstind;
z=ip.Results.z;
if ~isempty(z)
options.z=ip.Results.z;
end
nsamp=ip.Results.nsamp;
deltadist = logical(ip.Results.deltadist);
[n, nin]=size(x);
if isempty(deltadist)
deltadist=false(1,nin);
deltadist(gp_finddeltadist(gp))=true;
end
ps=zeros(1,nin);
fs=zeros(nsamp,nin);
fsa=zeros(nsamp,nin);
fsrms=zeros(nsamp,nin);
if nargout>1
numfs=zeros(n,nsamp,nin);
numfsa=zeros(n,nsamp,nin);
numfsrms=zeros(n,nsamp,nin);
dens=zeros(n,nsamp,nin);
densa=zeros(n,nsamp,nin);
densrms=zeros(n,nsamp,nin);
end
ys=zeros(nsamp,nin);
ysa=zeros(nsamp,nin);
ysrms=zeros(nsamp,nin);
if nargout>1
numys=zeros(n,nsamp,nin);
numysa=zeros(n,nsamp,nin);
numysrms=zeros(n,nsamp,nin);
end
% covariance is used for Mahalanobis-weighted distance
covx=cov(x);
% handle categorical variables
covx(deltadist,:)=0;
covx(:,deltadist)=0;
for i1=find(deltadist)
covx(i1,i1)=1;
end
prevstream=setrandstream();
% loop through the input variables
for k1=1:nin
fprintf('k1=%d\n',k1)
%- Compute the weight matrix based on Mahalanobis distances:
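  % (each pair of points gets weight W_ij = 1/(1 + d_M^2), where d_M is
  %  the Mahalanobis distance computed over all inputs except the k1'th one)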
x_=x; x_(:,k1)=[];
covx_=covx; covx_(:,k1)=[]; covx_(k1,:)=[];
deltadist_=deltadist; deltadist_(k1)=[];
% weight matrix:
W=zeros(n);
for i1=1:n
x_diff=zeros(nin-1,n-i1);
x_diff(~deltadist_,:)=bsxfun(@minus,x_(i1,~deltadist_),x_((i1+1):n,~deltadist_))';
x_diff(deltadist_,:)=double(bsxfun(@ne,x_(i1,deltadist_),x_((i1+1):n,deltadist_))');
W(i1,(i1+1):n)=1./(1+sum(x_diff.*(covx_\x_diff)));
end
W=W+W'+eye(n);
seed=round(rand*10e8);
numf=zeros(1,nsamp);
numfa=zeros(1,nsamp);
numfrms=zeros(1,nsamp);
numy=zeros(1,nsamp);
numya=zeros(1,nsamp);
numyrms=zeros(1,nsamp);
den=0;
dena=0;
for i1=1:n
% inputs of interest
ui=x(i1, k1);
ujs=x(:, k1);
% replicate same values for other inputs
xrep=repmat(x(i1,:),n,1); xrep(:,k1)=ujs;
if deltadist(k1)
Udiff=double(ujs~=ui);
else
Udiff=ujs-ui;
end
Udiffa=abs(Udiff);
Usign=sign(Udiff);
% draw random samples from the posterior
setrandstream(seed);
fr = gp_rnd(gp, x, y, xrep, 'nsamp', nsamp, options);
% average change in input
deni=sum(W(:,i1).*Udiff.*Usign);
denai=sum(W(:,i1).*Udiffa);
den=den+deni;
dena=dena+denai;
% average change in latent outcome
b=bsxfun(@minus,fr,fr(i1,:));
numfi=sum(bsxfun(@times,W(:,i1).*Usign,b));
numfai=sum(bsxfun(@times,W(:,i1),abs(b)));
numfrmsi=sum(bsxfun(@times,W(:,i1),b.^2));
numf=numf+numfi;
numfa=numfa+numfai;
numfrms=numfrms+numfrmsi;
if nargout>1
numfs(i1,:,k1)=numfi;
      numfsa(i1,:,k1)=numfai;
numfsrms(i1,:,k1)=numfrmsi;
dens(i1,:,k1)=deni;
densa(i1,:,k1)=denai;
densrms(i1,:,k1)=denai;
end
% compute latent values through the inverse link function
if isfield(gp.lik.fh, 'invlink')
ilfr = gp.lik.fh.invlink(gp.lik, fr, repmat(z,1,nsamp));
% average change in outcome
b=bsxfun(@minus,ilfr,ilfr(i1,:));
numyi=sum(bsxfun(@times,W(:,i1).*Usign,b));
numyai=sum(bsxfun(@times,W(:,i1),abs(b)));
numyrmsi=sum(bsxfun(@times,W(:,i1),b.^2));
numy=numy+numyi;
numya=numya+numyai;
numyrms=numyrms+numyrmsi;
if nargout>1
numys(i1,:,k1)=numyi;
numysa(i1,:,k1)=numyai;
numysrms(i1,:,k1)=numyrmsi;
end
end
end
% outcome is the latent function
fs(:,k1)=numf./den;
fsa(:,k1)=numfa./dena;
fsrms(:,k1)=sqrt(numfrms./dena);
if isfield(gp.lik.fh, 'invlink')
% outcome is computed through the inverse link function
ys(:,k1)=numy./den;
ysa(:,k1)=numya./dena;
ysrms(:,k1)=sqrt(numyrms./dena);
end
% probability of knowing the sign of the change in
% latent function
ps(1,k1)=mean(numf./den>0);
if ps(1,k1)<0.5
ps(1,k1)=1-ps(1,k1);
end
end
apcs.ps=ps;
apcs.fs=fs;
apcs.fsa=fsa;
apcs.fsrms=fsrms;
if isfield(gp.lik.fh, 'invlink')
apcs.ys=ys;
apcs.ysa=ysa;
apcs.ysrms=ysrms;
end
if nargout>1
apcss.numfs=numfs;
apcss.numfsa=numfsa;
apcss.numfsrms=numfsrms;
apcss.dens=dens;
apcss.densa=densa;
apcss.densrms=densrms;
if isfield(gp.lik.fh, 'invlink')
apcss.numys=numys;
apcss.numysa=numysa;
apcss.numysrms=numysrms;
end
end
setrandstream(prevstream);
end
function deltadist = gp_finddeltadist(cf)
% FINDDELTADIST - Find which covariates are using delta distance
%
deltadist=[];
if ~iscell(cf) && isfield(cf,'cf')
deltadist=union(deltadist,gp_finddeltadist(cf.cf));
else
for cfi=1:numel(cf)
if isfield(cf{cfi},'cf')
deltadist=union(deltadist,gp_finddeltadist(cf{cfi}.cf));
else
if isfield(cf{cfi},'metric')
if isfield(cf{cfi}.metric,'deltadist')
deltadist=union(deltadist,cf{cfi}.metric.deltadist);
end
elseif ismember(cf{cfi}.type,{'gpcf_cat' 'gpcf_mask'}) && ...
isfield(cf{cfi},'selectedVariables')
deltadist=union(deltadist,cf{cfi}.selectedVariables);
end
end
end
end
end
| github | lcnbeapp/beapp-master | lik_t.m | .m | beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/lik_t.m | 36,265 | UNKNOWN | 7c042ee6cb876d45190e93aa46e3e774 |
function lik = lik_t(varargin)
%LIK_T Create a Student-t likelihood structure
%
% Description
% LIK = LIK_T('PARAM1',VALUE1,'PARAM2,VALUE2,...)
% creates Student-t likelihood structure in which the named
% parameters have the specified values. Any unspecified
% parameters are set to default values.
%
% LIK = LIK_T(LIK,'PARAM1',VALUE1,'PARAM2,VALUE2,...)
% modify a likelihood structure with the named parameters
% altered with the specified values.
%
% Parameters for Student-t likelihood [default]
%      sigma2       - scale squared [0.1]
% nu - degrees of freedom [4]
% sigma2_prior - prior for sigma2 [prior_logunif]
% nu_prior - prior for nu [prior_fixed]
%
% Note! If the prior is 'prior_fixed' then the parameter in
% question is considered fixed and it is not handled in
% optimization, grid integration, MCMC etc.
%
% The likelihood is defined as follows:
% __ n
% p(y|f, z) = || i=1 C(nu,s2) * (1 + 1/nu * (y_i - f_i)^2/s2 )^(-(nu+1)/2)
%
% where nu is the degrees of freedom, s2 the scale and f_i the
% latent variable defining the mean. C(nu,s2) is constant
% depending on nu and s2.
%
% See also
% GP_SET, LIK_*, PRIOR_*
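%
%  Example (an illustrative sketch; the parameter values are arbitrary):
%
%    lik  = lik_t();                            % defaults: sigma2=0.1, nu=4
%    lik2 = lik_t('sigma2', 0.2, 'nu', 3, 'nu_prior', prior_logunif());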
%
% Copyright (c) 2009-2010 Jarno Vanhatalo
% Copyright (c) 2010 Aki Vehtari
% Copyright (c) 2011 Pasi Jylänki
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'LIK_T';
ip.addOptional('lik', [], @isstruct);
ip.addParamValue('sigma2',0.1, @(x) isscalar(x) && x>0);
ip.addParamValue('sigma2_prior',prior_logunif(), @(x) isstruct(x) || isempty(x));
ip.addParamValue('nu',4, @(x) isscalar(x) && x>0);
ip.addParamValue('nu_prior',prior_fixed, @(x) isstruct(x) || isempty(x));
ip.parse(varargin{:});
lik=ip.Results.lik;
if isempty(lik)
init=true;
lik.type = 'Student-t';
else
if ~isfield(lik,'type') || ~isequal(lik.type,'Student-t')
error('First argument does not seem to be a valid likelihood function structure')
end
init=false;
end
% Initialize parameters
if init || ~ismember('sigma2',ip.UsingDefaults)
lik.sigma2 = ip.Results.sigma2;
end
if init || ~ismember('nu',ip.UsingDefaults)
lik.nu = ip.Results.nu;
end
% Initialize prior structure
if init
lik.p=[];
end
if init || ~ismember('sigma2_prior',ip.UsingDefaults)
lik.p.sigma2=ip.Results.sigma2_prior;
end
if init || ~ismember('nu_prior',ip.UsingDefaults)
lik.p.nu=ip.Results.nu_prior;
end
if init
% Set the function handles to the subfunctions
lik.fh.pak = @lik_t_pak;
lik.fh.unpak = @lik_t_unpak;
lik.fh.lp = @lik_t_lp;
lik.fh.lpg = @lik_t_lpg;
lik.fh.ll = @lik_t_ll;
lik.fh.llg = @lik_t_llg;
lik.fh.llg2 = @lik_t_llg2;
lik.fh.llg3 = @lik_t_llg3;
lik.fh.tiltedMoments = @lik_t_tiltedMoments;
lik.fh.tiltedMoments2 = @lik_t_tiltedMoments2;
lik.fh.siteDeriv = @lik_t_siteDeriv;
lik.fh.siteDeriv2 = @lik_t_siteDeriv2;
lik.fh.optimizef = @lik_t_optimizef;
lik.fh.upfact = @lik_t_upfact;
lik.fh.invlink = @lik_t_invlink;
lik.fh.predy = @lik_t_predy;
lik.fh.predprcty = @lik_t_predprcty;
lik.fh.recappend = @lik_t_recappend;
end
end
function [w, s] = lik_t_pak(lik)
%LIK_T_PAK Combine likelihood parameters into one vector.
%
% Description
% W = LIK_T_PAK(LIK) takes a likelihood structure LIK and
% combines the parameters into a single row vector W. This
% is a mandatory subfunction used for example in energy and
% gradient computations.
%
% w = [ log(lik.sigma2)
% (hyperparameters of lik.sigma2)
% log(log(lik.nu))
% (hyperparameters of lik.nu)]'
%
% See also
% LIK_T_UNPAK, GP_PAK
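  %  For example, with sigma2=0.5, nu=4 and both priors in use, the packed
  %  vector begins w = [log(0.5) ... log(log(4)) ...] (an illustrative note
  %  that just restates the layout documented above).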
w = []; s = {};
if ~isempty(lik.p.sigma2)
w = [w log(lik.sigma2)];
s = [s; 'log(lik.sigma2)'];
[wh sh] = lik.p.sigma2.fh.pak(lik.p.sigma2);
w = [w wh];
s = [s; sh];
end
if ~isempty(lik.p.nu)
w = [w log(log(lik.nu))];
s = [s; 'loglog(lik.nu)'];
[wh sh] = lik.p.nu.fh.pak(lik.p.nu);
w = [w wh];
s = [s; sh];
end
end
function [lik, w] = lik_t_unpak(lik, w)
%LIK_T_UNPAK Extract likelihood parameters from the vector.
%
% Description
% W = LIK_T_UNPAK(W, LIK) takes a likelihood structure LIK and
% extracts the parameters from the vector W to the LIK
% structure. This is a mandatory subfunction used for example
% in energy and gradient computations.
%
% Assignment is inverse of
% w = [ log(lik.sigma2)
% (hyperparameters of lik.sigma2)
% log(log(lik.nu))
% (hyperparameters of lik.nu)]'
%
% See also
% LIK_T_PAK, GP_UNPAK
if ~isempty(lik.p.sigma2)
lik.sigma2 = exp(w(1));
w = w(2:end);
[p, w] = lik.p.sigma2.fh.unpak(lik.p.sigma2, w);
lik.p.sigma2 = p;
end
if ~isempty(lik.p.nu)
lik.nu = exp(exp(w(1)));
w = w(2:end);
[p, w] = lik.p.nu.fh.unpak(lik.p.nu, w);
lik.p.nu = p;
end
end
function lp = lik_t_lp(lik)
%LIK_T_LP log(prior) of the likelihood parameters
%
% Description
% LP = LIK_T_LP(LIK) takes a likelihood structure LIK and
% returns log(p(th)), where th collects the parameters.
% This subfunction is needed when there are likelihood parameters.
%
% See also
% LIK_T_LLG, LIK_T_LLG3, LIK_T_LLG2, GPLA_E
v = lik.nu;
sigma2 = lik.sigma2;
lp = 0;
if ~isempty(lik.p.sigma2)
lp = lp + lik.p.sigma2.fh.lp(sigma2, lik.p.sigma2) +log(sigma2);
end
if ~isempty(lik.p.nu)
lp = lp + lik.p.nu.fh.lp(lik.nu, lik.p.nu) +log(v) +log(log(v));
end
end
function lpg = lik_t_lpg(lik)
%LIK_T_LPG d log(prior)/dth of the likelihood parameters th
%
% Description
% LPG = LIK_T_LPG(LIK) takes a likelihood structure LIK
% and returns d log(p(th))/dth, where th collects the
% parameters. This subfunction is needed when there are
% likelihood parameters.
%
% See also
% LIK_T_LLG, LIK_T_LLG3, LIK_T_LLG2, GPLA_G
% Evaluate the gradients of log(prior)
v = lik.nu;
sigma2 = lik.sigma2;
lpg = [];
i1 = 0;
if ~isempty(lik.p.sigma2)
i1 = i1+1;
lpg(i1) = lik.p.sigma2.fh.lpg(lik.sigma2, lik.p.sigma2).*sigma2 + 1;
end
if ~isempty(lik.p.nu)
i1 = i1+1;
lpg(i1) = lik.p.nu.fh.lpg(lik.nu, lik.p.nu).*v.*log(v) +log(v) + 1;
end
end
function ll = lik_t_ll(lik, y, f, z)
%LIK_T_LL Log likelihood
%
% Description
% LL = LIK_T_LL(LIK, Y, F) takes a likelihood structure LIK,
% observations Y, and latent values F. Returns the log
% likelihood, log p(y|f,z). This subfunction is needed when
% using Laplace approximation or MCMC for inference with
% non-Gaussian likelihoods. This subfunction is also used in
% information criteria (DIC, WAIC) computations.
%
% See also
% LIK_T_LLG, LIK_T_LLG3, LIK_T_LLG2, GPLA_E
r = y-f;
v = lik.nu;
sigma2 = lik.sigma2;
term = gammaln((v + 1) / 2) - gammaln(v/2) -log(v.*pi.*sigma2)/2;
ll = term + log(1 + (r.^2)./v./sigma2) .* (-(v+1)/2);
ll = sum(ll);
end
function llg = lik_t_llg(lik, y, f, param, z)
%LIK_T_LLG Gradient of the log likelihood
%
% Description
% LOKLIKG = LIK_T_LLG(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, observations Y, and latent values F. Returns
% the gradient of log likelihood with respect to PARAM. At the
% moment PARAM can be 'param' or 'latent'. This subfunction is
% needed when using Laplace approximation or MCMC for inference
% with non-Gaussian likelihoods.
%
% See also
% LIK_T_LL, LIK_T_LLG2, LIK_T_LLG3, GPLA_E
r = y-f;
v = lik.nu;
sigma2 = lik.sigma2;
switch param
case 'param'
n = length(y);
i1=0;
if ~isempty(lik.p.sigma2)
i1=i1+1;
% Derivative with respect to sigma2
llg(i1) = -n./sigma2/2 + (v+1)./2.*sum(r.^2./(v.*sigma2.^2+r.^2*sigma2));
% correction for the log transformation
llg(i1) = llg(i1).*sigma2;
end
if ~isempty(lik.p.nu)
i1=i1+1;
% Derivative with respect to nu
llg(i1) = 0.5.* sum(psi((v+1)./2) - psi(v./2) - 1./v - log(1+r.^2./(v.*sigma2)) + (v+1).*r.^2./(v.^2.*sigma2 + v.*r.^2));
% correction for the log transformation
llg(i1) = llg(i1).*v.*log(v);
end
case 'latent'
llg = (v+1).*r ./ (v.*sigma2 + r.^2);
end
end
function llg2 = lik_t_llg2(lik, y, f, param, z)
%LIK_T_LLG2 Second gradients of log likelihood
%
% Description
% LLG2 = LIK_T_LLG2(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, observations Y, and latent values F. Returns
% the Hessian of log likelihood with respect to PARAM. At the
% moment PARAM can be only 'latent'. LLG2 is a vector with
% diagonal elements of the Hessian matrix (off diagonals are
% zero). This subfunction is needed when using Laplace
% approximation or EP for inference with non-Gaussian likelihoods.
%
% See also
% LIK_T_LL, LIK_T_LLG, LIK_T_LLG3, GPLA_E
r = y-f;
v = lik.nu;
sigma2 = lik.sigma2;
switch param
case 'param'
case 'latent'
% The Hessian d^2 /(dfdf)
llg2 = (v+1).*(r.^2 - v.*sigma2) ./ (v.*sigma2 + r.^2).^2;
case 'latent+param'
% gradient d^2 / (dfds2)
llg2 = -v.*(v+1).*r ./ (v.*sigma2 + r.^2).^2;
% Correction for the log transformation
llg2 = llg2.*sigma2;
if ~isempty(lik.p.nu)
% gradient d^2 / (dfdnu)
llg2(:,2) = r./(v.*sigma2 + r.^2) - sigma2.*(v+1).*r./(v.*sigma2 + r.^2).^2;
% Correction for the log transformation
llg2(:,2) = llg2(:,2).*v.*log(v);
end
end
end
function llg3 = lik_t_llg3(lik, y, f, param, z)
%LIK_T_LLG3 Third gradients of log likelihood (energy)
%
% Description
% LLG3 = LIK_T_LLG3(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, observations Y and latent values F and
% returns the third gradients of log likelihood with respect
% to PARAM. At the moment PARAM can be only 'latent'. G3 is a
% vector with third gradients. This subfunction is needed when
% using Laplace approximation for inference with non-Gaussian
% likelihoods.
%
% See also
% LIK_T_LL, LIK_T_LLG, LIK_T_LLG2, GPLA_E, GPLA_G
r = y-f;
v = lik.nu;
sigma2 = lik.sigma2;
switch param
case 'param'
case 'latent'
% Return the diagonal of W differentiated with respect to latent values / dfdfdf
llg3 = (v+1).*(2.*r.^3 - 6.*v.*sigma2.*r) ./ (v.*sigma2 + r.^2).^3;
case 'latent2+param'
% Return the diagonal of W differentiated with respect to
% likelihood parameters / dfdfds2
llg3 = (v+1).*v.*( v.*sigma2 - 3.*r.^2) ./ (v.*sigma2 + r.^2).^3;
llg3 = llg3.*sigma2;
if ~isempty(lik.p.nu)
% dfdfdnu
llg3(:,2) = (r.^2-2.*v.*sigma2-sigma2)./(v.*sigma2 + r.^2).^2 - 2.*sigma2.*(r.^2-v.*sigma2).*(v+1)./(v.*sigma2 + r.^2).^3;
llg3(:,2) = llg3(:,2).*v.*log(v);
end
end
end
function [logM_0, m_1, sigm2hati1] = lik_t_tiltedMoments(lik, y, i1, sigm2_i, myy_i, z)
%LIK_T_TILTEDMOMENTS Returns the marginal moments for EP algorithm
%
% Description
% [M_0, M_1, M2] = LIK_T_TILTEDMOMENTS(LIK, Y, I, S2, MYY, Z)
  %    takes a likelihood structure LIK, observations Y, index I,
  %    cavity variance S2 and cavity mean
% MYY. Returns the zeroth moment M_0, mean M_1 and variance
% M_2 of the posterior marginal (see Rasmussen and Williams
% (2006): Gaussian processes for Machine Learning, page 55).
% This subfunction is needed when using EP for inference with
% non-Gaussian likelihoods.
%
% See also
% GPEP_E
zm = @zeroth_moment;
tol = 1e-8;
yy = y(i1);
nu = lik.nu;
sigma2 = lik.sigma2;
% Set the limits for integration and integrate with quad
% -----------------------------------------------------
mean_app = myy_i;
sigm_app = sqrt(sigm2_i);
lambdaconf(1) = mean_app - 8.*sigm_app; lambdaconf(2) = mean_app + 8.*sigm_app;
test1 = zm((lambdaconf(2)+lambdaconf(1))/2) > zm(lambdaconf(1));
test2 = zm((lambdaconf(2)+lambdaconf(1))/2) > zm(lambdaconf(2));
testiter = 1;
if test1 == 0
lambdaconf(1) = lambdaconf(1) - 3*sigm_app;
test1 = zm((lambdaconf(2)+lambdaconf(1))/2)>zm(lambdaconf(1));
if test1 == 0
go=true;
while testiter<10 & go
lambdaconf(1) = lambdaconf(1) - 2*sigm_app;
lambdaconf(2) = lambdaconf(2) - 2*sigm_app;
test1 = zm((lambdaconf(2)+lambdaconf(1))/2)>zm(lambdaconf(1));
test2 = zm((lambdaconf(2)+lambdaconf(1))/2)>zm(lambdaconf(2));
if test1==1&test2==1
go=false;
end
testiter=testiter+1;
end
end
mean_app = (lambdaconf(2)+lambdaconf(1))/2;
elseif test2 == 0
lambdaconf(2) = lambdaconf(2) + 3*sigm_app;
test2 = zm((lambdaconf(2)+lambdaconf(1))/2)>zm(lambdaconf(2));
if test2 == 0
go=true;
while testiter<10 & go
lambdaconf(1) = lambdaconf(1) + 2*sigm_app;
lambdaconf(2) = lambdaconf(2) + 2*sigm_app;
test1 = zm((lambdaconf(2)+lambdaconf(1))/2)>zm(lambdaconf(1));
test2 = zm((lambdaconf(2)+lambdaconf(1))/2)>zm(lambdaconf(2));
if test1==1&test2==1
go=false;
end
testiter=testiter+1;
end
end
mean_app = (lambdaconf(2)+lambdaconf(1))/2;
end
RTOL = 1.e-6;
ATOL = 1.e-10;
% Integrate with quadrature
[m_0, m_1, m_2] = quad_moments(zm,lambdaconf(1), lambdaconf(2), RTOL, ATOL);
sigm2hati1 = m_2 - m_1.^2;
logM_0 = log(m_0);
function integrand = zeroth_moment(f)
r = yy-f;
term = gammaln((nu + 1) / 2) - gammaln(nu/2) -log(nu.*pi.*sigma2)/2;
integrand = exp(term + log(1 + r.^2./nu./sigma2) .* (-(nu+1)/2));
integrand = integrand.*exp(- 0.5 * (f-myy_i).^2./sigm2_i - log(sigm2_i)/2 - log(2*pi)/2); %
end
end
function [g_i] = lik_t_siteDeriv(lik, y, i1, sigm2_i, myy_i, z)
%LIK_T_SITEDERIV Evaluate the expectation of the gradient
% of the log likelihood term with respect
% to the likelihood parameters for EP
%
% Description
  %    G_I = LIK_T_SITEDERIV(LIK, Y, I, S2, MYY, Z)
% takes a likelihood structure LIK, observations Y, index I
% and cavity variance S2 and mean MYY. Returns E_f [d log
% p(y_i|f_i) /d a], where a is the likelihood parameter and
% the expectation is over the marginal posterior. This term is
% needed when evaluating the gradients of the marginal
% likelihood estimate Z_EP with respect to the likelihood
% parameters (see Seeger (2008): Expectation propagation for
% exponential families). This subfunction is needed when using
% EP for inference with non-Gaussian likelihoods and there are
% likelihood parameters.
%
% See also
% GPEP_G
zm = @zeroth_moment;
znu = @deriv_nu;
zsigma2 = @deriv_sigma2;
tol = 1e-8;
yy = y(i1);
nu = lik.nu;
sigma2 = lik.sigma2;
% Set the limits for integration and integrate with quad
mean_app = myy_i;
sigm_app = sqrt(sigm2_i);
lambdaconf(1) = mean_app - 6.*sigm_app; lambdaconf(2) = mean_app + 6.*sigm_app;
test1 = zm((lambdaconf(2)+lambdaconf(1))/2)>zm(lambdaconf(1));
test2 = zm((lambdaconf(2)+lambdaconf(1))/2)>zm(lambdaconf(2));
testiter = 1;
if test1 == 0
lambdaconf(1) = lambdaconf(1) - 3*sigm_app;
test1 = zm((lambdaconf(2)+lambdaconf(1))/2)>zm(lambdaconf(1));
if test1 == 0
go=true;
while testiter<10 & go
lambdaconf(1) = lambdaconf(1) - 2*sigm_app;
lambdaconf(2) = lambdaconf(2) - 2*sigm_app;
test1 = zm((lambdaconf(2)+lambdaconf(1))/2)>zm(lambdaconf(1));
test2 = zm((lambdaconf(2)+lambdaconf(1))/2)>zm(lambdaconf(2));
if test1==1&test2==1
go=false;
end
testiter=testiter+1;
end
end
mean_app = (lambdaconf(2)+lambdaconf(1))/2;
elseif test2 == 0
lambdaconf(2) = lambdaconf(2) + 3*sigm_app;
test2 = zm((lambdaconf(2)+lambdaconf(1))/2)>zm(lambdaconf(2));
if test2 == 0
go=true;
while testiter<10 & go
lambdaconf(1) = lambdaconf(1) + 2*sigm_app;
lambdaconf(2) = lambdaconf(2) + 2*sigm_app;
test1 = zm((lambdaconf(2)+lambdaconf(1))/2)>zm(lambdaconf(1));
test2 = zm((lambdaconf(2)+lambdaconf(1))/2)>zm(lambdaconf(2));
if test1==1&test2==1
go=false;
end
testiter=testiter+1;
end
end
mean_app = (lambdaconf(2)+lambdaconf(1))/2;
end
% Integrate with quad
[m_0, fhncnt] = quadgk(zm, lambdaconf(1), lambdaconf(2));
% t=linspace(lambdaconf(1),lambdaconf(2),100);
% plot(t,zm(t))
% keyboard
[g_i(1), fhncnt] = quadgk( @(f) zsigma2(f).*zm(f) , lambdaconf(1), lambdaconf(2));
g_i(1) = g_i(1)/m_0*sigma2;
if ~isempty(lik.p.nu)
[g_i(2), fhncnt] = quadgk(@(f) znu(f).*zm(f) , lambdaconf(1), lambdaconf(2));
g_i(2) = g_i(2)/m_0.*nu.*log(nu);
end
function integrand = zeroth_moment(f)
r = yy-f;
term = gammaln((nu + 1) / 2) - gammaln(nu/2) -log(nu.*pi.*sigma2)/2;
integrand = exp(term + log(1 + r.^2./nu./sigma2) .* (-(nu+1)/2));
integrand = integrand.*exp(- 0.5 * (f-myy_i).^2./sigm2_i - log(sigm2_i)/2 - log(2*pi)/2);
end
function g = deriv_nu(f)
r = yy-f;
temp = 1 + r.^2./nu./sigma2;
g = psi((nu+1)/2)./2 - psi(nu/2)./2 - 1./(2.*nu) - log(temp)./2 + (nu+1)./(2.*temp).*(r./nu).^2./sigma2;
end
function g = deriv_sigma2(f)
r = yy-f;
g = -1/sigma2/2 + (nu+1)./2.*r.^2./(nu.*sigma2.^2 + r.^2.*sigma2);
end
end
function [lnZhat, muhat, sigm2hat] = lik_t_tiltedMoments2(likelih, y, yi, sigm2_i, myy_i, z, eta)
%LIK_T_TILTEDMOMENTS2  Returns the marginal moments for the robust-EP algorithm
%
%  Description
%    [LNZHAT, MUHAT, SIGM2HAT] = LIK_T_TILTEDMOMENTS2(LIK, Y, I, S2, MYY, Z, ETA)
%    takes a likelihood structure LIK, observations Y, index I,
%    cavity variance S2 and cavity mean MYY. Returns the log of the
%    zeroth moment LNZHAT, the mean MUHAT and the variance SIGM2HAT
% of the posterior marginal (see Rasmussen and Williams (2006):
% Gaussian processes for Machine Learning, page 55). This subfunction
% is needed when using robust-EP for inference with non-Gaussian
% likelihoods.
%
% See also
% GPEP_E
if nargin<7
eta=1;
end
yy = y(yi);
nu = likelih.nu;
sigma2 = likelih.sigma2;
sigma = sqrt(sigma2);
nuprime = eta*nu+eta-1;
a=nuprime/2; %a=nu/2;
u=linspace(log(1e-8),5,200);
du=u(2)-u(1);
lnpu=(a-1)*u -a*exp(u)+u;
% sigma2 t-likelihood parameter, scale squared
% sigm2_i cavity variance
% myy_i cavity mean
sigma2prime = sigma2*nu/nuprime;
Vu = sigm2_i + (sigma2prime)./exp(u);
lnZu = 0.5*(-log(2*pi*Vu)) -0.5 * (yy-myy_i)^2 ./Vu;
lnZt = eta*gammaln((nu+1)/2) - eta/2*log(nu*pi*sigma2) - eta*gammaln(nu/2) - gammaln((nuprime+1)/2) + 0.5*log(nuprime*pi*sigma2prime) + gammaln(nuprime/2);
ptu=exp(lnpu+lnZu+lnZt);
Z_0=sum(ptu)*du;
lnZhat=log(Z_0) + a*log(a)-gammaln(a);
Vtu=1./(1/sigm2_i +(1/sigma2prime)*exp(u));
mtu=Vtu.*(myy_i/sigm2_i + (yy/sigma2prime)*exp(u));
muhat=sum(mtu.*ptu)*du/Z_0;
sigm2hat=sum((Vtu+mtu.^2).*ptu)*du/Z_0-muhat^2;
% limiting distribution (nu -> infinity)
% Vg=1/(1/sigm2_i +eta/sigma2);
% mg=Vg*(myy_i/sigm2_i +yy*eta/sigma2);
% sigm_i=sqrt(sigm2_i);
% sg=sqrt(Vg);
%
% % set integration limits and scaling
% nu_lim=1e10;
% if nu<nu_lim
%
% if sqrt(sigma2/sigm2_i)<0.05
% % set the integration limits when the likelihood is very narrow
%
% % grid resolution
% dd=10;
% df = [12*sigm_i/100 2*dd*sigma/100];
%
% if yy>=myy_i
% % grid break points
% bp=[min(myy_i-6*sigm_i,yy-dd*sigma) myy_i-6*sigm_i, ...
% min(myy_i+6*sigm_i,yy-dd*sigma), yy-dd*sigma, yy+dd*sigma,...
% max(myy_i+6*sigm_i,yy+dd*sigma)];
%
% % grid values
% a=1e-6;
% fvec =[ bp(1):df(2):bp(2)-a, bp(2):df(1):bp(3)-a, bp(3):max(df):bp(4)-a, ...
% bp(4):df(2):bp(5)-a, bp(5):df(1):bp(6)];
% else
% % grid break points
% bp=[min(myy_i-6*sigm_i,yy-dd*sigma), yy-dd*sigma, yy+dd*sigma,...
% max(myy_i-6*sigm_i,yy+dd*sigma), myy_i+6*sigm_i, ...
% max(myy_i+6*sigm_i,yy+dd*sigma)];
%
% % grid values
% a=1e-6;
% fvec =[ bp(1):df(1):bp(2)-a, bp(2):df(2):bp(3)-a, bp(3):max(df):bp(4)-a, ...
% bp(4):df(1):bp(5)-a, bp(5):df(2):bp(6)];
% end
%
% np=numel(fvec);
% logpt = lpt(fvec,0);
% lpt_max = max([logpt lpt([myy_i mg],0)]);
% lambdaconf=[fvec(1), fvec(end)];
% for i1=2:np-1
% if logpt(i1) < lpt_max+log(1e-7) %(exp(logpt(i1))/exp(lpt_max) < 1e-7)
% lambdaconf(1) = fvec(i1);
% else
% break;
% end
% end
% for i1=1:np-2
% if logpt(end-i1) < lpt_max+log(1e-7) %(exp(logpt(end-i1))/exp(lpt_max) < 1e-7)
% lambdaconf(2) = fvec(end-i1);
% else
% break;
% end
% end
% else
% % set the integration limits in easier cases
% np=20;
% if mg>myy_i
% lambdaconf=[myy_i-6*sigm_i,max(mg+6*sg,myy_i+6*sigm_i)];
% fvec=linspace(myy_i,mg,np);
% else
% lambdaconf=[min(mg-6*sg,myy_i-6*sigm_i),myy_i+6*sigm_i];
% fvec=linspace(mg,myy_i,np);
% end
% lpt_max=max(lpt(fvec,0));
% end
% C=log(1)-lpt_max; % scale the log-density for the quadrature tolerance
% else
% lambdaconf=[mg-6*sg,mg+6*sg];
% C=log(1)-lpt(mg,0);
% end
%
% if nu>nu_lim
% % the limiting Gaussian case
% Vz=sigm2_i+sigma2/eta;
% lnZhat = 0.5*(-log(eta) +(1-eta)*log(2*pi*sigma2) -log(2*pi*Vz)) -(0.5/Vz)*(yy-myy_i)^2;
% muhat = mg;
% sigm2hat = Vg;
% else
% % Integrate with quadrature
% RTOL = 1.e-6;
% ATOL = 1.e-7;
% tic
% [m_0, m_1, m_2] = quad_moments(@(f) exp(lpt(f,C)),lambdaconf(1), lambdaconf(2), RTOL, ATOL);toc
% muhat = m_1;
% sigm2hat = m_2 - m_1.^2;
% lnZhat = log(m_0) -C;
% end
function lpdf = lpt(f,C)
% logarithm of the tilted distribution
r = yy-f;
lpdf = gammaln((nu + 1) / 2) - gammaln(nu/2) -log(nu.*pi.*sigma2)/2;
lpdf = lpdf + log(1 + r.^2./nu./sigma2) .* (-(nu+1)/2);
lpdf = lpdf*eta - (0.5/sigm2_i) * (f-myy_i).^2 + (C-log(2*pi*sigm2_i)/2);
end
end
function [g_i] = lik_t_siteDeriv2(likelih, y, yi, sigm2_i, myy_i, z, eta, lnZhat)
%LIK_T_SITEDERIV2  Evaluate the expectation of the gradient
% of the log likelihood term with respect
% to the likelihood parameters for EP
%
% Description
%    G_I = LIK_T_SITEDERIV2(LIK, Y, I, S2, MYY, Z, ETA, LNZHAT)
%    takes a likelihood structure LIK, observations Y, index I
% and cavity variance S2 and mean MYY. Returns E_f [d log
% p(y_i|f_i) /d a], where a is the likelihood parameter and the
% expectation is over the marginal posterior. This term is
% needed when evaluating the gradients of the marginal
% likelihood estimate Z_EP with respect to the likelihood
% parameters (see Seeger (2008): Expectation propagation for
% exponential families). This subfunction is needed when using
% robust-EP for inference with non-Gaussian likelihoods and there
% are likelihood parameters.
%
% See also
% GPEP_G
if nargin<7
eta=1;
end
yy = y(yi);
nu = likelih.nu;
sigma2 = likelih.sigma2;
sigma = sqrt(sigma2);
% limiting distribution (nu -> infinity)
Vg=1/(1/sigm2_i +eta/sigma2);
mg=Vg*(myy_i/sigm2_i +yy*eta/sigma2);
sigm_i=sqrt(sigm2_i);
sg=sqrt(Vg);
% set integration limits and scaling
nu_lim=1e10;
if nu<nu_lim
if sqrt(sigma2/sigm2_i)<0.05
% set the integration limits when the likelihood is very narrow
% grid resolution
dd=10;
df = [12*sigm_i/100 2*dd*sigma/100];
if yy>=myy_i
% grid break points
bp=[min(myy_i-6*sigm_i,yy-dd*sigma) myy_i-6*sigm_i, ...
min(myy_i+6*sigm_i,yy-dd*sigma), yy-dd*sigma, yy+dd*sigma,...
max(myy_i+6*sigm_i,yy+dd*sigma)];
% grid values
a=1e-6;
fvec =[ bp(1):df(2):bp(2)-a, bp(2):df(1):bp(3)-a, bp(3):max(df):bp(4)-a, ...
bp(4):df(2):bp(5)-a, bp(5):df(1):bp(6)];
else
% grid break points
bp=[min(myy_i-6*sigm_i,yy-dd*sigma), yy-dd*sigma, yy+dd*sigma,...
max(myy_i-6*sigm_i,yy+dd*sigma), myy_i+6*sigm_i, ...
max(myy_i+6*sigm_i,yy+dd*sigma)];
% grid values
a=1e-6;
fvec =[ bp(1):df(1):bp(2)-a, bp(2):df(2):bp(3)-a, bp(3):max(df):bp(4)-a, ...
bp(4):df(1):bp(5)-a, bp(5):df(2):bp(6)];
end
np=numel(fvec);
logpt = lpt(fvec,0);
lpt_max = max([logpt lpt([myy_i mg],0)]);
lambdaconf=[fvec(1), fvec(end)];
for i1=2:np-1
if logpt(i1) < lpt_max+log(1e-7) %(exp(logpt(i1))/exp(lpt_max) < 1e-7)
lambdaconf(1) = fvec(i1);
else
break;
end
end
for i1=1:np-2
if logpt(end-i1) < lpt_max+log(1e-7) %(exp(logpt(end-i1))/exp(lpt_max) < 1e-7)
lambdaconf(2) = fvec(end-i1);
else
break;
end
end
else
% set the integration limits in easier cases
np=20;
if mg>myy_i
lambdaconf=[myy_i-6*sigm_i,max(mg+6*sg,myy_i+6*sigm_i)];
fvec=linspace(myy_i,mg,np);
else
lambdaconf=[min(mg-6*sg,myy_i-6*sigm_i),myy_i+6*sigm_i];
fvec=linspace(mg,myy_i,np);
end
lpt_max=max(lpt(fvec,0));
end
C=log(1)-lpt_max; % scale the log-density for the quadrature tolerance
else
lambdaconf=[mg-6*sg,mg+6*sg];
C=log(1)-lpt(mg,0);
end
if nu>nu_lim
% the limiting normal observation model
Vz=sigm2_i+sigma2/eta;
g_i(1) = 0.5*( (1-eta)/sigma2 -1/Vz/eta + (yy-myy_i)^2 /Vz^2 /eta ) *sigma2/eta;
if (isfield(likelih,'p') && ~isempty(likelih.p.nu))
g_i(2) = 0;
end
else
% Integrate with quadrature
RTOL = 1.e-6;
ATOL = 1e-7;
% Integrate with quad
%zm=@(f) exp(lpt(f,C));
%[m_0, fhncnt] = quadgk(zm, lambdaconf(1), lambdaconf(2),'AbsTol',ATOL,'RelTol',RTOL)
% Use the normalization determined in the lik_t_tiltedMoments2
m_0=exp(lnZhat+C);
zm=@(f) deriv_sigma2(f).*exp(lpt(f,C))*sigma2;
[g_i(1), fhncnt] = quadgk( zm, lambdaconf(1), lambdaconf(2),'AbsTol',ATOL,'RelTol',RTOL);
g_i(1) = g_i(1)/m_0;
if (isfield(likelih,'p') && ~isempty(likelih.p.nu))
zm=@(f) deriv_nu(f).*exp(lpt(f,C));
[g_i(2), fhncnt] = quadgk( zm, lambdaconf(1), lambdaconf(2),'AbsTol',ATOL,'RelTol',RTOL);
g_i(2) = g_i(2)/m_0.*nu.*log(nu);
end
end
function lpdf = lpt(f,C)
% logarithm of the tilted distribution
r = yy-f;
lpdf = gammaln((nu + 1) / 2) - gammaln(nu/2) -log(nu.*pi.*sigma2)/2;
lpdf = lpdf + log(1 + r.^2./nu./sigma2) .* (-(nu+1)/2);
lpdf = lpdf*eta - (0.5/sigm2_i) * (f-myy_i).^2 + (C-log(2*pi*sigm2_i)/2);
end
function g = deriv_nu(f)
% derivative of the log-likelihood wrt nu
r = yy-f;
temp = r.^2 ./(nu*sigma2);
g = psi((nu+1)/2) - psi(nu/2) - 1/nu;
g = g + (1+1/nu).*temp./(1+temp);
% for small values use a more accurate method for log(1+x)
ii = temp<1e3;
g(ii) = g(ii) - log1p(temp(ii));
g(~ii) = g(~ii) - log(1+temp(~ii));
g = g*0.5;
end
function g = deriv_sigma2(f)
% derivative of the log-likelihood wrt sigma2
r = yy-f;
temp = r.^2 /sigma2;
g = -1/sigma2/2 + ((1+1/nu)/2) * temp ./ (1 + temp/nu) /sigma2;
end
end
function [f, a] = lik_t_optimizef(gp, y, K, Lav, K_fu)
%LIK_T_OPTIMIZEF function to optimize the latent variables
% with EM algorithm
%
% Description:
% [F, A] = LIK_T_OPTIMIZEF(GP, Y, K, Lav, K_fu) Takes Gaussian
% process structure GP, observations Y and the covariance
% matrix K. Solves the posterior mode of F using EM algorithm
% and evaluates A = (K + W)\Y as a sideproduct. Lav and K_fu
% are needed for sparse approximations. For details, see
  %    Vanhatalo, Jylänki and Vehtari (2009): Gaussian process
% regression with Student-t likelihood. This subfunction is
% needed when using lik_specific optimization method for mode
% finding in Laplace algorithm.
%
iter = 1;
sigma2 = gp.lik.sigma2;
% if sigma2==0
% f=NaN;a=NaN;
% return
% end
nu = gp.lik.nu;
n = length(y);
switch gp.type
case 'FULL'
iV = ones(n,1)./sigma2;
siV = sqrt(iV);
B = eye(n) + siV*siV'.*K;
[L,notpositivedefinite] = chol(B);
if notpositivedefinite
f=NaN;a=NaN;
return
end
B=B';
b = iV.*y;
a = b - siV.*(L'\(L\(siV.*(K*b))));
f = K*a;
while iter < 200
fold = f;
iV = (nu+1) ./ (nu.*sigma2 + (y-f).^2);
siV = sqrt(iV);
B = eye(n) + siV*siV'.*K;
L = chol(B)';
b = iV.*y;
ws=warning('off','MATLAB:nearlySingularMatrix');
a = b - siV.*(L'\(L\(siV.*(K*b))));
warning(ws);
f = K*a;
if max(abs(f-fold)) < 1e-8
break
end
iter = iter + 1;
end
case 'FIC'
K_uu = K;
Luu = chol(K_uu)';
B=Luu\(K_fu'); % u x f
K = diag(Lav) + B'*B;
iV = ones(n,1)./sigma2;
siV = sqrt(iV);
B = eye(n) + siV*siV'.*K;
L = chol(B)';
b = iV.*y;
a = b - siV.*(L'\(L\(siV.*(K*b))));
f = K*a;
while iter < 200
fold = f;
iV = (nu+1) ./ (nu.*sigma2 + (y-f).^2);
siV = sqrt(iV);
B = eye(n) + siV*siV'.*K;
L = chol(B)';
b = iV.*y;
a = b - siV.*(L'\(L\(siV.*(K*b))));
f = K*a;
if max(abs(f-fold)) < 1e-8
break
end
iter = iter + 1;
end
end
end
function upfact = lik_t_upfact(gp, y, mu, ll, z)
nu = gp.lik.nu;
sigma = sqrt(gp.lik.sigma2);
sll = sqrt(ll);
fh_e = @(f) t_pdf(f, nu, y, sigma).*norm_pdf(f, mu, sll);
EE = quadgk(fh_e, -40, 40);
fm = @(f) f.*t_pdf(f, nu, y, sigma).*norm_pdf(f, mu, sll)./EE;
mm = quadgk(fm, -40, 40);
fV = @(f) (f - mm).^2.*t_pdf(f, nu, y, sigma).*norm_pdf(f, mu, sll)./EE;
Varp = quadgk(fV, -40, 40);
upfact = -(Varp - ll)./ll^2;
end
function [lpy, Ey, Vary] = lik_t_predy(lik, Ef, Varf, y, z)
%LIK_T_PREDY Returns the predictive mean, variance and density of y
%
% Description
  %    LPY = LIK_T_PREDY(LIK, EF, VARF, YT)
% Returns logarithm of the predictive density PY of YT, that is
% p(yt | zt) = \int p(yt | f, zt) p(f|y) df.
% This requires also the observations YT. This subfunction is
  %    needed when computing posterior predictive distributions for
% future observations.
%
% [LPY, EY, VARY] = LIK_T_PREDY(LIK, EF, VARF) takes a likelihood
% structure LIK, posterior mean EF and posterior Variance
% VARF of the latent variable and returns the posterior
% predictive mean EY and variance VARY of the observations
% related to the latent variables. This subfunction is needed when
  %    computing posterior predictive distributions for future observations.
%
%
% See also
% GPLA_PRED, GPEP_PRED, GPMC_PRED
nu = lik.nu;
sigma2 = lik.sigma2;
sigma = sqrt(sigma2);
Ey = zeros(size(Ef));
EVary = zeros(size(Ef));
VarEy = zeros(size(Ef));
lpy = zeros(size(Ef));
if nargout > 1
% for i1=1:length(Ef)
% %%% With quadrature
% ci = sqrt(Varf(i1));
%
% F = @(x) x.*norm_pdf(x,Ef(i1),sqrt(Varf(i1)));
% Ey(i1) = quadgk(F,Ef(i1)-6*ci,Ef(i1)+6*ci);
%
% F2 = @(x) (nu./(nu-2).*sigma2).*norm_pdf(x,Ef(i1),sqrt(Varf(i1)));
% EVary(i1) = quadgk(F2,Ef(i1)-6*ci,Ef(i1)+6*ci);
%
% F3 = @(x) x.^2.*norm_pdf(x,Ef(i1),sqrt(Varf(i1)));
% VarEy(i1) = quadgk(F3,Ef(i1)-6*ci,Ef(i1)+6*ci) - Ey(i1).^2;
% end
% Vary = EVary + VarEy;
Ey = Ef;
if nu>2
Vary=nu./(nu-2).*sigma2 +Varf;
else
warning('Variance of Student''s t-distribution is not defined for nu<=2')
Vary=NaN+Varf;
end
end
lpy = zeros(length(y),1);
for i2 = 1:length(y)
mean_app = Ef(i2);
sigm_app = sqrt(Varf(i2));
pd = @(f) t_pdf(y(i2), nu, f, sigma).*norm_pdf(f,Ef(i2),sqrt(Varf(i2)));
lpy(i2) = log(quadgk(pd, mean_app - 12*sigm_app, mean_app + 12*sigm_app));
end
end
function prctys = lik_t_predprcty(lik, Ef, Varf, zt, prcty)
%LIK_T_PREDPRCTY Returns the percentiles of predictive density of y
%
% Description
  %    PRCTY = LIK_T_PREDPRCTY(LIK, EF, VARF, ZT, PRCTY)
% Returns percentiles of the predictive density PY of YT. This
% subfunction is needed when using function gp_predprcty.
%
% See also
  %    GP_PREDPRCTY
opt=optimset('TolX',1e-5,'Display','off');
nt=size(Ef,1);
prctys = zeros(nt,numel(prcty));
prcty=prcty/100;
nu = lik.nu;
nu_p=max(2.5,nu);
sigma2 = lik.sigma2;
Vary=nu_p./(nu_p-2).*sigma2 +Varf;
for i1=1:nt
ci = sqrt(Varf(i1));
for i2=1:numel(prcty)
minf=sqrt(Vary(i1))*tinv(prcty(i2),nu)+(Ef(i1)-2.5*sqrt(Vary(i1)));
maxf=sqrt(Vary(i1))*tinv(prcty(i2),nu)+(Ef(i1)+2.5*sqrt(Vary(i1)));
a=(fminbnd(@(a) (quadgk(@(f) tcdf((a-f)/sqrt(Vary(i1)),nu).*norm_pdf(f,Ef(i1),ci),Ef(i1)-6*ci,Ef(i1)+6*ci,'AbsTol',1e-4)-prcty(i2)).^2,minf,maxf,opt));
% a=(fminbnd(@(a) (quadgk(@(f) quadgk(@(y) t_pdf(y,nu,Ef(i1),sqrt(Vary(i1))),Ef(i1)-12*sqrt(Vary(i1)),a).*norm_pdf(f,Ef(i1),ci),Ef(i1)-6*ci,Ef(i1)+6*ci,'AbsTol',1e-4)-prcty(i2)).^2,minf,maxf,opt));
prctys(i1,i2)=a;
close all;
end
end
end
function mu = lik_t_invlink(lik, f, z)
%LIK_T_INVLINK Returns values of inverse link function
%
% Description
% P = LIK_T_INVLINK(LIK, F) takes a likelihood structure LIK and
% latent values F and returns the values MU of inverse link function.
% This subfunction is needed when using gp_predprctmu.
%
% See also
% LIK_T_LL, LIK_T_PREDY
mu = f;
end
function reclik = lik_t_recappend(reclik, ri, lik)
%RECAPPEND Record append
% Description
  %    RECLIK = LIK_T_RECAPPEND(RECLIK, RI, LIK) takes a likelihood
  %    record structure RECLIK, record index RI and likelihood structure
  %    LIK with the current MCMC samples of the parameters. Returns
  %    RECLIK which contains the old samples appended with the current
  %    ones. This subfunction is needed when using MCMC sampling (gp_mc).
if nargin == 2
% Initialize the record
reclik.type = 'Student-t';
% Initialize parameters
reclik.nu = [];
reclik.sigma2 = [];
% Set the function handles
reclik.fh.pak = @lik_t_pak;
reclik.fh.unpak = @lik_t_unpak;
reclik.fh.lp = @lik_t_lp;
reclik.fh.lpg = @lik_t_lpg;
reclik.fh.ll = @lik_t_ll;
reclik.fh.llg = @lik_t_llg;
reclik.fh.llg2 = @lik_t_llg2;
reclik.fh.llg3 = @lik_t_llg3;
reclik.fh.tiltedMoments = @lik_t_tiltedMoments;
reclik.fh.tiltedMoments2 = @lik_t_tiltedMoments2;
reclik.fh.siteDeriv = @lik_t_siteDeriv;
reclik.fh.siteDeriv2 = @lik_t_siteDeriv2;
reclik.fh.optimizef = @lik_t_optimizef;
reclik.fh.upfact = @lik_t_upfact;
reclik.fh.invlink = @lik_t_invlink;
reclik.fh.predy = @lik_t_predy;
reclik.fh.predprcty = @lik_t_predprcty;
reclik.fh.recappend = @lik_t_recappend;
reclik.p.nu=[];
if ~isempty(ri.p.nu)
reclik.p.nu = ri.p.nu;
end
reclik.p.sigma2=[];
if ~isempty(ri.p.sigma2)
reclik.p.sigma2 = ri.p.sigma2;
end
else
% Append to the record
likp = lik.p;
% record sigma2
reclik.sigma2(ri,:) = lik.sigma2;
if isfield(likp,'sigma2') && ~isempty(likp.sigma2)
reclik.p.sigma2 = likp.sigma2.fh.recappend(reclik.p.sigma2, ri, likp.sigma2);
end
% record nu
reclik.nu(ri,:) = lik.nu;
if isfield(likp,'nu') && ~isempty(likp.nu)
reclik.p.nu = likp.nu.fh.recappend(reclik.p.nu, ri, likp.nu);
end
end
end
| github | lcnbeapp/beapp-master | gpcf_rq.m | .m | beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gpcf_rq.m | 30,341 | utf_8 | c746eccbc618b9d29506ad32406416e9 |
function gpcf = gpcf_rq(varargin)
%GPCF_RQ Create a rational quadratic covariance function
%
% Description
% GPCF = GPCF_RQ('PARAM1',VALUE1,'PARAM2,VALUE2,...) creates
% rational quadratic covariance function structure in which the
% named parameters have the specified values. Any unspecified
% parameters are set to default values.
%
% GPCF = GPCF_RQ(GPCF,'PARAM1',VALUE1,'PARAM2,VALUE2,...)
% modify a covariance function structure with the named
% parameters altered with the specified values.
%
% Parameters for rational quadratic covariance function [default]
% magnSigma2 - magnitude (squared) [0.1]
% lengthScale - length scale for each input. [1]
% This can be either scalar corresponding
% to an isotropic function or vector
% defining own length-scale for each input
% direction.
% alpha - shape parameter [20]
% magnSigma2_prior - prior for magnSigma2 [prior_logunif]
% lengthScale_prior - prior for lengthScale [prior_t]
% alpha_prior - prior for alpha [prior_unif]
% metric - metric structure used by the covariance function []
% selectedVariables - vector defining which inputs are used [all]
% selectedVariables is shorthand for using
% metric_euclidean with corresponding components
%
% Note! If the prior is 'prior_fixed' then the parameter in
% question is considered fixed and it is not handled in
% optimization, grid integration, MCMC etc.
%
% See also
% GP_SET, GPCF_*, PRIOR_*, METRIC_*
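%
% Example (an illustrative sketch; the parameter values are arbitrary):
%
%   cf1 = gpcf_rq('magnSigma2', 1, 'lengthScale', 2, 'alpha', 5);  % isotropic
%   cf2 = gpcf_rq('lengthScale', [1 0.5 3]);    % ARD: one length-scale per input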
% Copyright (c) 2007-2010 Jarno Vanhatalo
% Copyright (c) 2010 Tuomas Nikoskinen, Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
if nargin>0 && ischar(varargin{1}) && ismember(varargin{1},{'init' 'set'})
% remove init and set
varargin(1)=[];
end
ip=inputParser;
ip.FunctionName = 'GPCF_RQ';
ip.addOptional('gpcf', [], @isstruct);
ip.addParamValue('magnSigma2',0.1, @(x) isscalar(x) && x>0);
ip.addParamValue('lengthScale',1, @(x) isvector(x) && all(x>0));
ip.addParamValue('alpha',20, @(x) isscalar(x) && x>0);
ip.addParamValue('metric',[], @isstruct);
ip.addParamValue('magnSigma2_prior', prior_logunif(), ...
@(x) isstruct(x) || isempty(x));
ip.addParamValue('lengthScale_prior',prior_t(), ...
@(x) isstruct(x) || isempty(x));
ip.addParamValue('alpha_prior', prior_unif(), ...
@(x) isstruct(x) || isempty(x));
ip.addParamValue('selectedVariables',[], @(x) isempty(x) || ...
(isvector(x) && all(x>0)));
ip.parse(varargin{:});
gpcf=ip.Results.gpcf;
if isempty(gpcf)
init=true;
gpcf.type = 'gpcf_rq';
else
    if ~isfield(gpcf,'type') || ~isequal(gpcf.type,'gpcf_rq')
error('First argument does not seem to be a valid covariance function structure')
end
init=false;
end
% Initialize parameters
if init || ~ismember('lengthScale',ip.UsingDefaults)
gpcf.lengthScale = ip.Results.lengthScale;
end
if init || ~ismember('magnSigma2',ip.UsingDefaults)
gpcf.magnSigma2 = ip.Results.magnSigma2;
end
if init || ~ismember('alpha',ip.UsingDefaults)
gpcf.alpha = ip.Results.alpha;
end
% Initialize prior structure
if init
gpcf.p=[];
end
if init || ~ismember('lengthScale_prior',ip.UsingDefaults)
gpcf.p.lengthScale=ip.Results.lengthScale_prior;
end
if init || ~ismember('magnSigma2_prior',ip.UsingDefaults)
gpcf.p.magnSigma2=ip.Results.magnSigma2_prior;
end
if init || ~ismember('alpha_prior',ip.UsingDefaults)
gpcf.p.alpha=ip.Results.alpha_prior;
end
%Initialize metric
if ~ismember('metric',ip.UsingDefaults)
if ~isempty(ip.Results.metric)
gpcf.metric = ip.Results.metric;
gpcf = rmfield(gpcf, 'lengthScale');
gpcf.p = rmfield(gpcf.p, 'lengthScale');
elseif isfield(gpcf,'metric')
if ~isfield(gpcf,'lengthScale')
gpcf.lengthScale = gpcf.metric.lengthScale;
end
if ~isfield(gpcf.p,'lengthScale')
gpcf.p.lengthScale = gpcf.metric.p.lengthScale;
end
gpcf = rmfield(gpcf, 'metric');
end
end
% selectedVariables options implemented using metric_euclidean
if ~ismember('selectedVariables',ip.UsingDefaults)
if ~isfield(gpcf,'metric')
if ~isempty(ip.Results.selectedVariables)
gpcf.selectedVariables = ip.Results.selectedVariables;
% gpcf.metric=metric_euclidean('components',...
% num2cell(ip.Results.selectedVariables),...
% 'lengthScale',gpcf.lengthScale,...
% 'lengthScale_prior',gpcf.p.lengthScale);
% gpcf = rmfield(gpcf, 'lengthScale');
% gpcf.p = rmfield(gpcf.p, 'lengthScale');
end
elseif isfield(gpcf,'metric')
if ~isempty(ip.Results.selectedVariables)
gpcf.metric=metric_euclidean(gpcf.metric,...
'components',...
num2cell(ip.Results.selectedVariables));
if ~ismember('lengthScale',ip.UsingDefaults)
gpcf.metric.lengthScale=ip.Results.lengthScale;
gpcf = rmfield(gpcf, 'lengthScale');
end
if ~ismember('lengthScale_prior',ip.UsingDefaults)
gpcf.metric.p.lengthScale=ip.Results.lengthScale_prior;
gpcf.p = rmfield(gpcf.p, 'lengthScale');
end
else
if ~isfield(gpcf,'lengthScale')
gpcf.lengthScale = gpcf.metric.lengthScale;
end
if ~isfield(gpcf.p,'lengthScale')
gpcf.p.lengthScale = gpcf.metric.p.lengthScale;
end
gpcf = rmfield(gpcf, 'metric');
end
end
end
if init
% Set the function handles to the subfunctions
gpcf.fh.pak = @gpcf_rq_pak;
gpcf.fh.unpak = @gpcf_rq_unpak;
gpcf.fh.lp = @gpcf_rq_lp;
gpcf.fh.lpg = @gpcf_rq_lpg;
gpcf.fh.cfg = @gpcf_rq_cfg;
gpcf.fh.ginput = @gpcf_rq_ginput;
gpcf.fh.cov = @gpcf_rq_cov;
gpcf.fh.trcov = @gpcf_rq_trcov;
gpcf.fh.trvar = @gpcf_rq_trvar;
gpcf.fh.recappend = @gpcf_rq_recappend;
end
end
function [w, s] = gpcf_rq_pak(gpcf)
%GPCF_RQ_PAK Combine GP covariance function parameters into
% one vector
%
% Description
% W = GPCF_RQ_PAK(GPCF) takes a covariance function structure
% GPCF and combines the covariance function parameters and
% their hyperparameters into a single row vector W. This is a
% mandatory subfunction used for example in energy and gradient
% computations.
%
% w = [ log(gpcf.magnSigma2)
% (hyperparameters of gpcf.magnSigma2)
% log(gpcf.lengthScale(:))
% (hyperparameters of gpcf.lengthScale)
% log(log(gpcf.alpha))
% (hyperparameters of gpcf.alpha)]'
%
% See also
% GPCF_RQ_UNPAK
w = []; s = {};
if ~isempty(gpcf.p.magnSigma2)
w = [w log(gpcf.magnSigma2)];
s = [s; 'log(rq.magnSigma2)'];
% Hyperparameters of magnSigma2
[wh sh] = gpcf.p.magnSigma2.fh.pak(gpcf.p.magnSigma2);
w = [w wh];
s = [s; sh];
end
if isfield(gpcf,'metric')
[wm sm] = gpcf.metric.fh.pak(gpcf.metric);
w = [w wm];
s = [s; sm];
else
if ~isempty(gpcf.p.lengthScale)
w = [w log(gpcf.lengthScale)];
if numel(gpcf.lengthScale)>1
s = [s; sprintf('log(rq.lengthScale x %d)',numel(gpcf.lengthScale))];
else
s = [s; 'log(rq.lengthScale)'];
end
% Hyperparameters of lengthScale
[wh sh] = gpcf.p.lengthScale.fh.pak(gpcf.p.lengthScale);
w = [w wh];
s = [s; sh];
end
end
if ~isempty(gpcf.p.alpha)
w= [w log(log(gpcf.alpha))];
s = [s; 'log(log(rq.alpha))'];
% Hyperparameters of alpha
[wh sh] = gpcf.p.alpha.fh.pak(gpcf.p.alpha);
w = [w wh];
s = [s; sh];
end
end
function [gpcf, w] = gpcf_rq_unpak(gpcf, w)
%GPCF_RQ_UNPAK Sets the covariance function parameters into
% the structure
%
% Description
% [GPCF, W] = GPCF_RQ_UNPAK(GPCF, W) takes a covariance
% function structure GPCF and a hyper-parameter vector W, and
% returns a covariance function structure identical to the
% input, except that the covariance hyper-parameters have been
% set to the values in W. Deletes the values set to GPCF from
% W and returns the modified W. This is a mandatory subfunction
% used for example in energy and gradient computations.
%
% Assignment is inverse of
% w = [ log(gpcf.magnSigma2)
% (hyperparameters of gpcf.magnSigma2)
% log(gpcf.lengthScale(:))
% (hyperparameters of gpcf.lengthScale)
% log(log(gpcf.alpha))
% (hyperparameters of gpcf.alpha)]'
%
% See also
% GPCF_RQ_PAK
gpp=gpcf.p;
if ~isempty(gpp.magnSigma2)
gpcf.magnSigma2 = exp(w(1));
w = w(2:end);
% Hyperparameters of magnSigma2
[p, w] = gpcf.p.magnSigma2.fh.unpak(gpcf.p.magnSigma2, w);
gpcf.p.magnSigma2 = p;
end
if isfield(gpcf,'metric')
[metric, w] = gpcf.metric.fh.unpak(gpcf.metric, w);
gpcf.metric = metric;
else
if ~isempty(gpp.lengthScale)
i1=1;
i2=length(gpcf.lengthScale);
gpcf.lengthScale = exp(w(i1:i2));
w = w(i2+1:end);
% Hyperparameters of lengthScale
[p, w] = gpcf.p.lengthScale.fh.unpak(gpcf.p.lengthScale, w);
gpcf.p.lengthScale = p;
end
end
if ~isempty(gpp.alpha)
gpcf.alpha = exp(exp(w(1)));
w = w(2:end);
% Hyperparameters of alpha
[p, w] = gpcf.p.alpha.fh.unpak(gpcf.p.alpha, w);
gpcf.p.alpha = p;
end
end
function lp =gpcf_rq_lp(gpcf, x, t)
%GPCF_RQ_LP Evaluate the log prior of covariance function parameters
%
% Description
% LP = GPCF_RQ_LP(GPCF, X, T) takes a covariance function
% structure GPCF and returns log(p(th)), where th collects the
% parameters. This is a mandatory subfunction used for example
% in energy computations.
%
% See also
% GPCF_RQ_PAK, GPCF_RQ_UNPAK, GPCF_RQ_LPG, GP_E
% Evaluate the prior contribution to the error. The parameters that
% are sampled are transformed, e.g., W = log(w) where w is all
% the "real" samples. On the other hand errors are evaluated in
% the W-space so we also need to take into account the Jacobian of
% the transformation, e.g., W -> w = exp(W). See Gelman et al., 2004,
% Bayesian Data Analysis, second edition, p. 24.
lp = 0;
gpp=gpcf.p;
if ~isempty(gpcf.p.magnSigma2)
lp = lp +gpp.magnSigma2.fh.lp(gpcf.magnSigma2, ...
gpp.magnSigma2) +log(gpcf.magnSigma2);
end
if isfield(gpcf,'metric')
lp = lp +gpcf.metric.fh.lp(gpcf.metric);
elseif ~isempty(gpp.lengthScale)
lp = lp +gpp.lengthScale.fh.lp(gpcf.lengthScale, ...
gpp.lengthScale) +sum(log(gpcf.lengthScale));
end
if ~isempty(gpcf.p.alpha)
lp = lp +gpp.alpha.fh.lp(gpcf.alpha, gpp.alpha) ...
+log(gpcf.alpha) +log(log(gpcf.alpha));
end
end
function lpg = gpcf_rq_lpg(gpcf)
%GPCF_RQ_LPG Evaluate gradient of the log prior with respect
% to the parameters
%
% Description
% LPG = GPCF_RQ_LPG(GPCF) takes a covariance function
% structure GPCF and returns LPG = d log (p(th))/dth, where th
% is the vector of parameters. This is a mandatory subfunction
% used for example in energy and gradient computations.
%
% See also
% GPCF_RQ_PAK, GPCF_RQ_UNPAK, GPCF_RQ_LP, GP_G
lpg = [];
gpp=gpcf.p;
if ~isempty(gpcf.p.magnSigma2)
lpgs = gpp.magnSigma2.fh.lpg(gpcf.magnSigma2, gpp.magnSigma2);
lpg = [lpg lpgs(1).*gpcf.magnSigma2+1 lpgs(2:end)];
end
if isfield(gpcf,'metric')
lpg_dist = gpcf.metric.fh.lpg(gpcf.metric);
lpg=[lpg lpg_dist];
else
if ~isempty(gpcf.p.lengthScale)
lll = length(gpcf.lengthScale);
lpgs = gpp.lengthScale.fh.lpg(gpcf.lengthScale, gpp.lengthScale);
lpg = [lpg lpgs(1:lll).*gpcf.lengthScale+1 lpgs(lll+1:end)];
end
end
if ~isempty(gpcf.p.alpha)
lpgs = gpp.alpha.fh.lpg(gpcf.alpha, gpp.alpha);
lpg = [lpg lpgs(1).*gpcf.alpha.*log(gpcf.alpha)+log(gpcf.alpha)+1 lpgs(2:end)];
end
end
function DKff = gpcf_rq_cfg(gpcf, x, x2, mask, i1)
%GPCF_RQ_CFG Evaluate gradient of covariance function
% with respect to the parameters
%
% Description
% DKff = GPCF_RQ_CFG(GPCF, X) takes a covariance function
% structure GPCF, a matrix X of input vectors and returns
% DKff, the gradients of covariance matrix Kff = k(X,X) with
% respect to th (cell array with matrix elements). This is a
% mandatory subfunction used in gradient computations.
%
% DKff = GPCF_RQ_CFG(GPCF, X, X2) takes a covariance function
% structure GPCF, a matrix X of input vectors and returns
% DKff, the gradients of covariance matrix Kff = k(X,X2) with
% respect to th (cell array with matrix elements). This subfunction
% is needed when using sparse approximations (e.g. FIC).
%
% DKff = GPCF_RQ_CFG(GPCF, X, [], MASK) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the diagonal of gradients of covariance matrix
% Kff = k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using sparse
% approximations (e.g. FIC).
%
% DKff = GPCF_RQ_CFG(GPCF, X, X2, [], i) takes a covariance function
% structure GPCF, a matrix X of input vectors and returns
% DKff, the gradients of covariance matrix Kff = k(X,X2), or
% k(X,X) if X2 is empty, with respect to ith hyperparameter. This
% subfunction is needed when using memory save option in gp_set.
%
% See also
% GPCF_RQ_PAK, GPCF_RQ_UNPAK, GPCF_RQ_LP, GP_G
gpp=gpcf.p;
a=(gpcf.alpha+1)/gpcf.alpha;
i2=1;
DKff = {};
if nargin==5
% Use memory save option
savememory=1;
if i1==0
% Return number of hyperparameters
i=0;
if ~isempty(gpcf.p.magnSigma2)
i=i+1;
end
if ~isempty(gpcf.p.lengthScale)
i=i+length(gpcf.lengthScale);
end
if ~isempty(gpcf.p.alpha)
i=i+1;
end
DKff=i;
return
end
else
savememory=0;
end
% Evaluate: DKff{1} = d Kff / d magnSigma2
% DKff{2} = d Kff / d alpha
% DKff{3} = d Kff / d lengthscale
% NOTE! Here we have already taken into account that the parameters
% are transformed through log() and thus dK/dlog(p) = p * dK/dp
  % (or through log(log()) for alpha)
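  % (explicitly: dK/dlog(l) = l*dK/dl, dK/dlog(magnSigma2) = magnSigma2*dK/dmagnSigma2,
  %  and dK/d(log(log(alpha))) = alpha*log(alpha)*dK/dalpha)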
% evaluate the gradient for training covariance
if nargin == 2 || (isempty(x2) && isempty(mask))
Cdm = gpcf_rq_trcov(gpcf, x);
ii1=0;
if ~isempty(gpcf.p.magnSigma2)
ii1 = ii1 +1;
DKff{ii1} = Cdm;
end
ma2=gpcf.magnSigma2;
if isfield(gpcf,'metric')
dist = gpcf.metric.fh.dist(gpcf.metric, x);
distg = gpcf.metric.fh.distg(gpcf.metric, x);
gprior_dist = gpcf.metric.fh.lpg(gpcf.metric);
% dalpha
ii1=ii1+1;
DKff{ii1} = (ma2.^(1-a).*.5.*dist.^2.*Cdm.^a - gpcf.alpha.*log(Cdm.^(-1/gpcf.alpha)./ma2.^(-1/gpcf.alpha)).*Cdm).*log(gpcf.alpha);
% dlengthscale
for i=1:length(distg)
ii1=ii1+1;
DKff{ii1} = Cdm.*-dist./(1+dist.^2./(2*gpcf.alpha)).*distg{i};
end
else
if isfield(gpcf, 'selectedVariables')
x = x(:,gpcf.selectedVariables);
end
[n, m] =size(x);
if ~savememory
i1=1:m;
i2=1:m;
else
if i1==1
DKff=DKff{1};
return
end
i1=i1-1;
if i1 > length(gpcf.lengthScale)
i2=1:m;
else
i2=i1;
end
ii1=ii1-1;
end
% loop over all the lengthScales
if length(gpcf.lengthScale) == 1
% Isotropic = no ARD
s = 1./(gpcf.lengthScale^2);
dist2 = 0;
for i=1:m
dist2 = dist2 + (bsxfun(@minus,x(:,i),x(:,i)')).^2;
end
if ~isempty(gpcf.p.lengthScale) && (~savememory || i1==1)
% dlengthscale
ii1 = ii1+1;
DKff{ii1} = Cdm.^a.*s.*dist2.*gpcf.magnSigma2^(-a+1);
end
if ~isempty(gpcf.p.alpha) && (~savememory || length(DKff) == 1)
% dalpha
ii1=ii1+1;
DKff{ii1} = (ma2^(1-a).*.5.*dist2.*s.*Cdm.^a - gpcf.alpha.*log(Cdm.^(-1/gpcf.alpha)./ma2^(-1/gpcf.alpha)).*Cdm).*log(gpcf.alpha);
end
else
% ARD
s = 1./(gpcf.lengthScale.^2);
D=zeros(size(Cdm));
for i=i2
dist2 =(bsxfun(@minus,x(:,i),x(:,i)')).^2;
% sum distance for the dalpha
D=D+dist2.*s(i);
% dlengthscale
if ~isempty(gpcf.p.lengthScale) && all(i1 <= m)
ii1 = ii1+1;
DKff{ii1}=Cdm.^a.*s(i).*dist2.*gpcf.magnSigma2.^(-a+1);
end
end
if ~isempty(gpcf.p.alpha) && (~savememory || isvector(i2))
% dalpha
ii1=ii1+1;
DKff{ii1} = (ma2^(1-a).*.5.*D.*Cdm.^a - gpcf.alpha.*log(Cdm.^(-1/gpcf.alpha)./ma2^(-1/gpcf.alpha)).*Cdm).*log(gpcf.alpha);
end
end
end
% Evaluate the gradient of non-symmetric covariance (e.g. K_fu)
elseif nargin == 3 || isempty(mask)
if size(x,2) ~= size(x2,2)
      error('gpcf_rq -> _cfg: The number of columns in x and x2 has to be the same. ')
end
ii1=0;
K = gpcf.fh.cov(gpcf, x, x2);
if ~isempty(gpcf.p.magnSigma2)
ii1=ii1+1;
DKff{ii1} = K;
end
if isfield(gpcf,'metric')
dist = gpcf.metric.fh.dist(gpcf.metric, x, x2);
distg = gpcf.metric.fh.distg(gpcf.metric, x, x2);
gprior_dist = gpcf.metric.fh.lpg(gpcf.metric);
for i=1:length(distg)
ii1 = ii1+1;
DKff{ii1} = -K.*distg{i};
end
else
if isfield(gpcf, 'selectedVariables')
x = x(:,gpcf.selectedVariables);
x2 = x2(:,gpcf.selectedVariables);
end
[n, m] =size(x);
if ~savememory
i1=1:m;
i2=1:m;
else
if i1==1
DKff=DKff{1};
return
end
i1=i1-1;
if i1 > length(gpcf.lengthScale)
i2=1:m;
else
i2=i1;
end
ii1=ii1-1;
end
% Evaluate help matrix for calculations of derivatives with respect to the lengthScale
if length(gpcf.lengthScale) == 1
% In the case of an isotropic RQ
s = 1/gpcf.lengthScale^2;
dist = 0;
for i=1:m
dist = dist + (bsxfun(@minus,x(:,i),x2(:,i)')).^2;
end
DK_l = s.*K.^a.*dist.*gpcf.magnSigma2^(1-a);
ii1=ii1+1;
DKff{ii1} = DK_l;
if ~isempty(gpcf.p.alpha) && (~savememory || length(DKff) == 1)
% dalpha
ii1=ii1+(1-savememory);
DKff{ii1} = (gpcf.magnSigma2^(1-a).*.5.*dist.*s.*K.^a - gpcf.alpha.*log(K.^(-1/gpcf.alpha)./gpcf.magnSigma2^(-1/gpcf.alpha)).*K).*log(gpcf.alpha);
end
else
% In the case ARD is used
s = 1./gpcf.lengthScale.^2; % set the length
D=zeros(size(K));
for i=i2
dist2 =(bsxfun(@minus,x(:,i),x2(:,i)')).^2;
% sum distance for the dalpha
D=D+dist2.*s(i);
if ~isempty(gpcf.p.lengthScale) && all(i1 <= m)
D1 = s(i).*K.^a.*dist2.*gpcf.magnSigma2^(1-a);
ii1=ii1+1;
DKff{ii1} = D1;
end
end
if ~isempty(gpcf.p.alpha) && (~savememory || isvector(i2))
% dalpha
ii1=ii1+1;
DKff{ii1} = (gpcf.magnSigma2^(1-a).*.5.*D.*K.^a - gpcf.alpha.*log(K.^(-1/gpcf.alpha)./gpcf.magnSigma2^(-1/gpcf.alpha)).*K).*log(gpcf.alpha);
end
end
end
% Evaluate: DKff{1} = d mask(Kff,I) / d magnSigma2
% DKff{2...} = d mask(Kff,I) / d lengthScale
elseif nargin == 4 || nargin == 5
if isfield(gpcf,'metric')
ii1=1;
[n, m] =size(x);
DKff{ii1} = gpcf.fh.trvar(gpcf, x); % d mask(Kff,I) / d magnSigma2
dist = 0;
distg = gpcf.metric.fh.distg(gpcf.metric, x, [], 1);
gprior_dist = gpcf.metric.fh.lpg(gpcf.metric);
for i=1:length(distg)
ii1 = ii1+1;
DKff{ii1} = 0;
end
else
ii1=0;
if ~isempty(gpcf.p.magnSigma2) && (~savememory || all(i1==1))
ii1=ii1+1;
DKff{ii1} = gpcf.fh.trvar(gpcf, x); % d mask(Kff,I) / d magnSigma2
end
for i2=1:length(gpcf.lengthScale)
ii1 = ii1+1;
DKff{ii1} = 0; % d mask(Kff,I) / d lengthScale
end
end
end
if savememory
DKff=DKff{1};
end
end
function DKff = gpcf_rq_ginput(gpcf, x, x2, i1)
%GPCF_RQ_GINPUT Evaluate gradient of covariance function with
% respect to x
%
% Description
% DKff = GPCF_RQ_GINPUT(GPCF, X) takes a covariance function
% structure GPCF, a matrix X of input vectors and returns
% DKff, the gradients of covariance matrix Kff = k(X,X) with
% respect to X (cell array with matrix elements). This
% subfunction is needed when computing gradients with respect
% to inducing inputs in sparse approximations.
%
% DKff = GPCF_RQ_GINPUT(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors
% and returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% DKff = GPCF_RQ_GINPUT(GPCF, X, X2, i) takes a covariance
% function structure GPCF, a matrix X of input vectors
% and returns DKff, the gradients of covariance matrix Kff =
% k(X,X2), or k(X,X) if X2 is empty, with respect to ith
% covariate in X (cell array with matrix elements). This
% subfunction is needed when using memory save option in
% gp_set.
%
% See also
% GPCF_RQ_PAK, GPCF_RQ_UNPAK, GPCF_RQ_LP, GP_G
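%
% A hedged example of evaluating gradients with respect to the inputs
% (hypothetical inputs; assumes the constructor stores this subfunction in
% gpcf.fh.ginput):
%
%   gpcf = gpcf_rq('lengthScale', 1, 'magnSigma2', 0.5, 'alpha', 2);
%   x = randn(5, 2);
%   DKff = gpcf.fh.ginput(gpcf, x);   % one cell per element of X
%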
a=(gpcf.alpha+1)/gpcf.alpha;
[n, m] =size(x);
if nargin<4
i1=1:m;
else
% Use memory save option
if i1==0
% Return number of covariates
if isfield(gpcf,'selectedVariables')
DKff=length(gpcf.selectedVariables);
else
DKff=m;
end
return
end
end
if nargin == 2 || isempty(x2)
K = gpcf.fh.trcov(gpcf, x);
ii1 = 0;
if isfield(gpcf,'metric')
dist = gpcf.metric.fh.dist(gpcf.metric, x);
[gdist, gprior_dist] = gpcf.metric.fh.ginput(gpcf.metric, x);
for i=1:length(gdist)
ii1 = ii1+1;
DKff{ii1} = -K.*gdist{ii1};
gprior(ii1) = gprior_dist(ii1);
end
else
if length(gpcf.lengthScale) == 1
% In the case of an isotropic RQ
s = repmat(1./gpcf.lengthScale.^2, 1, m);
else
s = 1./gpcf.lengthScale.^2;
end
for i=i1
for j = 1:n
DK = zeros(size(K));
DK(j,:) = -s(i).*bsxfun(@minus,x(j,i),x(:,i)');
DK = DK + DK';
DK = DK.*K.^a.*gpcf.magnSigma2^(1-a);
ii1 = ii1 + 1;
DKff{ii1} = DK;
gprior(ii1) = 0;
end
end
end
elseif nargin == 3 || nargin == 4
[n2, m2] =size(x2);
K = gpcf.fh.cov(gpcf, x, x2);
ii1 = 0;
if isfield(gpcf,'metric')
dist = gpcf.metric.fh.dist(gpcf.metric, x, x2);
[gdist, gprior_dist] = gpcf.metric.fh.ginput(gpcf.metric, x, x2);
for i=1:length(gdist)
ii1 = ii1+1;
DKff{ii1} = -K.*gdist{ii1};
gprior(ii1) = gprior_dist(ii1);
end
else
if length(gpcf.lengthScale) == 1
% In the case of an isotropic RQ
s = repmat(1./gpcf.lengthScale.^2, 1, m);
else
s = 1./gpcf.lengthScale.^2;
end
ii1 = 0;
for i=i1
for j = 1:n
DK= zeros(size(K));
DK(j,:) = -s(i).*bsxfun(@minus,x(j,i),x2(:,i)');
DK = DK.*K.^a.*gpcf.magnSigma2^(1-a);
ii1 = ii1 + 1;
DKff{ii1} = DK;
gprior(ii1) = 0;
end
end
end
end
end
function C = gpcf_rq_cov(gpcf, x1, x2)
% GP_RQ_COV Evaluate covariance matrix between two input vectors
%
% Description
% C = GP_RQ_COV(GP, TX, X) takes in covariance function of a
% Gaussian process GP and two matrixes TX and X that contain
% input vectors to GP. Returns covariance matrix C. Every
% element ij of C contains covariance between inputs i in TX
% and j in X. This is a mandatory subfunction used for example in
% prediction and energy computations.
%
% See also
% GPCF_RQ_TRCOV, GPCF_RQ_TRVAR, GP_COV, GP_TRCOV
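%
% Example sketch (hypothetical inputs; gpcf created with gpcf_rq as above,
% and gpcf.fh.cov is assumed to point to this subfunction):
%
%   x1 = randn(10, 2); x2 = randn(7, 2);
%   C = gpcf.fh.cov(gpcf, x1, x2);   % 10 x 7 cross-covariance matrix
%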
if isempty(x2)
x2=x1;
end
if size(x1,2)~=size(x2,2)
error('the number of columns of X1 and X2 has to be the same')
end
if isfield(gpcf,'metric')
dist = gpcf.metric.fh.dist(gpcf.metric, x1, x2).^2;
dist(dist<eps) = 0;
C = gpcf.magnSigma2.*(1+dist./(2*gpcf.alpha)).^(-gpcf.alpha);
else
if isfield(gpcf, 'selectedVariables')
x1 = x1(:,gpcf.selectedVariables);
x2 = x2(:,gpcf.selectedVariables);
end
[n1,m1]=size(x1);
[n2,m2]=size(x2);
C=zeros(n1,n2);
ma2 = gpcf.magnSigma2;
% Evaluate the covariance
if ~isempty(gpcf.lengthScale)
s2 = 1./(2.*gpcf.alpha.*gpcf.lengthScale.^2);
% If ARD is not used make s a vector of
% equal elements
if size(s2)==1
s2 = repmat(s2,1,m1);
end
dist=zeros(n1,n2);
for j=1:m1
dist = dist + s2(j).*(bsxfun(@minus,x1(:,j),x2(:,j)')).^2;
end
dist(dist<eps) = 0;
C = ma2.*(1+dist).^(-gpcf.alpha);
end
end
end
function C = gpcf_rq_trcov(gpcf, x)
%GP_RQ_TRCOV Evaluate training covariance matrix of inputs
%
% Description
% C = GP_RQ_TRCOV(GP, TX) takes in covariance function of a
% Gaussian process GP and matrix TX that contains training
% input vectors. Returns covariance matrix C. Every element ij
% of C contains covariance between inputs i and j in TX. This
% is a mandatory subfunction used for example in prediction and
% energy computations.
%
% See also
% GPCF_RQ_COV, GPCF_RQ_TRVAR, GP_COV, GP_TRCOV
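%
% Example sketch (hypothetical inputs; gpcf.fh.trcov is assumed to point here):
%
%   C = gpcf.fh.trcov(gpcf, x);   % n x n training covariance for the inputs x
%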
if isfield(gpcf,'metric')
% If other than scaled euclidean metric
[n, m] =size(x);
ma2 = gpcf.magnSigma2;
C = zeros(n,n);
for ii1=1:n-1
d = zeros(n-ii1,1);
col_ind = ii1+1:n;
d = gpcf.metric.fh.dist(gpcf.metric, x(col_ind,:), x(ii1,:)).^2;
C(col_ind,ii1) = d;
end
C(C<eps) = 0;
C = C+C';
C = ma2.*(1+C./(2*gpcf.alpha)).^(-gpcf.alpha);
else
% If scaled euclidean metric
% Try to use the C-implementation
C=trcov(gpcf, x);
if isnan(C)
% If the C implementation was not available, compute in MATLAB
if isfield(gpcf, 'selectedVariables')
x = x(:,gpcf.selectedVariables);
end
[n, m] =size(x);
s2 = 1./(2*gpcf.alpha.*gpcf.lengthScale.^2);
if size(s2)==1
s2 = repmat(s2,1,m);
end
ma2 = gpcf.magnSigma2;
C = zeros(n,n);
for ii1=1:n-1
d = zeros(n-ii1,1);
col_ind = ii1+1:n;
for ii2=1:m
d = d+s2(ii2).*(x(col_ind,ii2)-x(ii1,ii2)).^2;
end
C(col_ind,ii1) = d;
end
C(C<eps) = 0;
C = C+C';
C = ma2.*(1+C).^(-gpcf.alpha);
end
end
end
function C = gpcf_rq_trvar(gpcf, x)
%GP_RQ_TRVAR Evaluate training variance vector
%
% Description
% C = GP_RQ_TRVAR(GPCF, TX) takes in covariance function of a
% Gaussian process GPCF and matrix TX that contains training
% inputs. Returns variance vector C. Every element i of C
% contains variance of input i in TX. This is a mandatory
% subfunction used for example in prediction and energy computations.
%
% See also
% GPCF_RQ_COV, GP_COV, GP_TRCOV
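%
% Example sketch (hypothetical inputs; gpcf.fh.trvar is assumed to point here):
%
%   v = gpcf.fh.trvar(gpcf, x);   % n x 1 vector, each element equal to magnSigma2
%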
[n, m] =size(x);
C = ones(n,1).*gpcf.magnSigma2;
C(C<eps)=0;
end
function reccf = gpcf_rq_recappend(reccf, ri, gpcf)
%RECAPPEND Record append
%
% Description
% RECCF = GPCF_RQ_RECAPPEND(RECCF, RI, GPCF) takes a
% covariance function record structure RECCF, record index RI
% and covariance function structure GPCF with the current MCMC
% samples of the parameters. Returns RECCF which contains all
% the old samples and the current samples from GPCF. This
% subfunction is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC and GP_MC -> RECAPPEND
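%
% Sketch of the two call patterns, mirroring the branches below (illustrative
% only; in practice gp_mc calls this through the stored function handle):
%
%   reccf = gpcf.fh.recappend([], gpcf);        % nargin==2: initialize the record
%   reccf = gpcf.fh.recappend(reccf, 1, gpcf);  % append the current sample as row 1
%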
if nargin == 2
% Initialize the record
reccf.type = 'gpcf_rq';
% Initialize parameters
reccf.lengthScale= [];
reccf.magnSigma2 = [];
reccf.alpha = [];
% Set the function handles
reccf.fh.pak = @gpcf_rq_pak;
reccf.fh.unpak = @gpcf_rq_unpak;
reccf.fh.e = @gpcf_rq_lp;
reccf.fh.g = @gpcf_rq_g;
reccf.fh.cov = @gpcf_rq_cov;
reccf.fh.trcov = @gpcf_rq_trcov;
reccf.fh.trvar = @gpcf_rq_trvar;
reccf.fh.recappend = @gpcf_rq_recappend;
reccf.p=[];
reccf.p.lengthScale=[];
reccf.p.magnSigma2=[];
if isfield(ri.p,'lengthScale') && ~isempty(ri.p.lengthScale)
reccf.p.lengthScale = ri.p.lengthScale;
end
if ~isempty(ri.p.magnSigma2)
reccf.p.magnSigma2 = ri.p.magnSigma2;
end
if ~isempty(ri.p.alpha)
reccf.p.alpha = ri.p.alpha;
end
else
% Append to the record
gpp = gpcf.p;
if ~isfield(gpcf,'metric')
% record lengthScale
reccf.lengthScale(ri,:)=gpcf.lengthScale;
if isfield(gpp,'lengthScale') && ~isempty(gpp.lengthScale)
reccf.p.lengthScale = gpp.lengthScale.fh.recappend(reccf.p.lengthScale, ri, gpcf.p.lengthScale);
end
end
% record magnSigma2
reccf.magnSigma2(ri,:)=gpcf.magnSigma2;
if isfield(gpp,'magnSigma2') && ~isempty(gpp.magnSigma2)
reccf.p.magnSigma2 = gpp.magnSigma2.fh.recappend(reccf.p.magnSigma2, ri, gpcf.p.magnSigma2);
end
reccf.alpha(ri,:)=gpcf.alpha;
if isfield(gpp,'alpha') && ~isempty(gpp.alpha)
reccf.p.alpha = gpp.alpha.fh.recappend(reccf.p.alpha, ri, gpcf.p.alpha);
end
end
end
|
github
|
lcnbeapp/beapp-master
|
lgcp.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/lgcp.m
| 7,612 |
utf_8
|
85edf686f047863fe47af6dbc611d045
|
function [l,lq,xt,gp] = lgcp(x,varargin)
% LGCP - Log Gaussian Cox Process intensity estimate for 1D and 2D data
%
% LGCP(X)
% [P,PQ,XT,GP] = LGCP(X,XT,OPTIONS)
%
% X is 1D or 2D point data
% XT is optional test points
% OPTIONS are optional parameter-value pairs
% 'gridn' is optional number of grid points used in each axis direction
% default is 100 for 1D, 15 for grid 2D
% 'range' tells the estimation range, default is data range
% for 1D [XMIN XMAX]
% for 2D [XMIN XMAX YMIN YMAX]
% 'gpcf' is optional function handle of a GPstuff covariance function
% (default is @gpcf_sexp)
% 'latent_method' is optional 'EP' (default) or 'Laplace'
% 'int_method' is optional 'mode' (default), 'CCD' or 'grid'
%
% P is the estimated intensity
% PQ is the 5% and 95% percentiles of the intensity estimate
% XT contains the used test points
% GP is the Gaussian process formed. As the grid is scaled to
% unit range or unit square, additional field 'scale' is
% included which includes the range for the grid in the
% original x space.
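%
% An illustrative call on synthetic 1D point data (a sketch, not part of the
% original documentation; the two-bump data below is hypothetical):
%
%   x  = [0.3*randn(100,1); 2 + 0.3*randn(100,1)];  % synthetic 1D point data
%   lgcp(x);                                        % plot the intensity estimate
%   xt = linspace(-1, 3, 400)';                     % or evaluate at chosen test points
%   [l, lq] = lgcp(x, xt);
%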
% Copyright (c) 2009-2012 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'LGCP';
ip.addRequired('x', @(x) isnumeric(x) && (size(x,2)==1 || size(x,2)==2));
ip.addOptional('xt',NaN, @(x) isnumeric(x) && (size(x,2)==1 || size(x,2)==2));
ip.addParamValue('gridn',[], @(x) isscalar(x) && x>0 && mod(x,1)==0);
ip.addParamValue('range',[], @(x) isreal(x)&&(length(x)==2||length(x)==4));
ip.addParamValue('gpcf',@gpcf_sexp,@(x) ischar(x) || isa(x,'function_handle'));
ip.addParamValue('latent_method','EP', @(x) ismember(x,{'EP' 'Laplace'}))
ip.addParamValue('int_method','mode', @(x) ismember(x,{'mode' 'CCD', 'grid'}))
ip.addParamValue('normalize',false, @islogical);
ip.parse(x,varargin{:});
x=ip.Results.x;
xt=ip.Results.xt;
gridn=ip.Results.gridn;
xrange=ip.Results.range;
gpcf=ip.Results.gpcf;
latent_method=ip.Results.latent_method;
int_method=ip.Results.int_method;
normalize=ip.Results.normalize;
[n,m]=size(x);
switch m
case 1 % 1D
% Parameters for a grid
if isempty(gridn)
% number of points
gridn=100;
end
xmin=min(x);xmax=max(x);
if ~isempty(xrange)
xmin=min(xmin,xrange(1));
xmax=max(xmax,xrange(2));
end
% Discretize the data
xx=linspace(xmin,xmax,gridn)';
yy=hist(x,xx)';
ye=ones(gridn,1)./gridn.*n;
% Test points
if isnan(xt)
xt=linspace(xmin,xmax,max(gridn,200))';
end
% normalise to unit range, so that same prior is ok for different scales
xxn=(xx-min(xx))./range(xx)-0.5;
xtn=(xt-min(xx))./range(xx)-0.5;
% smooth...
[Ef,Varf,gp]=gpsmooth(xxn,yy,ye,xtn,gpcf,latent_method,int_method);
gp.scale=range(xx);
% compute mean and quantiles
A=range(xx);
lm=exp(Ef+Varf/2)./A.*n;
lq5=exp(Ef-sqrt(Varf)*1.645)./A*n;
lq95=exp(Ef+sqrt(Varf)*1.645)./A*n;
lq=[lq5 lq95];
if nargout<1
% no output, do the plot thing
newplot
hp=patch([xt; xt(end:-1:1)],[lq(:,1); lq(end:-1:1,2)],[.9 .9 .9]);
set(hp,'edgecolor',[.9 .9 .9])
xlim([xmin xmax])
line(xt,lm,'linewidth',2);
else
l=lm;
end
case 2 % 2D
% Find unique points
[xu,I,J]=unique(x,'rows');
% and count number of repeated x's
counts=crosstab(J);
nu=length(xu);
% Parameters for a grid
if isempty(gridn)
% number of points in direction
gridn=15;
end
x1min=min(x(:,1));x1max=max(x(:,1));
x2min=min(x(:,2));x2max=max(x(:,2));
if ~isempty(xrange)
% range extension
x1min=min(x1min,xrange(1));
x1max=max(x1max,xrange(2));
x2min=min(x2min,xrange(3));
x2max=max(x2max,xrange(4));
end
% Form regular grid to discretize the data
zz1=linspace(x1min,x1max,gridn)';
zz2=linspace(x2min,x2max,gridn)';
[z1,z2]=meshgrid(zz1,zz2);
z=[z1(:),z2(:)];
nz=length(z);
% form data for GP (xx,yy,ye)
xx=z;
yy=zeros(nz,1);
zi=interp2(z1,z2,reshape(1:nz,gridn,gridn),xu(:,1),xu(:,2),'nearest');
for i1=1:nu
yy(zi(i1),1)=yy(zi(i1),1)+counts(i1);
end
ye=ones(nz,1)./nz.*n;
% Default test points
if isnan(xt)
[xt1,xt2]=meshgrid(linspace(x1min,x1max,max(100,gridn)),...
linspace(x2min,x2max,max(100,gridn)));
xt=[xt1(:) xt2(:)];
end
% normalise to unit square, so that same prior is ok for different scales
xxn=bsxfun(@rdivide,bsxfun(@minus,xx,min(xx,[],1)),range(xx,1))-.5;
xtn=bsxfun(@rdivide,bsxfun(@minus,xt,min(xx,[],1)),range(xx,1))-.5;
% smooth...
[Ef,Varf,gp]=gpsmooth(xxn,yy,ye,xtn,gpcf,latent_method,int_method);
gp.scale=[range(xx(:,1)) range(xx(:,2))];
% compute mean
A = range(xx(:,1)).*range(xx(:,2));
lm=exp(Ef+Varf/2)./A.*n;
lq5=exp(Ef-sqrt(Varf)*1.645)./A.*n;
lq95=exp(Ef+sqrt(Varf)*1.645)./A.*n;
lq=[lq5 lq95];
if nargout<1
% no output, do the plot thing
G=zeros(size(xt1));
G(:)=lm;
pcolor(xt1,xt2,G);
shading flat
colormap('jet')
cx=caxis;
cx(1)=0;
caxis(cx);
colorbar
else
l=lm;
end
otherwise
error('X has to be Nx1 or Nx2')
end
end
function [Ef,Varf,gp] = gpsmooth(xx,yy,ye,xt,gpcf,latent_method,int_method)
% Make inference with log Gaussian process and EP or Laplace approximation
nin = size(xx,2);
% init gp
if strfind(func2str(gpcf),'ppcs')
% ppcs still have nin parameter...
gpcf1 = gpcf('nin',nin);
else
gpcf1 = gpcf();
end
% default vague prior
pm = prior_sqrtt('s2', 1^2, 'nu', 4);
pl = prior_t('s2', 2^2, 'nu', 4);
%pm = prior_logunif();
%pl = prior_logunif();
pa = prior_t('s2', 10^2, 'nu', 4);
% different covariance functions have different parameters
if isfield(gpcf1,'magnSigma2')
gpcf1 = gpcf(gpcf1, 'magnSigma2', .1, 'magnSigma2_prior', pm);
end
if isfield(gpcf1,'lengthScale')
gpcf1 = gpcf(gpcf1, 'lengthScale', .1, 'lengthScale_prior', pl);
end
if isfield(gpcf1,'alpha')
gpcf1 = gpcf(gpcf1, 'alpha', 20, 'alpha_prior', pa);
end
if isfield(gpcf1,'weightSigma2')
gpcf1 = gpcf(gpcf1, 'weightSigma2_prior', prior_logunif(), 'biasSigma2_prior', prior_logunif());
end
% Create the GP structure
gp = gp_set('lik', lik_poisson(), 'cf', {gpcf1}, 'jitterSigma2', 1e-4);
% Set the approximate inference method
gp = gp_set(gp, 'latent_method', latent_method);
% Optimize hyperparameters
opt=optimset('TolX', 1e-3, 'Display', 'off');
if exist('fminunc')
gp = gp_optim(gp, xx, yy, 'z', ye, 'optimf', @fminunc, 'opt', opt);
else
gp = gp_optim(gp, xx, yy, 'z', ye, 'optimf', @fminlbfgs, 'opt', opt);
end
% Make prediction for the test points
if strcmpi(int_method,'mode')
% point estimate for the hyperparameters
[Ef,Varf] = gp_pred(gp, xx, yy, xt, 'z', ye);
else
% integrate over the hyperparameters
%[~, ~, ~, Ef, Varf] = gp_ia(opt, gp, xx, yy, xt, param);
[notused, notused, notused, Ef, Varf]=...
gp_ia(gp, xx, yy, xt, 'z', ye, 'int_method', int_method);
end
end
|
github
|
lcnbeapp/beapp-master
|
gpep_e.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gpep_e.m
| 106,791 |
UNKNOWN
|
adc9d7d1debecd4cbbbb4f948a6cb72b
|
function [e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta] = gpep_e(w, gp, varargin)
%GPEP_E Do Expectation propagation and return marginal log posterior estimate
%
% Description
% E = GPEP_E(W, GP, X, Y, OPTIONS) takes a GP structure GP
% together with a matrix X of input vectors and a matrix Y of
% target vectors, and finds the EP approximation for the
% conditional posterior p(Y | X, th), where th is the
% parameters. Returns the energy at th (see below). Each row of
% X corresponds to one input vector and each row of Y
% corresponds to one target vector.
%
% [E, EDATA, EPRIOR] = GPEP_E(W, GP, X, Y, OPTIONS) returns also
% the data and prior components of the total energy.
%
% The energy is minus log posterior cost function for th:
% E = EDATA + EPRIOR
% = - log p(Y|X, th) - log p(th),
% where th represents the parameters (lengthScale,
% magnSigma2...), X is inputs and Y is observations.
%
% OPTIONS is optional parameter-value pair
% z - optional observed quantity in triplet (x_i,y_i,z_i)
% Some likelihoods may use this. For example, in case of
% Poisson likelihood we have z_i=E_i, that is, expected
% value for ith case.
%
% See also
% GP_SET, GP_E, GPEP_G, GPEP_PRED
% Description 2
% Additional properties meant only for internal use.
%
% GP = GPEP_E('init', GP) takes a GP structure GP and
% initializes required fields for the EP algorithm.
%
% GPEP_E('clearcache', GP) takes a GP structure GP and clears
% the internal cache stored in the nested function workspace
%
% [e, edata, eprior, site_tau, site_nu, L, La2, b, muvec_i, sigm2vec_i]
% = GPEP_E(w, gp, x, y, options)
% returns many useful quantities produced by the EP algorithm.
%
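% A hedged usage sketch (the data x, y and the model choices below are
% hypothetical; lik_probit, gpcf_sexp, gp_set and gp_pak are other GPstuff
% functions assumed to be on the path):
%
%   gp = gp_set('lik', lik_probit(), 'cf', {gpcf_sexp()}, ...
%               'latent_method', 'EP');
%   w  = gp_pak(gp);
%   [e, edata, eprior] = gpep_e(w, gp, x, y);  % energy = -log p(y|X,th) - log p(th)
%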
% Copyright (c) 2007 Jaakko Riihimäki
% Copyright (c) 2007-2010 Jarno Vanhatalo
% Copyright (c) 2010 Heikki Peura
% Copyright (c) 2010-2012 Aki Vehtari
% Copyright (c) 2011 Pasi Jylänki
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
% parse inputs
ip=inputParser;
ip.FunctionName = 'GPEP_E';
ip.addRequired('w', @(x) ...
isempty(x) || ...
(ischar(x) && ismember(x, {'init' 'clearcache'})) || ...
(isvector(x) && isreal(x) && all(isfinite(x))) || ...
all(isnan(x)));
ip.addRequired('gp',@isstruct);
ip.addOptional('x', [], @(x) isnumeric(x) && isreal(x) && all(isfinite(x(:))))
ip.addOptional('y', [], @(x) isnumeric(x) && isreal(x) && all(isfinite(x(:))))
ip.addParamValue('z', [], @(x) isnumeric(x) && isreal(x) && all(isfinite(x(:))))
ip.parse(w, gp, varargin{:});
x=ip.Results.x;
y=ip.Results.y;
z=ip.Results.z;
if strcmp(w, 'init')
% initialize cache
ch = [];
% return function handle to the nested function ep_algorithm
% this way each gp has its own persistent memory for EP
gp.fh.ne = @ep_algorithm;
% set other function handles
gp.fh.e=@gpep_e;
gp.fh.g=@gpep_g;
gp.fh.pred=@gpep_pred;
gp.fh.jpred=@gpep_jpred;
gp.fh.looe=@gpep_looe;
gp.fh.loog=@gpep_loog;
gp.fh.loopred=@gpep_loopred;
e = gp;
% remove clutter from the nested workspace
clear w gp varargin ip x y z
elseif strcmp(w, 'clearcache')
% clear the cache
gp.fh.ne('clearcache');
else
% call ep_algorithm using the function handle to the nested function
% this way each gp has its own persistent memory for EP
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta] = gp.fh.ne(w, gp, x, y, z);
end
function [e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta] = ep_algorithm(w, gp, x, y, z)
if strcmp(w, 'clearcache')
ch=[];
return
end
switch gp.latent_opt.optim_method
case 'basic-EP'
% check whether saved values can be used
if isempty(z)
datahash=hash_sha512([x y]);
else
datahash=hash_sha512([x y z]);
end
if ~isempty(ch) && all(size(w)==size(ch.w)) && all(abs(w-ch.w)<1e-8) && isequal(datahash,ch.datahash)
% The covariance function parameters or data haven't changed
% so we can return the energy and the site parameters that are saved
e = ch.e;
edata = ch.edata;
eprior = ch.eprior;
tautilde = ch.tautilde;
nutilde = ch.nutilde;
L = ch.L;
La2 = ch.La2;
b = ch.b;
muvec_i = ch.muvec_i;
sigm2vec_i = ch.sigm2vec_i;
logZ_i = ch.logZ_i;
eta = ch.eta;
else
% The parameters or data have changed since
% the last call for gpep_e. In this case we need to
% re-evaluate the EP approximation
gp=gp_unpak(gp, w);
ncf = length(gp.cf);
n = size(x,1);
% EP iteration parameters
iter=1;
maxiter = gp.latent_opt.maxiter;
tol = gp.latent_opt.tol;
df = gp.latent_opt.df;
nutilde = zeros(size(y));
tautilde = zeros(size(y));
muvec_i=zeros(size(y));
sigm2vec_i=zeros(size(y));
logZep_old=0; logZep=Inf;
if ~isfield(gp,'meanf')
mf = zeros(size(y));
else
[H,b_m,B_m]=mean_prep(gp,x,[]);
mf = H'*b_m;
end
logM0 = zeros(n,1);
muhat = zeros(n,1);
sigm2hat = zeros(n,1);
% =================================================
% First Evaluate the data contribution to the error
switch gp.type
% ============================================================
% FULL
% ============================================================
case 'FULL' % A full GP
[K,C] = gp_trcov(gp, x);
if ~issparse(C)
% The EP algorithm for full support covariance function
if ~isfield(gp,'meanf')
Sigm = C;
meanfp=false;
else
Sigm = C + H'*B_m*H;
meanfp=true;
end
% The EP -algorithm
convergence=false;
while iter<=maxiter && ~convergence
logZep_old=logZep;
logM0_old=logM0;
if isequal(gp.latent_opt.init_prev, 'on') && iter==1 && ~isempty(ch) && all(size(w)==size(ch.w)) && all(abs(w-ch.w)<1) && isequal(datahash,ch.datahash)
tautilde=ch.tautilde;
nutilde=ch.nutilde;
else
if isequal(gp.latent_opt.parallel,'on')
% parallel-EP
% compute marginal and cavity parameters
dSigm=diag(Sigm);
tau=1./dSigm-tautilde;
nu = 1./dSigm.*mf-nutilde;
muvec_i=nu./tau;
sigm2vec_i=1./tau;
% compute moments of tilted distributions
[logM0, muhat, sigm2hat] = gp.lik.fh.tiltedMoments(gp.lik, y, 1:n, sigm2vec_i, muvec_i, z);
if any(isnan(logM0))
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
% update site parameters
deltatautilde=1./sigm2hat-tau-tautilde;
tautilde=tautilde+df.*deltatautilde;
deltanutilde=1./sigm2hat.*muhat-nu-nutilde;
nutilde=nutilde+df.*deltanutilde;
else
% sequential-EP
muvec_i = zeros(n,1); sigm2vec_i = zeros(n,1);
for i1=1:n
% Algorithm utilizing Cholesky updates
% This is numerically more stable but slower
% $$$ % approximate cavity parameters
% $$$ S11 = sum(Ls(:,i1).^2);
% $$$ S1 = Ls'*Ls(:,i1);
% $$$ tau_i=S11^-1-tautilde(i1);
% $$$ nu_i=S11^-1*mf(i1)-nutilde(i1);
% $$$
% $$$ mu_i=nu_i/tau_i;
% $$$ sigm2_i=tau_i^-1;
% $$$
% $$$ if sigm2_i < 0
% $$$ [ii i1]
% $$$ end
% $$$
% $$$ % marginal moments
% $$$ [M0(i1), muhat, sigm2hat] = feval(gp.lik.fh.tiltedMoments, gp.lik, y, i1, sigm2_i, mu_i, z);
% $$$
% $$$ % update site parameters
% $$$ deltatautilde = sigm2hat^-1-tau_i-tautilde(i1);
% $$$ tautilde(i1) = tautilde(i1)+deltatautilde;
% $$$ nutilde(i1) = sigm2hat^-1*muhat-nu_i;
% $$$
% $$$ upfact = 1./(deltatautilde^-1+S11);
% $$$ if upfact > 0
% $$$ Ls = cholupdate(Ls, S1.*sqrt(upfact), '-');
% $$$ else
% $$$ Ls = cholupdate(Ls, S1.*sqrt(-upfact));
% $$$ end
% $$$ Sigm = Ls'*Ls;
% $$$ mf=Sigm*nutilde;
% $$$
% $$$ muvec_i(i1,1)=mu_i;
% $$$ sigm2vec_i(i1,1)=sigm2_i;
% Algorithm as in Rasmussen and Williams 2006
% approximate cavity parameters
Sigmi=Sigm(:,i1);
Sigmii=Sigmi(i1);
tau_i=1/Sigmii-tautilde(i1);
nu_i = 1/Sigmii*mf(i1)-nutilde(i1);
mu_i=nu_i/tau_i;
sigm2_i=1/tau_i;
% marginal moments
[logM0(i1), muhat(i1), sigm2hat(i1)] = gp.lik.fh.tiltedMoments(gp.lik, y, i1, sigm2_i, mu_i, z);
if isnan(logM0(i1))
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
% update site parameters
deltatautilde=sigm2hat(i1)^-1-tau_i-tautilde(i1);
tautilde(i1)=tautilde(i1)+df*deltatautilde;
deltanutilde=sigm2hat(i1)^-1*muhat(i1)-nu_i-nutilde(i1);
nutilde(i1)=nutilde(i1)+df*deltanutilde;
% Update mean and variance after each site update (standard EP)
ds = deltatautilde/(1+deltatautilde*Sigmii);
Sigm = Sigm - ((ds*Sigmi)*Sigmi');
%Sigm = Sigm - ((ds*Sigm(:,i1))*Sigm(:,i1)');
% The below is how Rasmussen and Williams
% (2006) do the update. The above version is
% more robust.
%ds = deltatautilde^-1+Sigm(i1,i1);
%ds = (Sigm(:,i1)/ds)*Sigm(:,i1)';
%Sigm = Sigm - ds;
%Sigm=Sigm-(deltatautilde^-1+Sigm(i1,i1))^-1*(Sigm(:,i1)*Sigm(:,i1)');
if ~meanfp
mf=Sigm*nutilde;
else
mf=Sigm*(C\(H'*b_m)+nutilde);
end
muvec_i(i1)=mu_i;
sigm2vec_i(i1)=sigm2_i;
end
end
end
% Recompute the approximate posterior parameters
% parallel- and sequential-EP
Stilde=tautilde;
Stildesqr=sqrt(Stilde);
if ~meanfp % zero mean function used
% NOTICE! upper triangle matrix! cf. to
% line 13 in the algorithm 3.5, p. 58.
%B=eye(n)+Stildesqr*C*Stildesqr;
B=bsxfun(@times,bsxfun(@times,Stildesqr,C),Stildesqr');
B(1:n+1:end)=B(1:n+1:end)+1;
[L,notpositivedefinite] = chol(B,'lower');
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
%V=(L\Stildesqr)*C;
V=L\bsxfun(@times,Stildesqr,C);
Sigm=C-V'*V;
mf=Sigm*nutilde;
% Compute the marginal likelihood
% Direct formula (3.65):
% Sigmtilde=diag(1./tautilde);
% mutilde=inv(Stilde)*nutilde;
%
% logZep=-0.5*log(det(Sigmtilde+K))-0.5*mutilde'*inv(K+Sigmtilde)*mutilde+
% sum(log(normcdf(y.*muvec_i./sqrt(1+sigm2vec_i))))+
% 0.5*sum(log(sigm2vec_i+1./tautilde))+
% sum((muvec_i-mutilde).^2./(2*(sigm2vec_i+1./tautilde)))
% 4. term & 1. term
term41=0.5*sum(log(1+tautilde.*sigm2vec_i))-sum(log(diag(L)));
% 5. term (1/2 element) & 2. term
T=1./sigm2vec_i;
Cnutilde = C*nutilde;
L2 = V*nutilde;
term52 = nutilde'*Cnutilde - L2'*L2 - (nutilde'./(T+Stilde)')*nutilde;
term52 = term52.*0.5;
% 5. term (2/2 element)
term5=0.5*muvec_i'.*(T./(Stilde+T))'*(Stilde.*muvec_i-2*nutilde);
% 3. term
term3 = sum(logM0);
logZep = -(term41+term52+term5+term3);
iter=iter+1;
else
% mean function used
% help variables
hBh = H'*B_m*H;
C_t = C + hBh;
CHb = C\H'*b_m;
S = diag(Stildesqr.^2);
%B = eye(n)+Stildesqroot*C*Stildesqroot;
B=bsxfun(@times,bsxfun(@times,Stildesqr,C),Stildesqr');
B(1:n+1:end)=B(1:n+1:end)+1;
%B_h = eye(n) + Stildesqroot*C_t*Stildesqroot;
B_h=bsxfun(@times,bsxfun(@times,Stildesqr,C_t),Stildesqr');
B_h(1:n+1:end)=B_h(1:n+1:end)+1;
% L to return, without the hBh term
[L,notpositivedefinite]=chol(B,'lower');
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
% L for the calculation with mean term
[L_m,notpositivedefinite]=chol(B_h,'lower');
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
% Recompute the approximate posterior parameters
% parallel- and sequential-EP
%V=(L_m\Stildesqroot)*C_t;
V=L_m\bsxfun(@times,Stildesqr,C_t);
Sigm=C_t-V'*V;
mf=Sigm*(CHb+nutilde);
T=1./sigm2vec_i;
Cnutilde = (C_t - S^-1)*(S*H'*b_m-nutilde);
L2 = V*(S*H'*b_m-nutilde);
Stildesqroot = diag(Stildesqr);
zz = Stildesqroot*(L'\(L\(Stildesqroot*C)));
% inv(K + S^-1)*S^-1
Ks = eye(size(zz)) - zz;
% 5. term (1/2 element)
term5_1 = 0.5.*((nutilde'*S^-1)./(T.^-1+Stilde.^-1)')*(S^-1*nutilde);
% 2. term
term2 = 0.5.*((S*H'*b_m-nutilde)'*Cnutilde - L2'*L2);
% 4. term
term4 = 0.5*sum(log(1+tautilde.*sigm2vec_i));
% 1. term
term1 = -1.*sum(log(diag(L_m)));
% 3. term
term3 = sum(logM0);
% 5. term (2/2 element)
term5 = 0.5*muvec_i'.*(T./(Stilde+T))'*(Stilde.*muvec_i-2*nutilde);
logZep = -(term4+term1+term5_1+term5+term2+term3);
iter=iter+1;
end
convergence=max(abs(logM0_old-logM0))<tol && abs(logZep_old-logZep)<tol;
end
else
% EP algorithm for compactly supported covariance function
% (C is a sparse matrix)
p = analyze(K);
r(p) = 1:n;
if ~isempty(z)
z = z(p,:);
end
y = y(p);
K = K(p,p);
Inn = sparse(1:n,1:n,1,n,n);
sqrtS = sparse(1:n,1:n,0,n,n);
mf = zeros(size(y));
sigm2 = zeros(size(y));
dSigm=full(diag(K));
gamma = zeros(size(y));
VD = sparse(1:n,1:n,1,n,n);
% The EP -algorithm
convergence=false;
while iter<=maxiter && ~convergence
logZep_old=logZep;
logM0_old=logM0;
if isequal(gp.latent_opt.parallel,'on')
% parallel-EP
% approximate cavity parameters
sqrtSK = ssmult(sqrtS, K);
tttt = ldlsolve(VD,sqrtSK);
sigm2 = full(diag(K) - sum(sqrtSK.*tttt)');
mf = gamma - tttt'*sqrtS*gamma;
tau=1./sigm2-tautilde;
nu = 1./sigm2.*mf-nutilde;
muvec_i=nu./tau;
sigm2vec_i=1./tau;
% compute moments of tilted distributions
for i1=1:n
[logM0(i1), muhat(i1), sigm2hat(i1)] = gp.lik.fh.tiltedMoments(gp.lik, y, i1, sigm2vec_i(i1), muvec_i(i1), z);
end
if any(isnan(logM0))
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
% update site parameters
deltatautilde=1./sigm2hat-tau-tautilde;
tautilde=tautilde+df*deltatautilde;
deltanutilde=1./sigm2hat.*muhat-nu-nutilde;
nutilde=nutilde+df*deltanutilde;
gamma = gamma + sum(bsxfun(@times,K,df.*deltanutilde'),2);
else
% sequential-EP
muvec_i = zeros(n,1); sigm2vec_i = zeros(n,1);
for i1=1:n
% approximate cavity parameters
Ki1 = K(:,i1);
sqrtSKi1 = ssmult(sqrtS, Ki1);
tttt = ldlsolve(VD,sqrtSKi1);
sigm2(i1) = Ki1(i1) - sqrtSKi1'*tttt;
mf(i1) = gamma(i1) - tttt'*sqrtS*gamma;
tau_i=sigm2(i1)^-1-tautilde(i1);
nu_i=sigm2(i1)^-1*mf(i1)-nutilde(i1);
mu_i=nu_i/tau_i;
sigm2_i=tau_i^-1;
% marginal moments
[logM0(i1), muhat(i1), sigm2hat(i1)] = gp.lik.fh.tiltedMoments(gp.lik, y, i1, sigm2_i, mu_i, z);
% update site parameters
tautilde_old = tautilde(i1);
deltatautilde=sigm2hat(i1)^-1-tau_i-tautilde(i1);
tautilde(i1)=tautilde(i1)+df*deltatautilde;
deltanutilde=sigm2hat(i1)^-1*muhat(i1)-nu_i-nutilde(i1);
nutilde(i1)=nutilde(i1)+df*deltanutilde;
gamma = gamma + Ki1.*df*deltanutilde;
% Update the LDL decomposition
sqrtS(i1,i1) = sqrt(tautilde(i1));
sqrtSKi1(i1) = sqrt(tautilde(i1)).*Ki1(i1);
D2_n = sqrtSKi1.*sqrtS(i1,i1) + Inn(:,i1);
if tautilde_old == 0
VD = ldlrowupdate(i1,VD,VD(:,i1),'-');
VD = ldlrowupdate(i1,VD,D2_n,'+');
else
VD = ldlrowmodify(VD, D2_n, i1);
end
muvec_i(i1,1)=mu_i;
sigm2vec_i(i1,1)=sigm2_i;
end
end
% Recompute the approximate posterior parameters
% parallel- and sequential-EP
sqrtS = sparse(1:n,1:n,sqrt(tautilde),n,n);
KsqrtS = ssmult(K,sqrtS);
B = ssmult(sqrtS,KsqrtS) + Inn;
[VD, notpositivedefinite] = ldlchol(B);
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
Knutilde = K*nutilde;
mf = Knutilde - KsqrtS*ldlsolve(VD,sqrtS*Knutilde);
% Compute the marginal likelihood
% 4. term & 1. term
term41=0.5*sum(log(1+tautilde.*sigm2vec_i)) - 0.5.*sum(log(diag(VD)));
% 5. term (1/2 element) & 2. term
T=1./sigm2vec_i;
term52 = nutilde'*mf - (nutilde'./(T+tautilde)')*nutilde;
term52 = term52.*0.5;
% 5. term (2/2 element)
term5=0.5*muvec_i'.*(T./(tautilde+T))'*(tautilde.*muvec_i-2*nutilde);
% 3. term
term3 = sum(logM0);
logZep = -(term41+term52+term5+term3);
iter=iter+1;
convergence=max(abs(logM0_old-logM0))<tol && abs(logZep_old-logZep)<tol;
%[iter-1 max(abs(muhat-mf)./abs(mf)) max(abs(sqrt(sigm2hat)-s)./abs(s)) max(abs(logM0_old-logM0)) abs(logZep_old-logZep)]
%[iter-1 max(abs(muhat-mf)./abs(mf)) max(abs(logM0_old-logM0)) abs(logZep_old-logZep)]
end
% Reorder all the returned and stored values
B = B(r,r);
nutilde = nutilde(r);
tautilde = tautilde(r);
muvec_i = muvec_i(r);
sigm2vec_i = sigm2vec_i(r);
logM0 = logM0(r);
mf = mf(r);
y = y(r);
if ~isempty(z)
z = z(r,:);
end
[L, notpositivedefinite] = ldlchol(B);
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
end
edata = logZep;
% Set something into La2
La2 = B;
b = 0;
% ============================================================
% FIC
% ============================================================
case 'FIC'
u = gp.X_u;
m = size(u,1);
% First evaluate needed covariance matrices
% v defines that parameter is a vector
[Kv_ff, Cv_ff] = gp_trvar(gp, x); % f x 1 vector
K_fu = gp_cov(gp, x, u); % f x u
K_uu = gp_trcov(gp, u); % u x u, noiseless covariance K_uu
K_uu = (K_uu+K_uu')./2; % ensure the symmetry of K_uu
[Luu, notpositivedefinite] = chol(K_uu, 'lower');
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
% Evaluate the Lambda (La)
% Q_ff = K_fu*inv(K_uu)*K_fu'
% Here we need only the diag(Q_ff), which is evaluated below
B=Luu\(K_fu'); % u x f
Qv_ff=sum(B.^2)';
Lav = Cv_ff-Qv_ff; % f x 1, Vector of diagonal elements
% iLaKfu = diag(iLav)*K_fu = inv(La)*K_fu
% First some helper parameters
iLaKfu = zeros(size(K_fu)); % f x u,
for i=1:n
iLaKfu(i,:) = K_fu(i,:)./Lav(i); % f x u
end
A = K_uu+K_fu'*iLaKfu; A = (A+A')./2; % Ensure symmetry
[A, notpositivedefinite] = chol(A);
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
L = iLaKfu/A;
Lahat = 1./Lav;
I = eye(size(K_uu));
[R0, notpositivedefinite] = chol(inv(K_uu));
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
R = R0;
P = K_fu;
mf = zeros(size(y));
eta = zeros(size(y));
gamma = zeros(size(K_uu,1),1);
D_vec = Lav;
Ann=0;
% The EP -algorithm
convergence=false;
while iter<=maxiter && ~convergence
logZep_old=logZep;
logM0_old=logM0;
if isequal(gp.latent_opt.parallel,'on')
% parallel-EP
% approximate cavity parameters
Ann = D_vec+sum((P*R').^2,2);
mf = eta + sum(bsxfun(@times,P,gamma'),2);
tau = 1./Ann-tautilde;
nu = 1./Ann.*mf-nutilde;
muvec_i=nu./tau;
sigm2vec_i=1./tau;
% compute moments of tilted distributions
for i1=1:n
[logM0(i1), muhat(i1), sigm2hat(i1)] = gp.lik.fh.tiltedMoments(gp.lik, y, i1, sigm2vec_i(i1), muvec_i(i1), z);
end
if any(isnan(logM0))
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
% update site parameters
deltatautilde=1./sigm2hat-tau-tautilde;
tautilde=tautilde+df*deltatautilde;
deltanutilde=1./sigm2hat.*muhat-nu-nutilde;
nutilde=nutilde+df*deltanutilde;
else
% sequential-EP
muvec_i = zeros(n,1); sigm2vec_i = zeros(n,1);
for i1=1:n
% approximate cavity parameters
pn = P(i1,:)';
Ann = D_vec(i1) + sum((R*pn).^2);
tau_i = Ann^-1-tautilde(i1);
mf(i1) = eta(i1) + pn'*gamma;
nu_i = Ann^-1*mf(i1)-nutilde(i1);
mu_i=nu_i/tau_i;
sigm2_i=tau_i^-1;
% marginal moments
[logM0(i1), muhat(i1), sigm2hat(i1)] = gp.lik.fh.tiltedMoments(gp.lik, y, i1, sigm2_i, mu_i, z);
% update site parameters
deltatautilde = sigm2hat(i1)^-1-tau_i-tautilde(i1);
tautilde(i1) = tautilde(i1)+df*deltatautilde;
deltanutilde = sigm2hat(i1)^-1*muhat(i1)-nu_i - nutilde(i1);
nutilde(i1) = nutilde(i1)+df*deltanutilde;
% Update the parameters
dn = D_vec(i1);
D_vec(i1) = D_vec(i1) - deltatautilde.*D_vec(i1).^2 ./ (1+deltatautilde.*D_vec(i1));
P(i1,:) = pn' - (deltatautilde.*dn ./ (1+deltatautilde.*dn)).*pn';
updfact = deltatautilde./(1 + deltatautilde.*Ann);
if updfact > 0
RtRpnU = R'*(R*pn).*sqrt(updfact);
R = cholupdate(R, RtRpnU, '-');
elseif updfact < 0
RtRpnU = R'*(R*pn).*sqrt(abs(updfact));
R = cholupdate(R, RtRpnU, '+');
end
eta(i1) = eta(i1) + (deltanutilde - deltatautilde.*eta(i1)).*dn./(1+deltatautilde.*dn);
gamma = gamma + (deltanutilde - deltatautilde.*mf(i1))./(1+deltatautilde.*dn) * R'*(R*pn);
% mf = eta + P*gamma;
% Store cavity parameters
muvec_i(i1,1)=mu_i;
sigm2vec_i(i1,1)=sigm2_i;
end
end
% Recompute the approximate posterior parameters
% parallel- and sequential-EP
temp1 = (1+Lav.*tautilde).^(-1);
D_vec = temp1.*Lav;
R0P0t = R0*K_fu';
temp2 = zeros(size(R0P0t));
% for i2 = 1:length(temp1)
% P(i2,:) = temp1(i2).*K_fu(i2,:);
% temp2(:,i2) = R0P0t(:,i2).*tautilde(i2).*temp1(i2);
% end
% R = chol(inv(eye(size(R0)) + temp2*R0P0t')) * R0;
P=bsxfun(@times,temp1,K_fu);
temp2=bsxfun(@times,(tautilde.*temp1)',R0P0t);
temp2=temp2*R0P0t';
temp2(1:m+1:end)=temp2(1:m+1:end)+1;
R = chol(inv(temp2)) * R0;
eta = D_vec.*nutilde;
gamma = R'*(R*(P'*nutilde));
mf = eta + P*gamma;
% Compute the marginal likelihood, see FULL model for
% details about equations
Lahat = 1./Lav + tautilde;
Lhat = bsxfun(@rdivide,L,Lahat);
H = I-L'*Lhat;
B = H\L';
Bhat = B./repmat(Lahat',m,1);
% 4. term & 1. term
Stildesqroot=sqrt(tautilde);
D = Stildesqroot.*Lav.*Stildesqroot + 1;
SsqrtKfu = K_fu.*repmat(Stildesqroot,1,m);
AA = K_uu + (SsqrtKfu'./repmat(D',m,1))*SsqrtKfu; AA = (AA+AA')/2;
[AA, notpositivedefinite] = chol(AA,'lower');
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
term41 = - 0.5*sum(log(1+tautilde.*sigm2vec_i)) - sum(log(diag(Luu))) + sum(log(diag(AA))) + 0.5.*sum(log(D));
% 5. term (1/2 element) & 2. term
T=1./sigm2vec_i;
term52 = -0.5*( (nutilde./Lahat)'*nutilde + (nutilde'*Lhat)*(Bhat*nutilde) - (nutilde./(T+tautilde))'*nutilde);
% 5. term (2/2 element)
term5 = - 0.5*muvec_i'.*(T./(tautilde+T))'*(tautilde.*muvec_i-2*nutilde);
% 3. term
term3 = -sum(logM0);
logZep = term41+term52+term5+term3;
iter=iter+1;
convergence=max(abs(logM0_old-logM0))<tol && abs(logZep_old-logZep)<tol;
end
edata = logZep;
%L = iLaKfu;
% b' = (La + Kfu*iKuu*Kuf + 1./S)*1./S * nutilde
% = (S - S * (iLa - L*L' + S)^(-1) * S) * 1./S
% = I - S * (Lahat - L*L')^(-1)
% L = S*Kfu * (Lav + 1./S)^(-1) / chol(K_uu + SsqrtKfu'*(Lav + 1./S)^(-1)*SsqrtKfu)
% La2 = D./S = Lav + 1./S,
%
% The way evaluations are done is numerically more stable
% See equations (3.71) and (3.72) in Rasmussen and Williams (2006)
b = nutilde'.*(1 - Stildesqroot./Lahat.*Stildesqroot)' - (nutilde'*Lhat)*Bhat.*tautilde'; % part of eq. (3.71)
L = ((repmat(Stildesqroot,1,m).*SsqrtKfu)./repmat(D',m,1)')/AA'; % part of eq. (3.72)
La2 = 1./(Stildesqroot./D.*Stildesqroot); % part of eq. (3.72)
D = D_vec;
% ============================================================
% PIC
% ============================================================
case {'PIC' 'PIC_BLOCK'}
ind = gp.tr_index;
u = gp.X_u;
m = length(u);
% First evaluate needed covariance matrices
% v defines that parameter is a vector
K_fu = gp_cov(gp, x, u); % f x u
K_uu = gp_trcov(gp, u); % u x u, noiseless covariance K_uu
K_uu = (K_uu+K_uu')./2; % ensure the symmetry of K_uu
[Luu, notpositivedefinite] = chol(K_uu, 'lower');
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
% Evaluate the Lambda (La)
% Q_ff = K_fu*inv(K_uu)*K_fu'
% Here we need only the diag(Q_ff), which is evaluated below
B=Luu\(K_fu'); % u x f
% First some helper parameters
iLaKfu = zeros(size(K_fu)); % f x u
for i=1:length(ind)
Qbl_ff = B(:,ind{i})'*B(:,ind{i});
[Kbl_ff, Cbl_ff] = gp_trcov(gp, x(ind{i},:));
Labl{i} = Cbl_ff - Qbl_ff;
[Llabl, notpositivedefinite] = chol(Labl{i});
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
iLaKfu(ind{i},:) = Llabl\(Llabl'\K_fu(ind{i},:));
end
A = K_uu+K_fu'*iLaKfu;
A = (A+A')./2; % Ensure symmetry
[A, notpositivedefinite] = chol(A);
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
L = iLaKfu/A;
I = eye(size(K_uu));
[R0, notpositivedefinite] = chol(inv(K_uu));
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
R = R0;
P = K_fu;
R0P0t = R0*K_fu';
mf = zeros(size(y));
eta = zeros(size(y));
gamma = zeros(size(K_uu,1),1);
D = Labl;
Ann=0;
% The EP -algorithm
convergence=false;
while iter<=maxiter && ~convergence
logZep_old=logZep;
logM0_old=logM0;
if isequal(gp.latent_opt.parallel,'on')
% parallel-EP
% approximate cavity parameters
for bl=1:length(ind)
bl_ind = ind{bl};
Pbl=P(bl_ind,:);
Ann = diag(D{bl}) +sum((Pbl*R').^2,2);
tau(bl_ind,1) = 1./Ann-tautilde(bl_ind);
mf(bl_ind,1) = eta(bl_ind) + sum(bsxfun(@times,Pbl,gamma'),2);
nu(bl_ind,1) = 1./Ann.*mf(bl_ind)-nutilde(bl_ind);
end
muvec_i=nu./tau;
sigm2vec_i=1./tau;
% compute moments of tilted distributions
for i1=1:n
[logM0(i1), muhat(i1), sigm2hat(i1)] = gp.lik.fh.tiltedMoments(gp.lik, y, i1, sigm2vec_i(i1), muvec_i(i1), z);
end
if any(isnan(logM0))
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
% update site parameters
deltatautilde = 1./sigm2hat-tau-tautilde;
tautilde = tautilde+df*deltatautilde;
deltanutilde = 1./sigm2hat.*muhat-nu-nutilde;
nutilde = nutilde+df*deltanutilde;
else
muvec_i = zeros(n,1); sigm2vec_i = zeros(n,1);
for bl=1:length(ind)
bl_ind = ind{bl};
for in=1:length(bl_ind)
i1 = bl_ind(in);
% approximate cavity parameters
Dbl = D{bl}; dn = Dbl(in,in); pn = P(i1,:)';
Ann = dn + sum((R*pn).^2);
tau_i = Ann^-1-tautilde(i1);
mf(i1) = eta(i1) + pn'*gamma;
nu_i = Ann^-1*mf(i1)-nutilde(i1);
mu_i=nu_i/tau_i;
sigm2_i=tau_i^-1;
% marginal moments
[logM0(i1), muhat(i1), sigm2hat(i1)] = gp.lik.fh.tiltedMoments(gp.lik, y, i1, sigm2_i, mu_i, z);
% update site parameters
deltatautilde = sigm2hat(i1)^-1-tau_i-tautilde(i1);
tautilde(i1) = tautilde(i1)+df*deltatautilde;
deltanutilde = sigm2hat(i1)^-1*muhat(i1)-nu_i - nutilde(i1);
nutilde(i1) = nutilde(i1) + df*deltanutilde;
% Update the parameters
Dblin = Dbl(:,in);
Dbl = Dbl - deltatautilde ./ (1+deltatautilde.*dn) * Dblin*Dblin';
%Dbl = inv(inv(Dbl) + diag(tautilde(bl_ind)));
P(bl_ind,:) = P(bl_ind,:) - ((deltatautilde ./ (1+deltatautilde.*dn)).* Dblin)*pn';
updfact = deltatautilde./(1 + deltatautilde.*Ann);
if updfact > 0
RtRpnU = R'*(R*pn).*sqrt(updfact);
R = cholupdate(R, RtRpnU, '-');
elseif updfact < 0
RtRpnU = R'*(R*pn).*sqrt(abs(updfact));
R = cholupdate(R, RtRpnU, '+');
end
eta(bl_ind) = eta(bl_ind) + (deltanutilde - deltatautilde.*eta(i1))./(1+deltatautilde.*dn).*Dblin;
gamma = gamma + (deltanutilde - deltatautilde.*mf(i1))./(1+deltatautilde.*dn) * (R'*(R*pn));
%mf = eta + P*gamma;
D{bl} = Dbl;
% Store cavity parameters
muvec_i(i1,1)=mu_i;
sigm2vec_i(i1,1)=sigm2_i;
end
end
end
% Recompute the approximate posterior parameters
% parallel- and sequential-EP
temp2 = zeros(size(R0P0t));
Stildesqroot=sqrt(tautilde);
for i=1:length(ind)
sdtautilde = diag(Stildesqroot(ind{i}));
Dhat = sdtautilde*Labl{i}*sdtautilde + eye(size(Labl{i}));
[Ldhat{i}, notpositivedefinite] = chol(Dhat);
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
D{i} = Labl{i} - Labl{i}*sdtautilde*(Ldhat{i}\(Ldhat{i}'\sdtautilde*Labl{i}));
P(ind{i},:) = D{i}*(Labl{i}\K_fu(ind{i},:));
temp2(:,ind{i}) = R0P0t(:,ind{i})*sdtautilde/Dhat*sdtautilde;
eta(ind{i}) = D{i}*nutilde(ind{i});
end
R = chol(inv(eye(size(R0)) + temp2*R0P0t')) * R0;
gamma = R'*(R*(P'*nutilde));
mf = eta + P*gamma;
% Compute the marginal likelihood, see FULL model for
% details about equations
%
% First some helper parameters
for i = 1:length(ind)
Lhat(ind{i},:) = D{i}*L(ind{i},:);
end
H = I-L'*Lhat;
B = H\L';
% Compute the marginal likelihood, see FULL model for
% details about equations
term41 = 0; term52 = 0;
for i=1:length(ind)
Bhat(:,ind{i}) = B(:,ind{i})*D{i};
SsqrtKfu(ind{i},:) = bsxfun(@times,K_fu(ind{i},:),Stildesqroot(ind{i}));
%SsqrtKfu(ind{i},:) = gtimes(K_fu(ind{i},:),Stildesqroot(ind{i}));
iDSsqrtKfu(ind{i},:) = Ldhat{i}\(Ldhat{i}'\SsqrtKfu(ind{i},:));
term41 = term41 + sum(log(diag(Ldhat{i})));
term52 = term52 + nutilde(ind{i})'*(D{i}*nutilde(ind{i}));
end
AA = K_uu + SsqrtKfu'*iDSsqrtKfu; AA = (AA+AA')/2;
[AA, notpositivedefinite] = chol(AA,'lower');
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
term41 = term41 - 0.5*sum(log(1+tautilde.*sigm2vec_i)) - sum(log(diag(Luu))) + sum(log(diag(AA)));
% 5. term (1/2 element) & 2. term
T=1./sigm2vec_i;
term52 = -0.5*( term52 + (nutilde'*Lhat)*(Bhat*nutilde) - (nutilde./(T+tautilde))'*nutilde);
% 5. term (2/2 element)
term5 = - 0.5*muvec_i'.*(T./(tautilde+T))'*(tautilde.*muvec_i-2*nutilde);
% 3. term
term3 = -sum(logM0);
logZep = term41+term52+term5+term3;
iter=iter+1;
convergence=max(abs(logM0_old-logM0))<tol && abs(logZep_old-logZep)<tol;
end
edata = logZep;
b = zeros(1,n);
for i=1:length(ind)
b(ind{i}) = nutilde(ind{i})'*D{i};
La2{i} = inv(diag(Stildesqroot(ind{i}))*(Ldhat{i}\(Ldhat{i}'\diag(Stildesqroot(ind{i})))));
end
b = nutilde' - ((b + (nutilde'*Lhat)*Bhat).*tautilde');
L = (repmat(Stildesqroot,1,m).*iDSsqrtKfu)/AA';
% ============================================================
% CS+FIC
% ============================================================
case 'CS+FIC'
u = gp.X_u;
m = length(u);
cf_orig = gp.cf;
cf1 = {};
cf2 = {};
j = 1;
k = 1;
for i = 1:ncf
if ~isfield(gp.cf{i},'cs')
cf1{j} = gp.cf{i};
j = j + 1;
else
cf2{k} = gp.cf{i};
k = k + 1;
end
end
gp.cf = cf1;
% First evaluate needed covariance matrices
% v defines that parameter is a vector
[Kv_ff, Cv_ff] = gp_trvar(gp, x); % f x 1 vector
K_fu = gp_cov(gp, x, u); % f x u
K_uu = gp_trcov(gp, u); % u x u, noiseless covariance K_uu
K_uu = (K_uu+K_uu')./2; % ensure the symmetry of K_uu
[Luu, notpositivedefinite] = chol(K_uu, 'lower');
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
% Evaluate the Lambda (La)
% Q_ff = K_fu*inv(K_uu)*K_fu'
B=Luu\(K_fu'); % u x f
Qv_ff=sum(B.^2)';
Lav = Cv_ff-Qv_ff; % f x 1, Vector of diagonal elements
gp.cf = cf2;
K_cs = gp_trcov(gp,x);
La = sparse(1:n,1:n,Lav,n,n) + K_cs;
gp.cf = cf_orig;
% clear unnecessary variables
clear K_cs; clear Qv_ff; clear Kv_ff; clear Cv_ff; clear Lav;
% Find fill reducing permutation and permute all the
% matrices
p = analyze(La);
r(p) = 1:n;
if ~isempty(z)
z = z(p,:);
end
y = y(p);
La = La(p,p);
K_fu = K_fu(p,:);
[VD, notpositivedefinite] = ldlchol(La);
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
iLaKfu = ldlsolve(VD,K_fu);
A = K_uu+K_fu'*iLaKfu; A = (A+A')./2; % Ensure symmetry
[A, notpositivedefinite] = chol(A);
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
L = iLaKfu/A;
I = eye(size(K_uu));
Inn = sparse(1:n,1:n,1,n,n);
sqrtS = sparse(1:n,1:n,0,n,n);
[R0, notpositivedefinite] = chol(inv(K_uu));
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
R = R0;
P = K_fu;
R0P0t = R0*K_fu';
mf = zeros(size(y));
eta = zeros(size(y));
gamma = zeros(size(K_uu,1),1);
Ann=0;
LasqrtS = La*sqrtS;
[VD, notpositivedefinite] = ldlchol(Inn);
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
% The EP -algorithm
convergence=false;
while iter<=maxiter && ~convergence
logZep_old=logZep;
logM0_old=logM0;
if isequal(gp.latent_opt.parallel,'on')
% parallel-EP
% approximate cavity parameters
tttt = ldlsolve(VD,ssmult(sqrtS,La));
D_vec = full(diag(La) - sum(LasqrtS'.*tttt)');
Ann = D_vec+sum((P*R').^2,2);
mf = eta + sum(bsxfun(@times,P,gamma'),2);
tau = 1./Ann-tautilde;
nu = 1./Ann.*mf-nutilde;
muvec_i=nu./tau;
sigm2vec_i= 1./tau;
% compute moments of tilted distributions
for i1=1:n
[logM0(i1), muhat(i1), sigm2hat(i1)] = gp.lik.fh.tiltedMoments(gp.lik, y, i1, sigm2vec_i(i1), muvec_i(i1), z);
end
if any(isnan(logM0))
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
% update site parameters
deltatautilde=1./sigm2hat-tau-tautilde;
tautilde=tautilde+df*deltatautilde;
deltanutilde=1./sigm2hat.*muhat-nu-nutilde;
nutilde=nutilde+df*deltanutilde;
else
% sequential-EP
muvec_i = zeros(n,1); sigm2vec_i = zeros(n,1);
for i1=1:n
% approximate cavity parameters
tttt = ldlsolve(VD,ssmult(sqrtS,La(:,i1)));
Di1 = La(:,i1) - ssmult(LasqrtS,tttt);
dn = Di1(i1);
pn = P(i1,:)';
Ann = dn + sum((R*pn).^2);
tau_i = Ann^-1-tautilde(i1);
mf(i1) = eta(i1) + pn'*gamma;
nu_i = Ann^-1*mf(i1)-nutilde(i1);
mu_i=nu_i/tau_i;
sigm2_i= tau_i^-1; % 1./tau_i; %
% marginal moments
[logM0(i1), muhat(i1), sigm2hat(i1)] = gp.lik.fh.tiltedMoments(gp.lik, y, i1, sigm2_i, mu_i, z);
% update site parameters
deltatautilde = sigm2hat(i1)^-1-tau_i-tautilde(i1);
tautilde(i1) = tautilde(i1)+df*deltatautilde;
deltanutilde = sigm2hat(i1)^-1*muhat(i1)-nu_i - nutilde(i1);
nutilde(i1) = nutilde(i1) + df*deltanutilde;
% Update the parameters
P = P - ((deltatautilde ./ (1+deltatautilde.*dn)).* Di1)*pn';
updfact = deltatautilde./(1 + deltatautilde.*Ann);
if updfact > 0
RtRpnU = R'*(R*pn).*sqrt(updfact);
R = cholupdate(R, RtRpnU, '-');
elseif updfact < 0
RtRpnU = R'*(R*pn).*sqrt(abs(updfact));
R = cholupdate(R, RtRpnU, '+');
end
eta = eta + (deltanutilde - deltatautilde.*eta(i1))./(1+deltatautilde.*dn).*Di1;
gamma = gamma + (deltanutilde - deltatautilde.*mf(i1))./(1+deltatautilde.*dn) * (R'*(R*pn));
% Store cavity parameters
muvec_i(i1,1)=mu_i;
sigm2vec_i(i1,1)=sigm2_i;
D2_o = ssmult(sqrtS,LasqrtS(:,i1)) + Inn(:,i1);
sqrtS(i1,i1) = sqrt(tautilde(i1));
LasqrtS(:,i1) = La(:,i1).*sqrtS(i1,i1);
D2_n = ssmult(sqrtS,LasqrtS(:,i1)) + Inn(:,i1);
if tautilde(i1) - deltatautilde == 0
VD = ldlrowupdate(i1,VD,VD(:,i1),'-');
VD = ldlrowupdate(i1,VD,D2_n,'+');
else
VD = ldlrowmodify(VD, D2_n, i1);
end
end
end
% Recompute the approximate posterior parameters
% parallel- and sequential-EP
sqrtS = sparse(1:n,1:n,sqrt(tautilde),n,n);
sqrtSLa = ssmult(sqrtS,La);
D2 = ssmult(sqrtSLa,sqrtS) + Inn;
LasqrtS = ssmult(La,sqrtS);
[VD, notpositivedefinite] = ldlchol(D2);
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
SsqrtKfu = sqrtS*K_fu;
iDSsqrtKfu = ldlsolve(VD,SsqrtKfu);
P = K_fu - sqrtSLa'*iDSsqrtKfu;
R = chol(inv( eye(size(R0)) + R0P0t*sqrtS*ldlsolve(VD,sqrtS*R0P0t'))) * R0;
eta = La*nutilde - sqrtSLa'*ldlsolve(VD,sqrtSLa*nutilde);
gamma = R'*(R*(P'*nutilde));
mf = eta + P*gamma;
% Compute the marginal likelihood,
Lhat = La*L - sqrtSLa'*ldlsolve(VD,sqrtSLa*L);
H = I-L'*Lhat;
B = H\L';
Bhat = B*La - ldlsolve(VD,sqrtSLa*B')'*sqrtSLa;
% 4. term & 1. term
AA = K_uu + SsqrtKfu'*iDSsqrtKfu; AA = (AA+AA')/2;
[AA, notpositivedefinite] = chol(AA,'lower');
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
term41 = - 0.5*sum(log(1+tautilde.*sigm2vec_i)) - sum(log(diag(Luu))) + sum(log(diag(AA))) + 0.5*sum(log(diag(VD)));
% 5. term (1/2 element) & 2. term
T=1./sigm2vec_i;
term52 = -0.5*( nutilde'*(eta) + (nutilde'*Lhat)*(Bhat*nutilde) - (nutilde./(T+tautilde))'*nutilde);
% 5. term (2/2 element)
term5 = - 0.5*muvec_i'.*(T./(tautilde+T))'*(tautilde.*muvec_i-2*nutilde);
% 3. term
term3 = -sum(logM0);
logZep = term41+term52+term5+term3;
iter=iter+1;
convergence=max(abs(logM0_old-logM0))<tol && abs(logZep_old-logZep)<tol;
end
edata = logZep;
% b' = (K_fu/K_uu*K_fu' + La + diag(1./tautilde)) \ (tautilde.\nutilde)
% L = S*Kfu * (Lav + 1./S)^(-1) / chol(K_uu + SsqrtKfu'*(Lav + 1./S)^(-1)*SsqrtKfu)
% La2 = D./S = Lav + 1./S,
%
% The way evaluations are done is numerically more stable than with inversion of S (tautilde)
% See equations (3.71) and (3.72) in Rasmussen and Williams (2006)
b = nutilde' - ((eta' + (nutilde'*Lhat)*Bhat).*tautilde');
L = (sqrtS*iDSsqrtKfu)/AA';
La2 = sqrtS\D2/sqrtS;
% Reorder all the returned and stored values
b = b(r);
L = L(r,:);
La2 = La2(r,r);
D = La(r,r);
nutilde = nutilde(r);
tautilde = tautilde(r);
logM0 = logM0(r);
muvec_i = muvec_i(r);
sigm2vec_i = sigm2vec_i(r);
mf = mf(r);
P = P(r,:);
y = y(r);
if ~isempty(z)
z = z(r,:);
end
% ============================================================
% DTC,VAR
% ============================================================
case {'DTC' 'VAR' 'SOR'}
% First evaluate needed covariance matrices
% v defines that parameter is a vector
u = gp.X_u;
m = size(u,1);
% First evaluate needed covariance matrices
% v defines that parameter is a vector
[Kv_ff, Cv_ff] = gp_trvar(gp, x); % f x 1 vector
K_fu = gp_cov(gp, x, u); % f x u
K_uu = gp_trcov(gp, u); % u x u, noiseless covariance K_uu
K_uu = (K_uu+K_uu')./2; % ensure the symmetry of K_uu
[Luu, notpositivedefinite] = chol(K_uu, 'lower');
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
% Evaluate the Lambda (La)
% Q_ff = K_fu*inv(K_uu)*K_fu'
% Here we need only the diag(Q_ff), which is evaluated below
B=Luu\(K_fu'); % u x f
Phi = B';
m = size(Phi,2);
R = eye(m,m);
P = Phi;
mf = zeros(size(y));
gamma = zeros(m,1);
Ann=0;
% The EP -algorithm
convergence=false;
while iter<=maxiter && ~convergence
logZep_old=logZep;
logM0_old=logM0;
if isequal(gp.latent_opt.parallel,'on')
% parallel-EP
% approximate cavity parameters
Ann = sum((P*R').^2,2);
mf = sum(bsxfun(@times,Phi,gamma'),2);%phi'*gamma;
tau = 1./Ann-tautilde;
nu = 1./Ann.*mf-nutilde;
muvec_i=nu./tau;
sigm2vec_i= 1./tau;
% compute moments of tilted distributions
for i1=1:n
[logM0(i1), muhat(i1), sigm2hat(i1)] = gp.lik.fh.tiltedMoments(gp.lik, y, i1, sigm2vec_i(i1), muvec_i(i1), z);
end
if any(isnan(logM0))
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
% update site parameters
deltatautilde=1./sigm2hat-tau-tautilde;
tautilde=tautilde+df*deltatautilde;
deltanutilde=1./sigm2hat.*muhat-nu-nutilde;
nutilde=nutilde+df*deltanutilde;
else
% sequential-EP
muvec_i = zeros(n,1); sigm2vec_i = zeros(n,1);
for i1=1:n
% approximate cavity parameters
phi = Phi(i1,:)';
Ann = sum((R*phi).^2);
tau_i = Ann^-1-tautilde(i1);
mf(i1) = phi'*gamma;
nu_i = Ann^-1*mf(i1)-nutilde(i1);
mu_i=nu_i/tau_i;
sigm2_i=tau_i^-1;
% marginal moments
[logM0(i1), muhat(i1), sigm2hat(i1)] = gp.lik.fh.tiltedMoments(gp.lik, y, i1, sigm2_i, mu_i, z);
% update site parameters
deltatautilde = sigm2hat(i1)^-1-tau_i-tautilde(i1);
tautilde(i1) = tautilde(i1)+df*deltatautilde;
deltanutilde = sigm2hat(i1)^-1*muhat(i1)-nu_i - nutilde(i1);
nutilde(i1) = nutilde(i1) + df*deltanutilde;
% Update the parameters
lnn = sum((R*phi).^2);
updfact = deltatautilde/(1 + deltatautilde*lnn);
if updfact > 0
RtLphiU = R'*(R*phi).*sqrt(updfact);
R = cholupdate(R, RtLphiU, '-');
elseif updfact < 0
RtLphiU = R'*(R*phi).*sqrt(updfact);
R = cholupdate(R, RtLphiU, '+');
end
gamma = gamma - R'*(R*phi)*(deltatautilde*mf(i1)-deltanutilde);
% Store cavity parameters
muvec_i(i1,1)=mu_i;
sigm2vec_i(i1,1)=sigm2_i;
end
end
% Recompute the approximate posterior parameters
% parallel- and sequential-EP
R = chol(inv(eye(m,m) + Phi'*(repmat(tautilde,1,m).*Phi)));
gamma = R'*(R*(Phi'*nutilde));
mf = Phi*gamma;
% Compute the marginal likelihood, see FULL model for
% details about equations
% 4. term & 1. term
Stildesqroot=sqrt(tautilde);
SsqrtPhi = Phi.*repmat(Stildesqroot,1,m);
AA = eye(m,m) + SsqrtPhi'*SsqrtPhi; AA = (AA+AA')/2;
[AA, notpositivedefinite] = chol(AA,'lower');
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
term41 = - 0.5*sum(log(1+tautilde.*sigm2vec_i)) + sum(log(diag(AA)));
% 5. term (1/2 element) & 2. term
T=1./sigm2vec_i;
bb = nutilde'*Phi;
bb2 = bb*SsqrtPhi';
bb3 = bb2*SsqrtPhi/AA';
term52 = -0.5*( bb*bb' - bb2*bb2' + bb3*bb3' - (nutilde./(T+tautilde))'*nutilde);
% 5. term (2/2 element)
term5 = - 0.5*muvec_i'.*(T./(tautilde+T))'*(tautilde.*muvec_i-2*nutilde);
% 3. term
term3 = -sum(logM0);
logZep = term41+term52+term5+term3;
iter=iter+1;
convergence=max(abs(logM0_old-logM0))<tol && abs(logZep_old-logZep)<tol;
end
edata = logZep;
%L = iLaKfu;
if strcmp(gp.type,'VAR')
Qv_ff = sum(B.^2)';
edata = edata + 0.5*sum((Kv_ff-Qv_ff).*tautilde);
end
temp = Phi*(SsqrtPhi'*(SsqrtPhi*bb'));
% b = Phi*bb' - temp + Phi*(SsqrtPhi'*(SsqrtPhi*(AA'\(AA\temp))));
b = nutilde - bb2'.*Stildesqroot + repmat(tautilde,1,m).*Phi*(AA'\bb3');
b = b';
% StildeKfu = zeros(size(K_fu)); % f x u,
% for i=1:n
% StildeKfu(i,:) = K_fu(i,:).*tautilde(i); % f x u
% end
% A = K_uu+K_fu'*StildeKfu; A = (A+A')./2; % Ensure symmetry
% A = chol(A);
% L = StildeKfu/A;
L = repmat(tautilde,1,m).*Phi/AA';
%L = repmat(tautilde,1,m).*K_fu/AA';
mu=nutilde./tautilde;
%b = nutilde - mu'*L*L'*mu;
%b=b';
La2 = 1./tautilde;
D = 0;
otherwise
error('Unknown type of Gaussian process!')
end
% ==================================================
% Evaluate the prior contribution to the error from
% covariance functions and likelihood
% ==================================================
% Evaluate the prior contribution to the error from covariance
% functions
eprior = 0;
for i=1:ncf
gpcf = gp.cf{i};
eprior = eprior - gpcf.fh.lp(gpcf);
end
% Evaluate the prior contribution to the error from likelihood
% functions
if isfield(gp.lik, 'p')
lik = gp.lik;
eprior = eprior - lik.fh.lp(lik);
end
% The last things to do
if isfield(gp.latent_opt, 'display') && ismember(gp.latent_opt.display,{'final','iter'})
fprintf('GPEP_E: Number of iterations in EP: %d \n', iter-1)
end
e = edata + eprior;
logZ_i = logM0(:);
eta = [];
% store values to the cache
ch.w = w;
ch.e = e;
ch.edata = edata;
ch.eprior = eprior;
ch.tautilde = tautilde;
ch.nutilde = nutilde;
ch.L = L;
ch.La2 = La2;
ch.b = b;
ch.muvec_i = muvec_i;
ch.sigm2vec_i = sigm2vec_i;
ch.logZ_i = logZ_i;
ch.eta = eta;
ch.datahash=datahash;
global iter_lkm
iter_lkm=iter;
end
case 'robust-EP'
% function [e,edata,eprior,tau_q,nu_q,L, La2, b, muvec_i,sigm2vec_i,Z_i, eta] = ep_algorithm2(w,gp,x,y,z)
%
% if strcmp(w, 'clearcache')
% ch=[];
% return
% end
% check whether saved values can be used
if isempty(z)
datahash=hash_sha512([x y]);
else
datahash=hash_sha512([x y z]);
end
if ~isempty(ch) && all(size(w)==size(ch.w)) && all(abs(w-ch.w) < 1e-8) && isequal(datahash,ch.datahash)
% The covariance function parameters haven't changed so just
% return the Energy and the site parameters that are saved
e = ch.e;
edata = ch.edata;
eprior = ch.eprior;
L = ch.L;
La2 = ch.La2;
b = ch.b;
nutilde = ch.nu_q;
tautilde = ch.tau_q;
eta = ch.eta;
muvec_i = ch.muvec_i;
sigm2vec_i = ch.sigm2vec_i;
logZ_i = ch.logZ_i;
else
% The parameters or data have changed since
% the last call for gpep_e. In this case we need to
% re-evaluate the EP approximation
% preparations
ninit=gp.latent_opt.ninit; % max number of initial parallel iterations
maxiter=gp.latent_opt.maxiter; % max number of double-loop iterations
max_ninner=gp.latent_opt.max_ninner; % max number of inner loop iterations in the double-loop algorithm
tolStop=gp.latent_opt.tolStop; % converge tolerance
tolUpdate=gp.latent_opt.tolUpdate; % tolerance for the EP site updates
tolInner=gp.latent_opt.tolInner; % inner loop energy tolerance
tolGrad=gp.latent_opt.tolGrad; % minimum gradient (g) decrease in the search direction, abs(g_new)<tolGrad*abs(g)
Vc_lim=gp.latent_opt.cavity_var_lim; % limit for the cavity variance Vc, Vc < Vc_lim*diag(K)
df0=gp.latent_opt.df; % the initial damping factor
eta1=gp.latent_opt.eta; % the initial fraction parameter
eta2=gp.latent_opt.eta2; % the secondary fraction parameter
display=gp.latent_opt.display; % control the display
gp=gp_unpak(gp,w);
likelih=gp.lik;
ncf = length(gp.cf);
n=length(y);
pvis=0;
eta=repmat(eta1,n,1); % the initial vector of fraction parameters
fh_tm=@(si,m_c,V_c,eta) likelih.fh.tiltedMoments2(likelih,y,si,V_c,m_c,z,eta);
switch gp.type
case 'FULL'
% prior covariance
K = gp_trcov(gp, x);
case 'FIC'
% Sparse
u = gp.X_u;
m = size(u,1);
K_uu = gp_trcov(gp,u);
K_uu = (K_uu + K_uu')./2;
K_fu = gp_cov(gp,x,u);
[Kv_ff, Cv_ff] = gp_trvar(gp,x);
[Luu, notpositivedefinite] = chol(K_uu, 'lower');
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
B=Luu\(K_fu');
Qv_ff=sum(B.^2)';
Sf = [];
Sf2 = [];
L2 = [];
end
% prior (zero) initialization
[nu_q,tau_q]=deal(zeros(n,1));
% initialize the q-distribution (the multivariate Gaussian posterior approximation)
switch gp.type
case 'FULL'
[mf,Sf,lnZ_q]=evaluate_q(nu_q,tau_q,K,display);
Vf = diag(Sf);
case 'FIC'
[mf,Vf,lnZ_q]=evaluate_q2(nu_q,tau_q,Luu, K_fu, Kv_ff, Qv_ff, display);
otherwise
error('Robust-EP not implemented for this type of GP!');
end
% initialize the surrogate distribution (the independent Gaussian marginal approximations)
nu_s=mf./Vf;
tau_s=1./Vf;
lnZ_s=0.5*sum( (-log(tau_s) +nu_s.^2 ./tau_s)./eta ); % minus 0.5*log(2*pi)./eta
% initialize r-distribution (the tilted distributions)
[lnZ_r,lnZ_i,m_r,V_r]=evaluate_r(nu_q,tau_q,eta,fh_tm,nu_s,tau_s,display);
% initial energy (lnZ_ep)
e = lnZ_q + lnZ_r -lnZ_s;
if ismember(display,{'final','iter'})
fprintf('\nInitial energy: e=%.4f, hyperparameters:\n',e)
fprintf('Cov:%s \n',sprintf(' %.2g,',gp_pak(gp,'covariance')))
fprintf('Lik:%s \n',sprintf(' %.2g,',gp_pak(gp,'likelihood')))
end
if isfinite(e) % do not run the algorithm if the prior energy is not defined
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% initialize with ninit rounds of parallel EP
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% EP search direction
up_mode='ep'; % choose the moment matching
[dnu_q,dtau_q]=ep_update_dir(mf,Vf,m_r,V_r,eta,up_mode,tolUpdate);
convergence=false; % convergence indicator
df=df0; % initial damping factor
tol_m=zeros(1,2); % absolute moment tolerances
switch gp.type
case 'FULL'
tauc_min=1./(Vc_lim*diag(K)); % minimum cavity precision
case 'FIC'
tauc_min=1./(Vc_lim*Cv_ff);
end
% Adjust damping by setting an upper limit (Vf_mult) to the increase
% of the marginal variance
Vf_mult=2;
i1=0;
while i1<ninit
i1=i1+1;
%%%%%%%%%%%%%%%%%%%
% the damped update
dfi=df(ones(n,1));
temp=(1/Vf_mult-1)./Vf;
ii2=df*dtau_q<temp;
if any(ii2)
dfi(ii2)=temp(ii2)./dtau_q(ii2);
end
% proposal site parameters
nu_q2=nu_q+dfi.*dnu_q;
tau_q2=tau_q+dfi.*dtau_q;
%%%%%%%%%%%%%%%%%%%%%%%%%%%
% a proposal q-distribution
switch gp.type
case 'FULL'
[mf2,Sf2,lnZ_q2,L1,L2]=evaluate_q(nu_q2,tau_q2,K,display);
Vf2 = diag(Sf2);
case 'FIC'
[mf2,Vf2,lnZ_q2,L1,L2]=evaluate_q2(nu_q2,tau_q2,Luu, K_fu, Kv_ff, Qv_ff, display);
otherwise
error('Robust-EP not implemented for this type of GP!');
end
% check that the new cavity variances do not exceed the limit
tau_s2=1./Vf2;
pcavity=all( (tau_s2-eta.*tau_q2 )>=tauc_min);
if isempty(L2) || ~pcavity
% In case of too small cavity precisions, halve the step size
df=df*0.5;
if df<0.1,
% If moderate damping is not sufficient, proceed to
% the double-loop algorithm
break
else
if ismember(display,{'iter'})
fprintf('%d, e=%.6f, dm=%.4f, dV=%.4f, increasing damping to df=%g.\n',i1,e,tol_m(1),tol_m(2),df)
end
continue
end
end
% a proposal surrogate distribution
nu_s2=mf2./Vf2;
lnZ_s2=0.5*sum( (-log(tau_s2) +nu_s2.^2 ./tau_s2)./eta );
%%%%%%%%%%%%%%%%%%%%%%%%%%%
% a proposal r-distribution
[lnZ_r2,lnZ_i2,m_r2,V_r2,p]=evaluate_r(nu_q2,tau_q2,eta,fh_tm,nu_s2,tau_s2,display);
% the new energy
e2 = lnZ_q2 + lnZ_r2 -lnZ_s2;
% check that the energy is defined and that the tilted moments are proper
if ~all(p) || ~isfinite(e2)
df=df*0.5;
if df<0.1,
break
else
if ismember(display,{'iter'})
fprintf('%d, e=%.6f, dm=%.4f, dV=%.4f, increasing damping to df=%g.\n',i1,e,tol_m(1),tol_m(2),df)
end
continue
end
end
% accept the new state
[nu_q,tau_q,mf,Vf,Sf,lnZ_q]=deal(nu_q2,tau_q2,mf2,Vf2,Sf2,lnZ_q2);
[lnZ_r,lnZ_i,m_r,V_r,lnZ_s,nu_s,tau_s]=deal(lnZ_r2,lnZ_i2,m_r2,V_r2,lnZ_s2,nu_s2,tau_s2);
% EP search direction (moment matching)
[dnu_q,dtau_q]=ep_update_dir(mf,Vf,m_r,V_r,eta,up_mode,tolUpdate);
% Check for convergence
% the difference between the marginal moments
% Vf=diag(Sf);
tol_m=[abs(mf-m_r) abs(Vf-V_r)];
% measure the convergence by the moment difference
convergence=all(tol_m(:,1)<tolStop*abs(mf)) && all(tol_m(:,2)<tolStop*abs(Vf));
% measure the convergence by the change of energy
%convergence=abs(e2-e)<tolStop;
tol_m=max(tol_m);
e=e2;
if ismember(display,{'iter'})
fprintf('%d, e=%.6f, dm=%.4f, dV=%.4f, df=%g.\n',i1,e,tol_m(1),tol_m(2),df)
end
if convergence
if ismember(display,{'final','iter'})
fprintf('Convergence with parallel EP, iter %d, e=%.6f, dm=%.4f, dV=%.4f, df=%g.\n',i1,e,tol_m(1),tol_m(2),df)
end
break
end
end
end % end of initial rounds of parallel EP
if isfinite(e) && ~convergence
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% if no convergence with the parallel EP
% start double-loop iterations
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
up_mode=gp.latent_opt.up_mode; % update mode in double-loop iterations
%up_mode='ep'; % choose the moment matching
%up_mode='grad'; % choose the gradients
df_lim=gp.latent_opt.df_lim; % step size limit (1 suitable for ep updates)
tol_e=inf; % the energy difference for measuring convergence (tol_e < tolStop)
ninner=0; % counter for the inner loop iterations
df=df0; % initial step size (damping factor)
% the initial gradient in the search direction
g = sum( (mf -m_r).*dnu_q ) +0.5*sum( (V_r +m_r.^2 -Vf -mf.^2).*dtau_q );
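% (g is the directional derivative of the inner-loop objective
% lnZ_q+lnZ_r-lnZ_s with respect to the site parameters (nu_q,tau_q)
% along (dnu_q,dtau_q), with the surrogate distribution held fixed)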
sdir_reset=false;
rec_sadj=[0 e g]; % record for step size adjustment
for i1=1:maxiter
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% calculate a new proposal state
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Limit the step size separately for each site so that the cavity variances
% do not exceed the upper limit (this will change the search direction)
% this should not happen after step size adjustment
ii1=tau_s-eta.*(tau_q+df*dtau_q)<tauc_min;
if any(ii1)
%ii1=dtau_q>0; df1=min( ( (tau_s(ii1)-tauc_min(ii1))./eta(ii1)-tau_q(ii1) )./dtau_q(ii1)/df ,1);
df1=( (tau_s(ii1)-tauc_min(ii1))./eta(ii1) -tau_q(ii1) )./dtau_q(ii1)/df;
dnu_q(ii1)=dnu_q(ii1).*df1;
dtau_q(ii1)=dtau_q(ii1).*df1;
% the initial gradient in the search direction
g = sum( (mf -m_r).*dnu_q ) +0.5*sum( (V_r +m_r.^2 -Vf -mf.^2).*dtau_q );
% re-init the step size adjustment record
rec_sadj=[0 e g];
end
% proposal
nu_q2=nu_q+df*dnu_q;
tau_q2=tau_q+df*dtau_q;
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% energy for the proposal state
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% update the q-distribution
% [mf2,Sf2,lnZ_q2,L1,L2]=evaluate_q(nu_q2,tau_q2,K,display,K_uu, K_fu, Kv_ff, Qv_ff);
switch gp.type
case 'FULL'
[mf2,Sf2,lnZ_q2,L1,L2]=evaluate_q(nu_q2,tau_q2,K,display);
Vf2 = diag(Sf2);
case 'FIC'
[mf2,Vf2,lnZ_q2,L1,L2]=evaluate_q2(nu_q2,tau_q2,Luu, K_fu, Kv_ff, Qv_ff, display);
otherwise
error('Robust-EP not implemented for this type of GP!');
end
% check cavity
pcavity=all( (1./Vf2-eta.*tau_q2 )>=tauc_min);
g2=NaN;
if isempty(L2)
% the q-distribution not defined (the posterior covariance
% not positive definite)
e2=inf;
elseif pcavity
% the tilted distribution
[lnZ_r2,lnZ_i2,m_r2,V_r2]=evaluate_r(nu_q2,tau_q2,eta,fh_tm,nu_s,tau_s,display);
% the new energy
e2 = lnZ_q2 + lnZ_r2 -lnZ_s;
% gradients in the search direction
g2 = sum( (mf2 -m_r2).*dnu_q ) +0.5*sum( (V_r2 +m_r2.^2 -Vf2 -mf2.^2).*dtau_q );
if ismember(display,{'iter'})
% ratio of the gradients
fprintf('dg=%6.3f, ',min(abs(g2)/abs(g),99))
end
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% check if the energy decreases
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
if ~isfinite(e2) || ( pcavity && g2>10*abs(g) )
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% ill-conditioned q-distribution or very large increase
% in the gradient
% => halve the step size
df=df*0.5;
if ismember(display,{'iter'})
fprintf('decreasing step size, ')
end
elseif ~pcavity && ~pvis
% The cavity distributions resulting from the proposal distribution
% are not well defined, reset the site parameters by doing
% one parallel update with a zero initialization and continue
% with double loop iterations
if ismember(display,{'iter'})
fprintf('re-init the posterior due to ill-conditioned cavity distributions, ')
end
% Do resetting only once
pvis=1;
up_mode='ep';
nu_q=zeros(size(y));tau_q=zeros(size(y));
mf=zeros(size(y));
switch gp.type
case 'FULL'
Sf=K;Vf=diag(K);
case 'FIC'
Vf=Cv_ff;
end
nu_s=mf./Vf;
tau_s=1./Vf;
% lnZ_s=0.5*sum( (-log(tau_s) +nu_s.^2 ./tau_s)./eta ); % minus 0.5*log(2*pi)./eta
[lnZ_r,lnZ_i,m_r,V_r]=evaluate_r(nu_q,tau_q,eta,fh_tm,nu_s,tau_s,display);
% e = lnZ_q + lnZ_r -lnZ_s;
[dnu_q,dtau_q]=ep_update_dir(mf,Vf,m_r,V_r,eta,up_mode,tolUpdate);
%nu_q=dnu_q; tau_q=dtau_q;
nu_q=0.9.*dnu_q; tau_q=0.9.*dtau_q;
switch gp.type
case 'FULL'
[mf,Sf,lnZ_q]=evaluate_q(nu_q,tau_q,K,display);
Vf = diag(Sf);
case 'FIC'
[mf,Vf,lnZ_q]=evaluate_q2(nu_q,tau_q,Luu, K_fu, Kv_ff, Qv_ff, display);
otherwise
error('Robust-EP not implemented for this type of GP!');
end
nu_s=mf./Vf; tau_s=1./Vf;
lnZ_s=0.5*sum( (-log(tau_s) +nu_s.^2 ./tau_s)./eta ); % minus 0.5*log(2*pi)./eta
[lnZ_r,lnZ_i,m_r,V_r]=evaluate_r(nu_q,tau_q,eta,fh_tm,nu_s,tau_s,display);
e = lnZ_q + lnZ_r -lnZ_s;
[dnu_q,dtau_q]=ep_update_dir(mf,Vf,m_r,V_r,eta,up_mode,tolUpdate);
df=0.8;
g = sum( (mf -m_r).*dnu_q ) +0.5*sum( (V_r +m_r.^2 -Vf -mf.^2).*dtau_q );
rec_sadj=[0 e g];
elseif size(rec_sadj,1)<=1 && ( e2>e || abs(g2)>abs(g)*tolGrad )
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% no decrease in energy or the new gradient exceeds the
% pre-defined limit
% => adjust the step size
if ismember(display,{'iter'})
fprintf('adjusting step size, ')
end
% update the record for step size adjustment
ii1=find(df>rec_sadj(:,1),1,'last');
ii2=find(df<rec_sadj(:,1),1,'first');
rec_sadj=[rec_sadj(1:ii1,:); df e2 g2; rec_sadj(ii2:end,:)];
df_new=0;
if size(rec_sadj,1)>1
if exist('csape','file')==2
if g2>0
% adjust the step size with spline interpolation
pp=csape(rec_sadj(:,1)',[rec_sadj(1,3) rec_sadj(:,2)' rec_sadj(end,3)],[1 1]);
[tmp,df_new]=fnmin(pp,[0 df]);
elseif isfinite(g2)
% extrapolate with Hessian end-conditions
H=(rec_sadj(end,3)-rec_sadj(end-1,3))/(rec_sadj(end,1)-rec_sadj(end-1,1));
pp=csape(rec_sadj(:,1)',[rec_sadj(1,3) rec_sadj(:,2)' H],[1 2]);
% extrapolate at most by 100% at a time
[tmp,df_new]=fnmin(pp,[df df*1.5]);
end
else
% if curvefit toolbox does not exist, use a simple Hessian
% approximation
[tmp,ind]=sort(rec_sadj(:,2),'ascend');
ind=ind(1:2);
H=(rec_sadj(ind(1),3)-rec_sadj(ind(2),3))/(rec_sadj(ind(1),1)-rec_sadj(ind(2),1));
df_new=rec_sadj(ind(1),1) -rec_sadj(ind(1),3)/H;
if g2>0
% interpolate
df_new=max(min(df_new,df),0);
else
% extrapolate at most 100%
df_new=max(min(df_new,1.5*df),df);
end
end
df_new=min(df_new,df_lim);
end
if df_new==0
% the spline approximation fails or there is no record of the previous gradients
if g2>0
df=df*0.9; % too long step since the gradient is positive
else
df=df*1.1; % too short step since the gradient is negative
end
else
df=df_new;
end
% prevent too small cavity-variances after the step-size adjustment
ii1=dtau_q>0;
if any(ii1)
df_max=min( ( (tau_s(ii1)-tauc_min(ii1)-1e-8)./eta(ii1) -tau_q(ii1) )./dtau_q(ii1) );
df=min(df,df_max);
end
elseif e2>e+tolInner || (abs(g2)>abs(g)*tolGrad && strcmp(up_mode,'ep'))
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% No decrease in energy despite the step size adjustments.
% In some difficult cases the EP search direction may not
% decrease the energy or the gradient despite the
% step size adjustment. One reason for this
% may be the parallel EP search direction
% => try the negative gradient as the search direction
%
% or if the problem persists
% => try resetting the search direction
if abs(g2)>abs(g)*tolGrad && strcmp(up_mode,'ep')
% try switching to gradient based updates
up_mode='grad';
df_lim=1e3;
df=0.1;
if ismember(display,{'iter'})
fprintf('switch to gradient updates, ')
end
elseif ~sdir_reset
if ismember(display,{'iter'})
fprintf('reset the search direction, ')
end
sdir_reset=true;
elseif g2<0 && abs(g2)<abs(g) && e2>e
if ismember(display,{'final','iter'})
fprintf('Unable to continue: gradients of the inner-loop objective are inconsistent\n')
end
break;
else
df=df*0.1;
end
% the new search direction
[dnu_q,dtau_q]=ep_update_dir(mf,Vf,m_r,V_r,eta,up_mode,tolUpdate);
% the initial gradient in the search direction
g = sum( (mf -m_r).*dnu_q ) +0.5*sum( (V_r +m_r.^2 -Vf -mf.^2).*dtau_q );
% re-init the step size adjustment record
rec_sadj=[0 e g];
else
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% decrease of energy => accept the new state
dInner=abs(e-e2); % the inner loop energy change
% accept the new site parameters (nu_q,tau_q)
[mf,Vf,Sf,nu_q,tau_q,lnZ_q]=deal(mf2,Vf2,Sf2,nu_q2,tau_q2,lnZ_q2);
% accept also the new tilted distributions
[lnZ_r,lnZ_i,m_r,V_r,e]=deal(lnZ_r2,lnZ_i2,m_r2,V_r2,e2);
% check that the new cavity variances are positive and not too large
tau_s2=1./Vf;
pcavity=all( (tau_s2-eta.*tau_q )>=tauc_min);
supdate=false;
if pcavity && (dInner<tolInner || ninner>=max_ninner)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% try to update the surrogate distribution on the condition that
% - the cavity variances are positive and not too large
% - the new tilted moments are proper
% - sufficient tolerance or the maximum number of inner
% loop updates is exceeded
% update the surrogate distribution
nu_s2=mf.*tau_s2;
lnZ_s2=0.5*sum( (-log(tau_s2) +nu_s2.^2 ./tau_s2)./eta );
% update the tilted distribution
[lnZ_r2,lnZ_i2,m_r2,V_r2]=evaluate_r(nu_q,tau_q,eta,fh_tm,nu_s2,tau_s2,display);
% evaluate the new energy
e2 = lnZ_q + lnZ_r2 -lnZ_s2;
if isfinite(e2)
% a successful surrogate update
supdate=true;
ninner=0; % reset the inner loop iteration counter
% update the convergence criteria
tol_e=abs(e2-e);
% accept the new state
[lnZ_r,lnZ_i,m_r,V_r,lnZ_s,nu_s,tau_s,e]=deal(lnZ_r2,lnZ_i2,m_r2,V_r2,lnZ_s2,nu_s2,tau_s2,e2);
if ismember(display,{'iter'})
fprintf('surrogate update, ')
end
else
% Improper tilted moments even though the cavity variances are
% positive. This is an indication of numerically unstable
% tilted moment integrations but fractional updates usually help
% => try switching to fractional updates
pcavity=false;
if ismember(display,{'iter'})
fprintf('surrogate update failed, ')
end
end
end
if all(eta==eta1) && ~pcavity && (dInner<tolInner || ninner>=max_ninner)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% If the inner loop moments (within tolerance) are matched
% but the new cavity variances are negative or the tilted moment
% integrations fail after the surrogate update
% => switch to fractional EP.
%
% This is a rare situation and most likely the
% hyperparameters are such that the approximating family
% is not flexible enough, i.e., the hyperparameters are
% unsuitable for the data.
%
% One can also try to reduce the lower limit for the
% cavity precisions tauc_min=1./(Vc_lim*diag(K)), i.e.
% increase the maximum cavity variance Vc_lim.
% try switching to fractional updates
eta=repmat(eta2,n,1);
% correct the surrogate normalization accordingly
% the surrogate distribution is not updated
lnZ_s2=0.5*sum( (-log(tau_s) +nu_s.^2 ./tau_s)./eta );
% update the tilted distribution
[lnZ_r2,lnZ_i2,m_r2,V_r2]=evaluate_r(nu_q,tau_q,eta,fh_tm,nu_s,tau_s,display);
% evaluate the new energy
e2 = lnZ_q + lnZ_r2 -lnZ_s2;
if isfinite(e2)
% successful switch to fractional energy
supdate=true;
pcavity=true;
ninner=0; % reset the inner loop iteration counter
% accept the new state
[lnZ_r,lnZ_i,m_r,V_r,lnZ_s,e]=deal(lnZ_r2,lnZ_i2,m_r2,V_r2,lnZ_s2,e2);
% start with ep search direction
up_mode='ep';
df_lim=0.9;
df=0.1;
if ismember(display,{'iter'})
fprintf('switching to fractional EP, ')
end
else
% Improper tilted moments even with fractional updates
% This is very unlikely to happen because decreasing the
% fraction parameter (eta2<eta1) stabilizes the
% tilted moment integrations
% revert back to the previous fraction parameter
eta=repmat(eta1,n,1);
if ismember(display,{'final','iter'})
fprintf('Unable to switch to the fractional EP, check that eta2<eta1\n')
end
break;
end
end
if all(eta==eta2) && ~pcavity && (dInner<tolInner || ninner>=10)
% Surrogate updates do not result into positive cavity variances
% even with fractional updates with eta2 => terminate iterations
if ismember(display,{'final','iter'})
fprintf('surrogate update failed with fractional updates, try decreasing eta2\n')
end
break
end
if ~supdate
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% no successful surrogate update because the tolerance is not yet
% sufficient or the maximum number of inner-loop updates is not exceeded
% => continue with the same surrogate distribution
ninner=ninner+1; % increase inner loop iteration counter
if ismember(display,{'iter'})
fprintf('inner-loop update, ')
end
end
% the new search direction
[dnu_q,dtau_q]=ep_update_dir(mf,Vf,m_r,V_r,eta,up_mode,tolUpdate);
% the initial gradient in the search direction
g = sum( (mf -m_r).*dnu_q ) +0.5*sum( (V_r +m_r.^2 -Vf -mf.^2).*dtau_q );
% re-init step size adjustment record
rec_sadj=[0 e g];
end
if ismember(display,{'iter'})
% maximum difference of the marginal moments
tol_m=[max(abs(mf-m_r)) max(abs(Vf-V_r))];
fprintf('%d, e=%.6f, dm=%.4f, dV=%.4f, df=%6f, eta=%.2f\n',i1,e,tol_m(1),tol_m(2),df,eta(1))
end
%%%%%%%%%%%%%%%%%%%%%%%
% check for convergence
convergence = tol_e<=tolStop;
if convergence
if ismember(display,{'final','iter'})
% maximum difference of the marginal moments
tol_m=[max(abs(mf-m_r)) max(abs(Vf-V_r))];
fprintf('Convergence, iter %d, e=%.6f, dm=%.4f, dV=%.4f, df=%6f, eta=%.2f\n',i1,e,tol_m(1),tol_m(2),df,eta(1))
end
break
end
end % end of the double-loop updates
end
% the current energy is not finite or no convergence
if ~isfinite(e)
fprintf('GPEP_E: Initial energy not defined, check the hyperparameters\n')
elseif ~convergence
fprintf('GPEP_E: No convergence, %d iter, e=%.6f, dm=%.4f, dV=%.4f, df=%6f, eta=%.2f\n',i1,e,tol_m(1),tol_m(2),df,eta(1))
fprintf('GPEP_E: Check the hyperparameters, increase maxiter and/or max_ninner, or decrease tolInner\n')
end
edata=-e; % the data contribution to the marginal posterior density
% =====================================================================================
% Evaluate the prior contribution to the error from covariance functions and likelihood
% =====================================================================================
% Evaluate the prior contribution to the error from covariance functions
eprior = 0;
for i=1:ncf
gpcf = gp.cf{i};
eprior = eprior - gpcf.fh.lp(gpcf);
% eprior = eprior - feval(gpcf.fh.lp, gpcf, x, y);
end
% Evaluate the prior contribution to the error from likelihood functions
if isfield(gp, 'lik') && isfield(gp.lik, 'p')
likelih = gp.lik;
eprior = eprior - likelih.fh.lp(likelih);
end
% the total energy
e = edata + eprior;
sigm2vec_i = 1./(tau_s-eta.*tau_q); % vector of cavity variances
muvec_i = (nu_s-eta.*nu_q).*sigm2vec_i; % vector of cavity means
logZ_i = lnZ_i; % vector of tilted normalization factors
% check that the posterior covariance is positive definite and
% calculate its Cholesky decomposition
switch gp.type
case 'FULL'
[L, notpositivedefinite] = chol(Sf);
b = [];
La2 = [];
if notpositivedefinite || ~isfinite(e)
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
case 'FIC'
La2 = Luu;
L = L2;
b = Kv_ff - Qv_ff;
end
nutilde = nu_q;
tautilde = tau_q;
% store values to the cache
ch.w = w;
ch.e = e;
ch.edata = edata;
ch.eprior = eprior;
ch.L = L;
ch.nu_q = nu_q;
ch.tau_q = tau_q;
ch.La2 = La2;
ch.b = b;
ch.eta = eta;
ch.logZ_i = logZ_i;
ch.sigm2vec_i = sigm2vec_i;
ch.muvec_i = muvec_i;
ch.datahash = datahash;
end
otherwise
error('Unknown optim method!');
end
end
function [e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite()
% Instead of stopping to chol error, return NaN
e = NaN;
edata = NaN;
eprior = NaN;
tautilde = NaN;
nutilde = NaN;
L = NaN;
La2 = NaN;
b = NaN;
muvec_i = NaN;
sigm2vec_i = NaN;
logZ_i = NaN;
datahash = NaN;
eta = NaN;
w = NaN;
ch.e = e;
ch.edata = edata;
ch.eprior = eprior;
ch.tautilde = tautilde;
ch.nutilde = nutilde;
ch.L = L;
ch.La2 = La2;
ch.b = b;
ch.muvec_i = muvec_i;
ch.sigm2vec_i = sigm2vec_i;
ch.logZ_i = logZ_i;
ch.eta = eta;
ch.datahash=datahash;
ch.w = NaN;
end
end
function [m_q,S_q,lnZ_q,L1,L2]=evaluate_q(nu_q,tau_q,K,display)
% function for determining the parameters of the q-distribution
% when site variances tau_q may be negative
%
% q(f) = N(f|0,K)*exp( -0.5*f'*diag(tau_q)*f + nu_q'*f )/Z_q = N(f|m_q,S_q)
%
% S_q = inv(inv(K)+diag(tau_q))
% m_q = S_q*nu_q;
%
% det(eye(n)+K*diag(tau_q))) = det(L1)^2 * det(L2)^2
% where L1 and L2 are upper triangular
%
% see Expectation consistent approximate inference (Opper & Winther, 2005)
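% An illustrative numerical check of the determinant identity above (a
% sketch only; evaluate_q is a subfunction of gpep_e, and the check
% assumes the drawn sites yield a proper q-distribution, otherwise L2 is
% returned empty):
%
%   n = 5; X = randn(n,2); K = X*X' + eye(n);  % some SPD prior covariance
%   tau_q = randn(n,1); nu_q = randn(n,1);     % sites, possibly negative
%   [m_q,S_q,lnZ_q,L1,L2] = evaluate_q(nu_q,tau_q,K,'off');
%   det(eye(n)+K*diag(tau_q)) - det(L1)^2*det(L2)^2   % approximately zero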
n=length(nu_q);
ii1=find(tau_q>0); n1=length(ii1); W1=sqrt(tau_q(ii1));
ii2=find(tau_q<0); n2=length(ii2); W2=sqrt(abs(tau_q(ii2)));
L=zeros(n);
S_q=K;
if ~isempty(ii1)
% Cholesky decomposition for the positive sites
L1=(W1*W1').*K(ii1,ii1);
L1(1:n1+1:end)=L1(1:n1+1:end)+1;
L1=chol(L1);
L(:,ii1) = bsxfun(@times,K(:,ii1),W1')/L1;
S_q=S_q-L(:,ii1)*L(:,ii1)';
else
L1=1;
end
if ~isempty(ii2)
% Cholesky decomposition for the negative sites
V=bsxfun(@times,K(ii2,ii1),W1')/L1;
L2=(W2*W2').*(V*V'-K(ii2,ii2));
L2(1:n2+1:end)=L2(1:n2+1:end)+1;
[L2,pd]=chol(L2);
if pd==0
L(:,ii2)=bsxfun(@times,K(:,ii2),W2')/L2 -L(:,ii1)*(bsxfun(@times,V,W2)'/L2);
S_q=S_q+L(:,ii2)*L(:,ii2)';
else
L2=[];
if ismember(display,{'iter'})
fprintf('Negative definite q-distribution.\n')
end
end
else
L2=1;
end
%V_q=diag(S_q);
m_q=S_q*nu_q;
% log normalization
lnZ_q = -sum(log(diag(L1))) -sum(log(diag(L2))) +0.5*sum(m_q.*nu_q);
end
function [m_q,S_q,lnZ_q,L1,L2]=evaluate_q2(nu_q,tau_q,LK_uu, K_fu, Kv_ff, Qv_ff, display)
% function for determining the parameters of the q-distribution
% when site variances tau_q may be negative
%
% q(f) = N(f|0,K)*exp( -0.5*f'*diag(tau_q)*f + nu_q'*f )/Z_q = N(f|m_q,S_q)
%
% S_q = inv(inv(K)+diag(tau_q)) where K is sparse approximation for prior
% covariance
% m_q = S_q*nu_q;
%
% det(eye(n)+K*diag(tau_q))) = det(L1)^2 * det(L2)^2
% where L1 and L2 are upper triangular
%
% see Expectation consistent approximate inference (Opper & Winther, 2005)
n=length(nu_q);
S_q = Kv_ff;
m_q = nu_q;
D = Kv_ff - Qv_ff;
L1 = sqrt(1 + D.*tau_q);
L = [];
if any(~isreal(L1))
if ismember(display,{'iter'})
fprintf('Negative definite q-distribution.\n')
end
else
U = K_fu;
WDtilde = tau_q./(1+tau_q.*D);
% Evaluate diagonal of S_q
ii1=find(WDtilde>0); n1=length(ii1); W1=sqrt(WDtilde(ii1)); % WS^-1
ii2=find(WDtilde<0); n2=length(ii2); W2=sqrt(abs(WDtilde(ii2))); % WS^-1
if ~isempty(ii2) || ~isempty(ii1)
if ~isempty(ii1)
UWS(:,ii1) = bsxfun(@times, U(ii1,:)', W1');
end
if ~isempty(ii2)
UWS(:,ii2) = bsxfun(@times, U(ii2,:)', W2');
end
[L, p] = chol(LK_uu*LK_uu' + UWS(:,ii1)*UWS(:,ii1)' - UWS(:,ii2)*UWS(:,ii2)', 'lower');
if p~=0
L=[];
if ismember(display,{'iter'})
fprintf('Negative definite q-distribution.\n')
end
else
S = 1 + D.*tau_q;
% S_q = diag(D./S) + diag(1./S)*U*inv(L*L')*U'*diag(1./S);
S_q = D./S + sum((bsxfun(@times, 1./S, U)/L').^2,2);
m_q = D.*nu_q./S + (U*(L'\(L\(U'*(nu_q./S)))))./S;
end
end
end
% log normalization
L2 = L;
lnZ_q = -0.5*sum(log(L1.^2)) - sum(log(diag(L))) + sum(log(diag(LK_uu))) +0.5*sum(m_q.*nu_q);
end
function [lnZ_r,lnZ_i,m_r,V_r,p]=evaluate_r(nu_q,tau_q,eta,fh_tm,nu_s,tau_s,display)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% function for determining the parameters of the r-distribution
% (the product of the tilted distributions)
%
% r(f) = exp(-lnZ_r) * prod_i p(y(i)|f(i)) * exp( -0.5*f(i)^2 tau_r(i) + nu_r(i)*f(i) )
% ~ prod_i N(f(i)|m_r(i),V_r(i))
%
% tau_r = tau_s - eta*tau_q
% nu_r = nu_s - eta*nu_q
%
% lnZ_i(i) = log int p(y(i)|f(i)) * N(f(i)|nu_r(i)/tau_r(i),1/tau_r(i)) df(i)
%
% see Expectation consistent approximate inference (Opper & Winther, 2005)
n=length(nu_q);
[lnZ_i,m_r,V_r,nu_r,tau_r]=deal(zeros(n,1));
p=false(n,1);
for si=1:n
% cavity distribution
tau_r_si=tau_s(si)-eta(si)*tau_q(si);
if tau_r_si<=0
% if ismember(display,{'iter'})
% %fprintf('Negative cavity precision at site %d\n',si)
% end
continue
end
nu_r_si=nu_s(si)-eta(si)*nu_q(si);
% tilted moments
[lnZ_si,m_r_si,V_r_si] = fh_tm(si, nu_r_si/tau_r_si, 1/tau_r_si, eta(si));
if ~isfinite(lnZ_si) || V_r_si<=0
% if ismember(display,{'iter'})
% fprintf('Improper normalization or tilted variance at site %d\n',si)
% end
continue
end
% store the new parameters
[nu_r(si),tau_r(si),lnZ_i(si),m_r(si),V_r(si)]=deal(nu_r_si,tau_r_si,lnZ_si,m_r_si,V_r_si);
p(si)=true;
end
lnZ_r=sum(lnZ_i./eta) +0.5*sum((-log(tau_r) +nu_r.^2 ./tau_r)./eta);
end
function [dnu_q,dtau_q]=ep_update_dir(m_q,V_q,m_r,V_r,eta,up_mode,tolUpdate)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% update direction for double-loop EP
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% V_q=diag(S_q);
switch up_mode
case 'ep'
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% site updates by moment matching
[dnu_q,dtau_q]=deal(zeros(size(m_q)));
%ind_up=V_r>0 & max(abs(V_r-V_q),abs(m_r-m_q))>tolUpdate;
ind_up=V_r>0 & (abs(V_r-V_q) > tolUpdate*abs(V_q) | abs(m_r-m_q) > tolUpdate*abs(m_q));
dnu_q(ind_up) = ( m_r(ind_up)./V_r(ind_up) - m_q(ind_up)./V_q(ind_up) ) ./ eta(ind_up);
dtau_q(ind_up) = ( 1./V_r(ind_up) - 1./V_q(ind_up) )./ eta(ind_up);
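% (the differences of the tilted and approximate natural parameters,
% scaled by the fraction parameter eta)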
case 'grad'
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% gradient descend
% Not used at the moment!
% evaluate the gradients wrt nu_q and tau_q
gnu_q = m_q - m_r;
gtau_q = 0.5*(V_r + m_r.^2 - V_q - m_q.^2);
% the search direction
dnu_q=-gnu_q;
dtau_q=-gtau_q;
end
end
|
github
|
lcnbeapp/beapp-master
|
lik_gaussian.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/lik_gaussian.m
| 11,092 |
utf_8
|
3a8dfb3827eaef0a74a1d280cc4cbf20
|
function lik = lik_gaussian(varargin)
%LIK_GAUSSIAN Create a Gaussian likelihood structure
%
% Description
% LIK = LIK_GAUSSIAN('PARAM1',VALUE1,'PARAM2,VALUE2,...)
% creates a Gaussian likelihood structure in which the named
% parameters have the specified values. Any unspecified
% parameters are set to default values.
%
% LIK = LIK_GAUSSIAN(LIK,'PARAM1',VALUE1,'PARAM2,VALUE2,...)
% modify a likelihood function structure with the named
% parameters altered with the specified values.
%
% Parameters for Gaussian likelihood function [default]
% sigma2 - variance [0.1]
% sigma2_prior - prior for sigma2 [prior_logunif]
% n - number of observations per input (See using average
% observations below)
%
% Note! If the prior is 'prior_fixed' then the parameter in
% question is considered fixed and it is not handled in
% optimization, grid integration, MCMC etc.
%
% Using average observations
% The lik_gaussian can be used to model data where each input vector is
% attached to an average of varying number of observations. That is, we
% have input vectors x_i, average observations y_i and sample sizes n_i.
% Each observation is distributed
%
% y_i ~ N(f(x_i), sigma2/n_i)
%
% The model is constructed as lik_gaussian('n', n), where n is the same
% length as y and collects the sample sizes.
%
% See also
% GP_SET, PRIOR_*, LIK_*
% Internal note: Because Gaussian noise can be combined
% analytically into the covariance matrix, lik_gaussian internally
% lies somewhere between the lik_* and gpcf_* functions.
% Copyright (c) 2007-2010 Jarno Vanhatalo
% Copyright (c) 2010 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'LIK_GAUSSIAN';
ip.addOptional('lik', [], @(x) isstruct(x) || isempty(x));
ip.addParamValue('sigma2',0.1, @(x) isscalar(x) && x>0);
ip.addParamValue('sigma2_prior',prior_logunif(), @(x) isstruct(x) || isempty(x));
ip.addParamValue('n',[], @(x) isreal(x) && all(x>0));
ip.parse(varargin{:});
lik=ip.Results.lik;
if isempty(lik)
init=true;
lik.type = 'Gaussian';
else
if ~isfield(lik,'type') || ~isequal(lik.type,'Gaussian')
error('First argument does not seem to be a valid likelihood function structure')
end
init=false;
end
% Initialize parameters
if init || ~ismember('sigma2',ip.UsingDefaults)
lik.sigma2 = ip.Results.sigma2;
end
if init || ~ismember('n',ip.UsingDefaults)
lik.n = ip.Results.n;
end
% Initialize prior structure
if init
lik.p=[];
end
if init || ~ismember('sigma2_prior',ip.UsingDefaults)
lik.p.sigma2=ip.Results.sigma2_prior;
end
if init
% Set the function handles to the subfunctions
lik.fh.pak = @lik_gaussian_pak;
lik.fh.unpak = @lik_gaussian_unpak;
lik.fh.lp = @lik_gaussian_lp;
lik.fh.lpg = @lik_gaussian_lpg;
lik.fh.cfg = @lik_gaussian_cfg;
lik.fh.trcov = @lik_gaussian_trcov;
lik.fh.trvar = @lik_gaussian_trvar;
lik.fh.recappend = @lik_gaussian_recappend;
end
end
function [w s] = lik_gaussian_pak(lik)
%LIK_GAUSSIAN_PAK Combine likelihood parameters into one vector.
%
% Description
% W = LIK_GAUSSIAN_PAK(LIK) takes a likelihood structure LIK
% and combines the parameters into a single row vector W.
% This is a mandatory subfunction used for example in energy
% and gradient computations.
%
% w = [ log(lik.sigma2)
% (hyperparameters of lik.sigma2)]'
%
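%  For example, with the default log-uniform prior on sigma2 and a
%  hypothetical variance value:
%
%    lik = lik_gaussian('sigma2', 0.04);
%    w = lik.fh.pak(lik);   % w = log(0.04)
%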
% See also
% LIK_GAUSSIAN_UNPAK
w = []; s = {};
if ~isempty(lik.p.sigma2)
w = [w log(lik.sigma2)];
s = [s; 'log(gaussian.sigma2)'];
% Hyperparameters of sigma2
[wh sh] = lik.p.sigma2.fh.pak(lik.p.sigma2);
w = [w wh];
s = [s; sh];
end
end
function [lik, w] = lik_gaussian_unpak(lik, w)
%LIK_GAUSSIAN_UNPAK Extract likelihood parameters from the vector.
%
% Description
% [LIK, W] = LIK_GAUSSIAN_UNPAK(LIK, W) takes a likelihood structure
% LIK and extracts the parameters from the vector W to the LIK
% structure. This is a mandatory subfunction used for example
% in energy and gradient computations.
%
% Assignment is inverse of
% w = [ log(lik.sigma2)
% (hyperparameters of lik.sigma2)]'
%
% See also
% LIK_GAUSSIAN_PAK
if ~isempty(lik.p.sigma2)
lik.sigma2 = exp(w(1));
w = w(2:end);
% Hyperparameters of sigma2
[p, w] = lik.p.sigma2.fh.unpak(lik.p.sigma2, w);
lik.p.sigma2 = p;
end
end
function lp = lik_gaussian_lp(lik)
%LIK_GAUSSIAN_LP Evaluate the log prior of likelihood parameters
%
% Description
% LP = LIK_GAUSSIAN_LP(LIK) takes a likelihood structure LIK and
% returns log(p(th)), where th collects the parameters.
% This subfunction is needed when there are likelihood
% parameters.
%
% See also
% LIK_GAUSSIAN_PAK, LIK_GAUSSIAN_UNPAK, LIK_GAUSSIAN_G, GP_E
lp = 0;
if ~isempty(lik.p.sigma2)
likp=lik.p;
lp = likp.sigma2.fh.lp(lik.sigma2, likp.sigma2) + log(lik.sigma2);
end
end
function lpg = lik_gaussian_lpg(lik)
%LIK_GAUSSIAN_LPG Evaluate gradient of the log prior with respect
% to the parameters.
%
% Description
% LPG = LIK_GAUSSIAN_LPG(LIK) takes a Gaussian likelihood
% function structure LIK and returns LPG = d log (p(th))/dth,
% where th is the vector of parameters. This subfunction is
% needed when there are likelihood parameters.
%
% See also
% LIK_GAUSSIAN_PAK, LIK_GAUSSIAN_UNPAK, LIK_GAUSSIAN_E, GP_G
lpg = [];
if ~isempty(lik.p.sigma2)
likp=lik.p;
lpgs = likp.sigma2.fh.lpg(lik.sigma2, likp.sigma2);
lpg = lpgs(1).*lik.sigma2 + 1;
if length(lpgs) > 1
lpg = [lpg lpgs(2:end)];
end
end
end
function DKff = lik_gaussian_cfg(lik, x, x2)
%LIK_GAUSSIAN_CFG Evaluate gradient of covariance with respect to
% Gaussian noise
%
% Description
% Gaussian likelihood is a special case since it can be
% analytically combined with covariance functions and thus we
% compute gradient of covariance instead of gradient of likelihood.
%
% DKff = LIK_GAUSSIAN_CFG(LIK, X) takes a Gaussian likelihood
% function structure LIK, a matrix X of input vectors and
% returns DKff, the gradients of Gaussian noise covariance
% matrix Kff = k(X,X) with respect to th (cell array with
% matrix elements). This subfunction is needed only in Gaussian
% likelihood.
%
% DKff = LIK_GAUSSIAN_CFG(LIK, X, X2) takes a Gaussian
% likelihood function structure LIK, a matrix X of input
% vectors and returns DKff, the gradients of Gaussian noise
% covariance matrix Kff = k(X,X2) with respect to th (cell
% array with matrix elements). This subfunction is needed
% only in Gaussian likelihood.
%
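%  Note: the noise covariance is sigma2*eye(n) and the parameter is
%  transformed as th = log(sigma2), so d(sigma2*I)/dth = sigma2*I; below
%  this is represented compactly by the scalar DKff{1} = lik.sigma2, or
%  by a sparse diagonal with entries sigma2./n when averaged
%  observations are used.
%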
% See also
% LIK_GAUSSIAN_PAK, LIK_GAUSSIAN_UNPAK, LIK_GAUSSIAN_E, GP_G
DKff = {};
if ~isempty(lik.p.sigma2)
if isempty(lik.n)
DKff{1}=lik.sigma2;
else
n=size(x,1);
DKff{1} = sparse(1:n, 1:n, lik.sigma2./lik.n, n, n);
end
end
end
function DKff = lik_gaussian_ginput(lik, x, t, g_ind, gdata_ind, gprior_ind, varargin)
%LIK_GAUSSIAN_GINPUT Evaluate gradient of likelihood function with
% respect to x.
%
% Description
% DKff = LIK_GAUSSIAN_GINPUT(LIK, X) takes a likelihood
% function structure LIK, a matrix X of input vectors and
% returns DKff, the gradients of likelihood matrix Kff =
% k(X,X) with respect to X (cell array with matrix elements).
% This subfunction is needed only in Gaussian likelihood.
%
% DKff = LIK_GAUSSIAN_GINPUT(LIK, X, X2) takes a likelihood
% function structure LIK, a matrix X of input vectors and
% returns DKff, the gradients of likelihood matrix Kff =
% k(X,X2) with respect to X (cell array with matrix elements).
% This subfunction is needed only in Gaussian likelihood.
%
% See also
% LIK_GAUSSIAN_PAK, LIK_GAUSSIAN_UNPAK, LIK_GAUSSIAN_E, GP_G
end
function C = lik_gaussian_trcov(lik, x)
%LIK_GAUSSIAN_TRCOV Evaluate training covariance matrix
% corresponding to Gaussian noise
%
% Description
% C = LIK_GAUSSIAN_TRCOV(GP, TX) takes in covariance function
% of a Gaussian process GP and matrix TX that contains
% training input vectors. Returns covariance matrix C. Every
% element ij of C contains covariance between inputs i and j
% in TX. This subfunction is needed only in Gaussian likelihood.
%
% See also
% LIK_GAUSSIAN_COV, LIK_GAUSSIAN_TRVAR, GP_COV, GP_TRCOV
[n, m] =size(x);
n1=n+1;
if isempty(lik.n)
C = sparse(1:n,1:n,ones(n,1).*lik.sigma2,n,n);
else
C = sparse(1:n, 1:n, lik.sigma2./lik.n, n, n);
end
end
function C = lik_gaussian_trvar(lik, x)
%LIK_GAUSSIAN_TRVAR Evaluate training variance vector
% corresponding to Gaussian noise
%
% Description
% C = LIK_GAUSSIAN_TRVAR(LIK, TX) takes in covariance function
% of a Gaussian process LIK and matrix TX that contains
% training inputs. Returns variance vector C. Every element i
% of C contains variance of input i in TX. This subfunction is
% needed only in Gaussian likelihood.
%
%
% See also
% LIK_GAUSSIAN_COV, GP_COV, GP_TRCOV
[n, m] =size(x);
if isempty(lik.n)
C=repmat(lik.sigma2,n,1);
else
C=lik.sigma2./lik.n(:);
end
end
function reclik = lik_gaussian_recappend(reclik, ri, lik)
%RECAPPEND Record append
%
% Description
% RECLIK = LIK_GAUSSIAN_RECAPPEND(RECLIK, RI, LIK) takes a
% likelihood function record structure RECLIK, record index RI
% and likelihood function structure LIK with the current MCMC
% samples of the parameters. Returns RECLIK which contains all
% the old samples and the current samples from LIK. This
% subfunction is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC and GP_MC -> RECAPPEND
if nargin == 2
% Initialize the record
reclik.type = 'lik_gaussian';
% Initialize the parameters
reclik.sigma2 = [];
reclik.n = [];
% Set the function handles
reclik.fh.pak = @lik_gaussian_pak;
reclik.fh.unpak = @lik_gaussian_unpak;
reclik.fh.lp = @lik_gaussian_lp;
reclik.fh.lpg = @lik_gaussian_lpg;
reclik.fh.cfg = @lik_gaussian_cfg;
reclik.fh.trcov = @lik_gaussian_trcov;
reclik.fh.trvar = @lik_gaussian_trvar;
reclik.fh.recappend = @lik_gaussian_recappend;
reclik.p=[];
reclik.p.sigma2=[];
if ~isempty(ri.p.sigma2)
reclik.p.sigma2 = ri.p.sigma2;
end
else
% Append to the record
likp = lik.p;
% record sigma2
reclik.sigma2(ri,:)=lik.sigma2;
if isfield(likp,'sigma2') && ~isempty(likp.sigma2)
reclik.p.sigma2 = likp.sigma2.fh.recappend(reclik.p.sigma2, ri, likp.sigma2);
end
% record n if given
if isfield(lik,'n') && ~isempty(lik.n)
reclik.n(ri,:)=lik.n(:)';
end
end
end
|
github
|
lcnbeapp/beapp-master
|
surrogate_sls.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/surrogate_sls.m
| 20,578 |
utf_8
|
fc29ee001fe374a168e9aa872d77d472
|
function [samples,samplesf,diagn] = surrogate_sls(f, x, opt, gp, xx, yy, z, varargin)
%SURROGATE_SLS Markov Chain Monte Carlo sampling using Surrogate data Slice Sampling
%
% Description
% SAMPLES = SURROGATE_SLS(F, X, OPTIONS) uses slice sampling to sample
% from the distribution P ~ EXP(-F), where F is the first
% argument to SLS. Markov chain starts from point X and the
% sampling from multivariate distribution is implemented by
% sampling each variable at a time either using overrelaxation
% or not. See SLS_OPT for details. A simple multivariate scheme
% using hyperrectangles is utilized when method is defined 'multi'.
%
% F contains the current latent values of the GP; at each new
% hyperparameter sample the latent values are refreshed with
% elliptical slice sampling (ESLS).
%
% [SAMPLES, SAMPLESF, DIAGN] = SURROGATE_SLS(F, X, OPTIONS, ...) also
% returns the sampled latent values SAMPLESF and some additional
% diagnostics DIAGN.
%
% See SSLS_OPT and SLS_OPT for the optional parameters in the OPTIONS structure.
%
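%  A hypothetical usage sketch (variable names are illustrative; the GP
%  structure is assumed to carry current latent values in GP.latentValues
%  and OPT.latent_opt is passed on to ESLS):
%
%    opt.nsamples = 100;            % number of hyperparameter samples
%    opt.latent_opt = struct();     % options for the latent ESLS updates
%    f = gp.latentValues;           % current latent values
%    w = gp_pak(gp);                % current packed hyperparameters
%    [ws, fs] = surrogate_sls(f, w, opt, gp, x, y, []);
%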
% See also
% METROP2, HMC2, SLS_OPT, SLS
% Based on "Slice Sampling" by Radford M. Neal in "The Annals of Statistics"
% 2003, Vol. 31, No. 3, 705-767, (c) Institute of Mathematical Statistics, 2003
% "Slice sampling covariance hyperparameters of latent Gaussian models"
% by Iain Murray and Ryan P. Adams, 2010, Arxiv preprint arXiv:1006.0868
% Copyright (c) Toni Auranen, 2003-2006
% Copyright (c) Ville Tolvanen, 2012
% This software is distributed under the GNU General Public
% Licence (version 3 or later); please refer to the file
% Licence.txt, included with the software, for details.
% Set empty options to default values
opt = ssls_opt(opt);
%if opt.display, disp(opt); end
if opt.display == 1
opt.display = 2; % verbose
elseif opt.display == 2
opt.display = 1; % all
end
% Forces x to be a row vector
x = x(:)';
% Set up some variables
nparams = length(x);
n = size(gp.latentValues,1);
samples = zeros(opt.nsamples,nparams);
samplesf = zeros(n,opt.nsamples);
if nargout >= 2
save_energies = 1;
energies = zeros(opt.nsamples,1);
else
save_energies = 0;
end
if nargout >= 3
save_diagnostics = 1;
else
save_diagnostics = 0;
end
if nparams == 1
multivariate = 0;
if strcmp(opt.method,'multi')
opt.method = 'stepping';
end
end
if nparams > 1
multivariate = 1;
end
rej = 0;
rej_step = 0;
rej_old = 0;
x_0 = x;
f_0 = f;
ncf = length(gp.cf);
umodal = opt.unimodal;
nomit = opt.nomit;
nsamples = opt.nsamples;
display_info = opt.display;
method = opt.method;
overrelaxation = opt.overrelaxation;
overrelaxation_info = ~isempty(find(overrelaxation));
w = opt.wsize;
maxiter = opt.maxiter;
m = opt.mlimit;
p = opt.plimit;
a = opt.alimit;
mmin = opt.mmlimits(1,:);
mmax = opt.mmlimits(2,:);
if isfield(opt, 'scale')
scale = opt.scale;
else
scale = 5;
end
if multivariate
if length(w) == 1
w = w.*ones(1,nparams);
end
if length(m) == 1
m = m.*ones(1,nparams);
end
if length(p) == 1
p = p.*ones(1,nparams);
end
if length(overrelaxation) == 1
overrelaxation = overrelaxation.*ones(1,nparams);
end
if length(a) == 1
a = a.*ones(1,nparams);
end
if length(mmin) == 1
mmin = mmin.*ones(1,nparams);
end
if length(mmax) == 1
mmax = mmax.*ones(1,nparams);
end
end
if overrelaxation_info
nparams_or = length(find(overrelaxation));
end
if ~isempty(find(w<=0))
error('Parameter ''wsize'' must be positive.');
end
if (strcmp(method,'stepping') || strcmp(method,'doubling')) && isempty(find(mmax-mmin>2*w))
error('Check parameter ''mmlimits''. The interval is too small in comparison to parameter ''wsize''.');
end
if strcmp(method,'stepping') && ~isempty(find(m<1))
error('Parameter ''mlimit'' must be >0.');
end
if overrelaxation_info && ~isempty(find(a<1))
error('Parameter ''alimit'' must be >0.');
end
if strcmp(method,'doubling') && ~isempty(find(p<1))
error('Parameter ''plimit'' must be >0.');
end
ind_umodal = 0;
j = 0;
% y_new = -f(x_0,varargin{:});
% S = diag(ones(size(f)));
% The main loop of slice sampling
for i = 1-nomit:1:nsamples
% Threshold
[y, tmp, eta, g] = getY(gp, xx, yy, z, f_0, x_0, []);
% fprintf('\n')
% fprintf('Threshold: %g\n',y);
switch method
% Multivariate rectangle sampling step
case 'multi'
x_new = x_0;
L = max(x_0 - w.*rand(1,length(x_0)),mmin);
R = min(L + w,mmax);
x_new = L + rand(1,length(x_new)).*(R-L);
[y_new, f_new] = getY(gp,xx,yy,z,[], x_new, eta, g);
while y >= y_new
% disp(y_new)
L(x_new < x_0) = x_new(x_new < x_0);
R(x_new >= x_0) = x_new(x_new >= x_0);
if sum(abs(L-R))<1e-8
error('BUG DETECTED: Shrunk to minimum position and still not acceptable.');
end
x_new = L + rand(1,length(x_new)).*(R-L);
[y_new, f_new] = getY(gp, xx, yy, z, [], x_new, eta, g);
end % while
% Save sampling step and set up the new 'old' sample
x_0 = x_new;
f_0 = f_new;
if i > 0
samples(i,:) = x_new;
latent_opt = esls(opt.latent_opt);
gp = gp_unpak(gp, x_new);
for ii=1:opt.fsamples-1
f_new = esls(f_new, latent_opt, gp, xx, yy, z);
end
samplesf(:,i) = f_new;
f_0 = f_new;
end
% Save energies
% if save_energies && i > 0
% energies(i) = -y_new;
% end
% Display energy information
if display_info == 1
fprintf('Finished multi-step %4d Energy: %g\n',i,-y_new);
end
case 'multimm'
x_new = x_0;
% if isinf(y)
% x_new = mmin + (mmax-mmin).*rand(1,length(x_new));
% y_new = -f(x_new,varargin{:});
% else
L = mmin;
R = mmax;
x_new = L + rand(1,length(x_new)).*(R-L);
[y_new, f_new] = getY(gp, xx, yy, z, [], x_new, eta, g);
while y >= y_new
L(x_new < x_0) = x_new(x_new < x_0);
R(x_new >= x_0) = x_new(x_new >= x_0);
x_new = L + rand(1,length(x_new)).*(R-L);
[y_new, f_new] = getY(gp, xx, yy, z, [], x_new, eta, g);
end % while
% end % isinf(y)
% Save sampling step and set up the new 'old' sample
% fprintf('Accepted: %g\n',y_new);
x_0 = x_new;
f_0 = f_new;
if i > 0
samples(i,:) = x_new;
latent_opt = esls(opt.latent_opt);
gp = gp_unpak(gp, x_new);
for ii=1:opt.fsamples-1
f_new = esls(f_new, latent_opt, gp, xx, yy, z);
end
samplesf(:,i) = f_new;
f_0 = f_new;
end
% Save energies
% if save_energies && i > 0
% energies(i) = -y_new;
% end
%
% Display energy information
if display_info == 1
fprintf('Finished multimm-step %4d Energy: %g\n',i,-y_new);
end
% Other sampling steps
otherwise
ind_umodal = ind_umodal + 1;
x_new = x_0;
f_new = f_0;
for j = 1:nparams
L = x_new;
R = x_new;
switch method
case 'stepping'
[L, R] = stepping_out(f_new,y,x_new,L,R,w,m,j,mmin,mmax,display_info,umodal,xx,yy,gp,z,eta,g,varargin{:});
case 'doubling'
[L, R] = doubling(f_new,y,x_new,L,R,w,m,j,mmin,mmax,display_info,umodal,xx,yy,gp,z,eta,g,varargin{:});
case 'minmax'
L(j) = mmin(j);
R(j) = mmax(j);
otherwise
error('unknown method');
end % switch
if overrelaxation(j)
[x_new, f_new, rej_step, rej_old, y_new] = bisection(f_new,y,x_new,L,R,w,a,rej_step,j,umodal,xx,yy,gp,z,eta,g);
else
[x_new, f_new] = shrinkage(f_new,y,x_new,w,L,R,method,j,maxiter,umodal,xx,yy,gp,z,eta,g);
end % if overrelaxation
if umodal % adjust the slice if the distribution is known to be unimodal
w(j) = (w(j)*ind_umodal + abs(x_0(j)-x_new(j)))/(ind_umodal+1);
end % if umodal
% end % if isinf(y)
end % j:nparams
if overrelaxation_info & multivariate
rej = rej + rej_step/nparams_or;
elseif overrelaxation_info & ~multivariate
rej = rej + rej_step;
end
% Save sampling step and set up the new 'old' sample
x_0 = x_new;
f_0 = f_new;
if i > 0
samples(i,:) = x_new;
latent_opt = esls(opt.latent_opt);
gp = gp_unpak(gp, x_new);
for ii=1:opt.fsamples-1
f_new = esls(f_new, latent_opt, gp, xx, yy, z);
end
samplesf(:,i) = f_new;
f_0 = f_new;
end
% Save energies
% if save_energies && i > 0
% energies(i) = -y_new;
% end
% Display information and keep track of rejections (overrelaxation)
if display_info == 1
if ~multivariate && overrelaxation_info && rej_old
fprintf(' Sample %4d rejected (overrelaxation).\n',i);
rej_old = 0;
rej_step = 0;
elseif multivariate && overrelaxation_info
fprintf('Finished step %4d (RR: %1.1f, %d/%d) Energy: %g\n',i,100*rej_step/nparams_or,nparams_or,nparams,-y_new);
rej_step = 0;
rej_old = 0;
else
fprintf('Finished step %4d Energy: %g\n',i,-y_new);
end
else
rej_old = 0;
rej_step = 0;
end
end % switch
end % i:nsamples
% Save diagnostics
if save_diagnostics
diagn.opt = opt;
end
% Display rejection information after slice sampling is complete (overrelaxation)
if overrelaxation_info && nparams == 1 && display_info == 1
fprintf('\nRejected samples due to overrelaxation (percentage): %1.1f\n',100*rej/nsamples);
elseif overrelaxation_info && nparams > 1 && display_info == 1
fprintf('\nAverage rejections per step due to overrelaxation (percentage): %1.1f\n',100*rej/nsamples);
end
% Display the elapsed time
%if display_info == 1
% if (cputime-t)/60 < 4
% fprintf('\nElapsed cputime (seconds): %1.1f\n\n',cputime-t);
% else
% fprintf('\nElapsed cputime (minutes): %1.1f\n\n',(cputime-t)/60);
% end
%end
%disp(w);
%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%
function [x_new, f_new, rej, rej_old, y_new] = bisection(f,y,x_0,L,R,w,a,rej,j,um,xx,yy,gp,z,eta,g);
%function [x_new, y_new, rej, rej_old] = bisection(f,y,x_0,L,R,w,a,rej,j,um,varargin);
%
% Bisection for overrelaxation (stepping-out needs to be used)
x_new = x_0;
f_new = f(:,end);
M = (L + R) / 2;
l = L;
r = R;
q = w(j);
s = a(j);
if (R(j) - L(j)) < 1.1*w(j)
while 1
M(j) = (l(j) + r(j))/2;
[y_new, f_new] = getY(gp,xx,yy,z, f_new, M, eta, g);
if s == 0 || y < y_new
break;
end
if x_0(j) > M(j)
l(j) = M(j);
else
r(j) = M(j);
end
s = s - 1;
q = q / 2;
end % while
end % if
ll = l;
rr = r;
while s > 0
s = s - 1;
q = q / 2;
tmp_ll = ll;
tmp_ll(j) = tmp_ll(j) + q;
tmp_rr = rr;
tmp_rr(j) = tmp_rr(j) - q;
[y_new_ll] = getY(gp,xx,yy,z,f_new,tmp_ll, eta, g);
[y_new_rr] = getY(gp,xx,yy,z,f_new,tmp_rr, eta, g);
if y >= y_new_ll
ll(j) = ll(j) + q;
end
if y >= y_new_rr
rr(j) = rr(j) - q;
end
end % while
x_new(j) = ll(j) + rr(j) - x_0(j);
[y_new, f_new] = getY(gp,xx,yy,z,f_new, x_new, eta, g);
% y_new = -f(x_new,varargin{:});
if x_new(j) < l(j) || x_new(j) > r(j) || y >= y_new
x_new(j) = x_0(j);
rej = rej + 1;
rej_old = 1;
else
rej_old = 0;
f(:,end+1) = f_new;
end
%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%
function [x_new, f_new] = shrinkage(f,y,x_0,w,L,R,method,j,maxiter,um,xx,yy,gp,z,eta,g,varargin);
%function [x_new, y_new] = shrinkage(f,y,x_0,w,L,R,method,j,maxiter,um,varargin);
%
% Shrinkage with acceptance-check for doubling scheme
% - acceptance-check is skipped if the distribution is defined
% to be unimodal by the user
iter = 0;
x_new = x_0;
l = L(j);
r = R(j);
f_new = f(:,end);
% [y, tmp, eta, g] = getY(gp, xx, yy, z, f_new, x_0, []);
while 1
x_new(j) = l + (r-l).*rand;
[y_new, f_new] = getY(gp, xx, yy, z, [], x_new, eta, g);
if strcmp(method,'doubling')
if y < y_new && (um || accept(f,y,x_0,x_new,w,L,R,j,varargin{:}))
break;
end
else
if y < y_new
% f(:,end+1) = f_new;
break;
end
end % if strcmp
if x_new(j) < x_0(j)
l = x_new(j);
else
r = x_new(j);
end % if
if abs(l-r) < 1e-8
error('bug')
end
iter = iter + 1;
if iter > maxiter
fprintf('Maximum number (%d) of iterations reached for parameter %d during shrinkage.\n',maxiter,j);
if strcmp(method,'minmax')
error('Check function F, decrease the interval ''mmlimits'' or increase the value of ''maxiter''.');
else
error('Check function F or increase the value of ''maxiter''.');
end
end
end % while
%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%
function [L,R] = stepping_out(f,y,x_0,L,R,w,m,j,mmin,mmax,di,um,xx,yy,gp,z,eta,g,varargin);
%function [L,R] = stepping_out(f,y,x_0,L,R,w,m,j,mmin,mmax,di,um,varargin);
%
% Stepping-out procedure
f_new = f(:,end);
x_new = x_0;
if um % if the user defines the distribution to be unimodal
L(j) = x_0(j) - w(j).*rand;
if L(j) < mmin(j)
L(j) = mmin(j);
if di
fprintf('Underflow! (L:%d)\n',j);
end
end
R(j) = L(j) + w(j);
if R(j) > mmax(j)
R(j) = mmax(j);
if di
fprintf('Overflow! (R:%d)\n',j);
end
end
y_new = getY(gp,xx,yy,z,f_new, L, eta, g);
while y < y_new
% while y < -f(L,varargin{:})
L(j) = L(j) - w(j);
if L(j) < mmin(j)
L(j) = mmin(j);
if di
fprintf('Underflow! (L:%d)\n',j);
end
break;
else
y_new = getY(gp,xx,yy,z,f_new, L, eta, g);
end
end
y_new = getY(gp,xx,yy,z,f_new, R, eta, g);
while y < y_new
% while y < -f(R,varargin{:})
R(j) = R(j) + w(j);
if R(j) > mmax(j)
R(j) = mmax(j);
if di
fprintf('Overflow! (R:%d)\n',j);
end
break;
else
y_new = getY(gp,xx,yy,z,f_new, R, eta, g);
end
end
else % if the distribution is not defined to be unimodal
L(j) = x_0(j) - w(j).*rand;
J = floor(m(j).*rand);
if L(j) < mmin(j)
L(j) = mmin(j);
if di
fprintf('Underflow! (L:%d)\n',j);
end
J = 0;
end
R(j) = L(j) + w(j);
K = (m(j)-1) - J;
if R(j) > mmax(j)
R(j) = mmax(j);
if di
fprintf('Overflow! (R:%d)\n',j);
end
K = 0;
end
y_new = getY(gp,xx,yy,z,f_new, L, eta, g);
while J > 0 && y < y_new
% while J > 0 && y < -f(L,varargin{:})
L(j) = L(j) - w(j);
if L(j) < mmin(j)
L(j) = mmin(j);
if di
fprintf('Underflow! (L:%d)\n',j);
end
break;
end
y_new = getY(gp,xx,yy,z,f_new, L, eta, g);
J = J - 1;
end
y_new = getY(gp,xx,yy,z,f_new, R, eta, g);
while K > 0 && y < y_new
% while K > 0 && y < -f(R,varargin{:})
R(j) = R(j) + w(j);
if R(j) > mmax(j)
R(j) = mmax(j);
if di
fprintf('Overflow! (R:%d)\n',j);
end
break;
end
y_new = getY(gp,xx,yy,z,f_new, R, eta, g);
K = K - 1;
end
end
%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%
function [L,R] = doubling(f,y,x_0,L,R,w,m,j,mmin,mmax,di,um,xx,yy,gp,z,eta,g,varargin)
%function [L,R] = doubling(f,y,x_0,L,R,w,p,j,mmin,mmax,di,um,varargin);
%
% Doubling scheme for slice sampling
f_new = f(:,end);
x_new = x_0;
if um % if the user defines the distribution to be unimodal
L(j) = x_0(j) - w(j).*rand;
if L(j) < mmin(j)
L(j) = mmin(j);
if di
fprintf('Underflow! (L:%d)\n',j);
end
Ao = 1;
else
Ao = 0;
end
R(j) = L(j) + w(j);
if R(j) > mmax(j)
R(j) = mmax(j);
if di
fprintf('Overflow! (R:%d)\n',j);
end
Bo = 1;
else
Bo = 0;
end
AL = getY(gp,xx,yy,z,f_new, L, eta, g);
AR = getY(gp,xx,yy,z,f_new, R, eta, g);
while (Ao == 0 && y < AL) || (Bo == 0 && y < AR)
if rand < 1/2
L(j) = L(j) - (R(j)-L(j));
if L(j) < mmin(j)
L(j) = mmin(j);
if di
fprintf('Underflow! (L:%d)\n',j);
end
Ao = 1;
else
Ao = 0;
end
AL = getY(gp,xx,yy,z,f_new, L, eta, g);
else
R(j) = R(j) + (R(j)-L(j));
if R(j) > mmax(j)
R(j) = mmax(j);
if di
fprintf('Overflow! (R:%d)\n',j);
end
Bo = 1;
else
Bo = 0;
end
AR = getY(gp,xx,yy,z,f_new, R, eta, g);
end
end % while
else % if the distribution is not defined to be unimodal
L(j) = x_0(j) - w(j).*rand;
if L(j) < mmin(j)
L(j) = mmin(j);
if di
fprintf('Underflow! (L:%d)\n',j);
end
end
R(j) = L(j) + w(j);
if R(j) > mmax(j)
R(j) = mmax(j);
if di
fprintf('Overflow! (R:%d)\n',j);
end
end
K = m(j); % maximum number of doublings (the caller passes this limit as argument m)
AL = getY(gp,xx,yy,z,f_new, L, eta, g);
AR = getY(gp,xx,yy,z,f_new, R, eta, g);
while K > 0 && (y < AL || y < AR)
if rand < 1/2
L(j) = L(j) - (R(j)-L(j));
if L(j) < mmin(j)
L(j) = mmin(j);
if di
fprintf('Underflow! (L:%d)\n',j);
end
end
AL = getY(gp,xx,yy,z,f_new, L, eta, g);
else
R(j) = R(j) + (R(j)-L(j));
if R(j) > mmax(j)
R(j) = mmax(j);
if di
fprintf('Overflow! (R:%d)\n',j);
end
end
AR = getY(gp,xx,yy,z,f_new, R, eta, g);
end
K = K - 1;
end % while
end
%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%
function out = accept(f,y,x_0,x_new,w,L,R,j,varargin)
%function out = accept(f,y,x_0,x_new,w,L,R,j,varargin)
%
% Acceptance check for doubling scheme
out = [];
l = L;
r = R;
d = 0;
while r(j)-l(j) > 1.1*w(j)
m = (l(j)+r(j))/2;
if (x_0(j) < m && x_new(j) >= m) || (x_0(j) >= m && x_new(j) < m)
d = 1;
end
if x_new(j) < m
r(j) = m;
else
l(j) = m;
end
if d && y >= -f(l,varargin{:}) && y >= -f(r,varargin{:})
out = 0;
break;
end
end % while
if isempty(out)
out = 1;
end;
function [y, f_new, eta, g] = getY(gp, xx, yy, z, f, w, eta, g)
if isempty(f) && (isempty(eta) || isempty(g))
error('Must provide either current latent values f to get threshold or eta & g to get new latent values')
end
gp = gp_unpak(gp, w);
if ~isfield(gp.lik, 'nondiagW') || ismember(gp.lik.type, {'LGP' 'LGPC'})
[K, C] = gp_trcov(gp, xx);
else
if ~isfield(gp.lik,'xtime')
nl=[0 repmat(size(yy,1), 1, length(gp.comp_cf))];
else
xtime=gp.lik.xtime;
nl=[0 size(gp.lik.xtime,1) size(yy,1)];
end
nl=cumsum(nl);
nlp=length(nl)-1;
K = zeros(nl(end));
for i1=1:nlp
if i1==1 && isfield(gp.lik, 'xtime')
K((1+nl(i1)):nl(i1+1),(1+nl(i1)):nl(i1+1)) = gp_trcov(gp, xtime, gp.comp_cf{i1});
else
K((1+nl(i1)):nl(i1+1),(1+nl(i1)):nl(i1+1)) = gp_trcov(gp, xx, gp.comp_cf{i1});
end
end
C=K;
end
% for ii=1:size(yy,1)
% [tmp,tmp, m2(ii,:)] = gp.lik.fh.tiltedMoments(gp.lik, yy, ii, C(ii,ii), 0, z);
% end
% S = diag(1./(1./m2 - 1./diag(C)));
S = 10*eye(size(K));
if isempty(eta) || isempty(g)
g = mvnrnd(f,S)';
end
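% Surrogate-data conditioning (a sketch of the algebra used below): with
% the auxiliary draw g ~ N(f, S) and the GP prior f ~ N(0, K), the
% conditional p(f | g) is N(m, R) with
%   R = S - S*(S+K)^(-1)*S,   m = R*S^(-1)*g,
% and eta = LR\(f-m) is the whitened representation, so f = LR*eta + m.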
R = S-S*((S+K)\S);
R = (R+R')./2;
LR = chol(R,'lower');
m = R*(S\g);
if isempty(eta) || isempty(g)
eta = LR\(f-m);
f_new = [];
tr = 1; % return threshold
else
f_new = LR*eta + m;
tr = 0; % return y for threshold comparison
end
% Log prior for proposed hyperparameters
lp = 0;
for i3=1:length(gp.cf)
gpcf = gp.cf{i3};
lp = lp + gpcf.fh.lp(gpcf);
end
if isfield(gp, 'lik') && isfield(gp.lik, 'p')
likelih = gp.lik;
lp = lp + likelih.fh.lp(likelih);
end
if tr
% return threshold
y = log(rand(1)) + gp.lik.fh.ll(gp.lik, yy, f, z) + mnorm_lpdf (g', 0, C + S) + lp;
else
% return comparison value with proposed parameters
y = gp.lik.fh.ll(gp.lik, yy, f_new, z) + mnorm_lpdf (g', 0, C + S) + lp;
end
function opt = ssls_opt(opt)
% Default opt for surrogate sls.
% fsamples - number of latent samples per hyperparameter sample
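% A minimal usage sketch (the field values here are arbitrary examples,
% not recommended settings):
%   opt = ssls_opt();                                          % all defaults
%   opt = ssls_opt(struct('nsamples', 100, 'method', 'doubling'));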
if nargin < 1
opt=[];
end
if ~isfield(opt, 'fsamples')
opt.fsamples = 2;
end
if ~isfield(opt,'nsamples')
opt.nsamples = 1;
end
if ~isfield(opt,'nomit')
opt.nomit = 0;
end
if ~isfield(opt,'display')
opt.display = 0;
end
if ~isfield(opt,'method')
opt.method = 'multi';
end
if ~isfield(opt,'overrelaxation')
opt.overrelaxation = 0;
elseif opt.overrelaxation == 1 && (strcmp(opt.method,'doubling') || strcmp(opt.method,'minmax'))
opt.method = 'stepping';
end
if ~isfield(opt,'alimit')
opt.alimit = 4;
end
if ~isfield(opt,'wsize')
opt.wsize = 2;
end
if ~isfield(opt,'mlimit')
opt.mlimit = 4;
end
if ~isfield(opt,'maxiter')
opt.maxiter = 50;
end
if ~isfield(opt,'plimit')
opt.plimit = 2;
end
if ~isfield(opt,'unimodal')
opt.unimodal = 0;
end
if ~isfield(opt,'mmlimits')
opt.mmlimits = [opt.wsize-(opt.wsize*opt.mlimit); opt.wsize+(opt.wsize*opt.mlimit)];
end
| github | lcnbeapp/beapp-master | lik_lgpc.m | .m | beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/lik_lgpc.m | 15,380 | windows_1250 | 8b88129ba333f69e6d189e704cecc43b |
function lik = lik_lgpc(varargin)
%LIK_LGPC Create a logistic Gaussian process likelihood structure for
% conditional density estimation
%
% Description
% LIK = LIK_LGPC creates a logistic Gaussian process likelihood
% structure for conditional density estimation
%
% The likelihood contribution for the $k$th conditional slice
% is defined as follows:
% __ n
% p(y_k|f_k) = || i=1 exp(f_ki) / Sum_{j=1}^n exp(f_kj),
%
% where f contains latent values.
%
% See also
% LGPCDENS, GP_SET, LIK_*
%
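% A minimal usage sketch (model construction details are handled by
% gp_set / lgpcdens as referenced above):
%   lik = lik_lgpc();
%   % lik is then passed to the GP structure, e.g. gp_set('lik', lik, ...)
%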
% Copyright (c) 2012 Jaakko Riihimäki and Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'LIK_LGPC';
ip.addOptional('lik', [], @isstruct);
ip.parse(varargin{:});
lik=ip.Results.lik;
if isempty(lik)
init=true;
lik.type = 'LGPC';
lik.nondiagW = true;
else
if ~isfield(lik,'type') || ~isequal(lik.type,'LGPC')
error('First argument does not seem to be a valid likelihood function structure')
end
init=false;
end
if init
% Set the function handles to the subfunctions
lik.fh.pak = @lik_lgpc_pak;
lik.fh.unpak = @lik_lgpc_unpak;
lik.fh.ll = @lik_lgpc_ll;
lik.fh.llg = @lik_lgpc_llg;
lik.fh.llg2 = @lik_lgpc_llg2;
lik.fh.llg3 = @lik_lgpc_llg3;
lik.fh.tiltedMoments = @lik_lgpc_tiltedMoments;
lik.fh.predy = @lik_lgpc_predy;
lik.fh.invlink = @lik_lgpc_invlink;
lik.fh.recappend = @lik_lgpc_recappend;
end
end
function [w,s] = lik_lgpc_pak(lik)
%LIK_LGPC_PAK Combine likelihood parameters into one vector.
%
% Description
% W = LIK_LGPC_PAK(LIK) takes a likelihood structure LIK
% and returns an empty vector W. If LGPC likelihood had
% parameters this would combine them into a single row vector
% W (see e.g. lik_negbin). This is a mandatory subfunction
% used for example in energy and gradient computations.
%
% See also
% LIK_LGPC_UNPAK, GP_PAK
w = []; s = {};
end
function [lik, w] = lik_lgpc_unpak(lik, w)
%LIK_LGPC_UNPAK Extract likelihood parameters from the vector.
%
% Description
% [LIK, W] = LIK_LGPC_UNPAK(LIK, W) doesn't do anything.
%
% If LGPC likelihood had parameters this would extract the
% parameters from the vector W to the LIK structure. This is
% a mandatory subfunction used for example in energy and
% gradient computations.
%
% See also
% LIK_LGPC_PAK, GP_UNPAK
lik=lik;
w=w;
end
function logLik = lik_lgpc_ll(lik, y, f, z)
%LIK_LGPC_LL Log likelihood
%
% Description
% E = LIK_LGPC_LL(LIK, Y, F, Z) takes a likelihood data
% structure LIK, incidence counts Y, expected counts Z, and
% latent values F. Returns the log likelihood, log p(y|f,z).
% This subfunction is needed when using Laplace approximation
% or MCMC for inference with non-Gaussian likelihoods. This
% subfunction is also used in information criteria (DIC, WAIC)
% computations.
%
% See also
% LIK_LGPC_LLG, LIK_LGPC_LLG3, LIK_LGPC_LLG2, GPLA_E
y2=reshape(y,fliplr(lik.gridn));
f2=reshape(f,fliplr(lik.gridn));
n2=sum(y2);
qj2=exp(f2);
logLik=sum(sum(f2.*y2)-n2.*log(sum(qj2)));
end
function deriv = lik_lgpc_llg(lik, y, f, param, z)
%LIK_LGPC_LLG Gradient of the log likelihood
%
% Description
% G = LIK_LGPC_LLG(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, incidence counts Y, expected counts Z
% and latent values F. Returns the gradient of the log
% likelihood with respect to PARAM. At the moment PARAM can be
% 'param' or 'latent'. This subfunction is needed when using Laplace
% approximation or MCMC for inference with non-Gaussian likelihoods.
%
% See also
% LIK_LGPC_LL, LIK_LGPC_LLG2, LIK_LGPC_LLG3, GPLA_E
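% For slice k the latent gradient is d log p / d f_ki = y_ki - n_k*p_ki,
% where p_ki = exp(f_ki)/sum_j exp(f_kj); the reshaped computation below
% evaluates exactly this, one column per slice.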
switch param
case 'latent'
y2=reshape(y,fliplr(lik.gridn));
f2=reshape(f,fliplr(lik.gridn));
n2=sum(y2);
qj2=exp(f2);
pj2=bsxfun(@rdivide,qj2,sum(qj2));
deriv2=y2-bsxfun(@times,n2,pj2);
deriv=deriv2(:);
end
end
function g2 = lik_lgpc_llg2(lik, y, f, param, z)
%function g2 = lik_lgpc_llg2(lik, y, f, param, z)
%LIK_LGPC_LLG2 Second gradients of the log likelihood
%
% Description
% G2 = LIK_LGPC_LLG2(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, incidence counts Y, expected counts Z,
% and latent values F. Returns the Hessian of the log
% likelihood with respect to PARAM. At the moment PARAM can be
% only 'latent'. G2 is a vector with diagonal elements of the
% Hessian matrix (off diagonals are zero). This subfunction
% is needed when using Laplace approximation or EP for
% inference with non-Gaussian likelihoods.
%
% See also
% LIK_LGPC_LL, LIK_LGPC_LLG, LIK_LGPC_LLG3, GPLA_E
switch param
case 'latent'
% g2 is not the second gradient of the log likelihood but only a
% vector to form the exact gradient term in gpla_nd_e, gpla_nd_g and
% gpla_nd_pred functions
f2=reshape(f,fliplr(lik.gridn));
qj2=exp(f2);
pj2=bsxfun(@rdivide,qj2,sum(qj2));
g2=pj2(:);
end
end
function g3 = lik_lgpc_llg3(lik, y, f, param, z)
%LIK_LGPC_LLG3 Third gradients of the log likelihood
%
% Description
% G3 = LIK_LGPC_LLG3(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, incidence counts Y, expected counts Z
% and latent values F and returns the third gradients of the
% log likelihood with respect to PARAM. At the moment PARAM
% can be only 'latent'. G3 is a vector with third gradients.
% This subfunction is needed when using Laplace approximation
% for inference with non-Gaussian likelihoods.
%
% See also
% LIK_LGPC_LL, LIK_LGPC_LLG, LIK_LGPC_LLG2, GPLA_E, GPLA_G
switch param
case 'latent'
f2=reshape(f,fliplr(lik.gridn));
qj2=exp(f2);
pj2=bsxfun(@rdivide,qj2,sum(qj2));
g3=pj2(:);
end
end
function [logM_0, m_1, sigm2hati1] = lik_lgpc_tiltedMoments(lik, y, i1, sigm2_i, myy_i, z)
%LIK_LGPC_TILTEDMOMENTS Returns the marginal moments for EP algorithm
%
% Description
% [M_0, M_1, M2] = LIK_LGPC_TILTEDMOMENTS(LIK, Y, I, S2,
% MYY, Z) takes a likelihood structure LIK, incidence counts
% Y, expected counts Z, index I and cavity variance S2 and
% mean MYY. Returns the zeroth moment M_0, mean M_1 and
% variance M_2 of the posterior marginal (see Rasmussen and
% Williams (2006): Gaussian processes for Machine Learning,
% page 55). This subfunction is needed when using EP for
% inference with non-Gaussian likelihoods.
%
% See also
% GPEP_E
if isempty(z)
error(['lik_lgpc -> lik_lgpc_tiltedMoments: missing z!'...
'LGPC likelihood needs the expected number of '...
'occurrences as an extra input z. See, for '...
'example, lik_lgpc and gpla_e. ']);
end
yy = y(i1);
avgE = z(i1);
logM_0=zeros(size(yy));
m_1=zeros(size(yy));
sigm2hati1=zeros(size(yy));
for i=1:length(i1)
% get a function handle of an unnormalized tilted distribution
% (likelihood * cavity = LGPC * Gaussian)
% and useful integration limits
[tf,minf,maxf]=init_lgpc_norm(yy(i),myy_i(i),sigm2_i(i),avgE(i));
% Integrate with quadrature
RTOL = 1.e-6;
ATOL = 1.e-10;
[m_0, m_1(i), m_2] = quad_moments(tf, minf, maxf, RTOL, ATOL);
sigm2hati1(i) = m_2 - m_1(i).^2;
% If the second central moment is less than cavity variance
% integrate more precisely. Theoretically for log-concave
% likelihood should be sigm2hati1 < sigm2_i.
if sigm2hati1(i) >= sigm2_i(i)
ATOL = ATOL.^2;
RTOL = RTOL.^2;
[m_0, m_1(i), m_2] = quad_moments(tf, minf, maxf, RTOL, ATOL);
sigm2hati1(i) = m_2 - m_1(i).^2;
if sigm2hati1(i) >= sigm2_i(i)
error('lik_lgpc_tilted_moments: sigm2hati1 >= sigm2_i');
end
end
logM_0(i) = log(m_0);
end
end
function [lpy, Ey, Vary] = lik_lgpc_predy(lik, Ef, Varf, yt, zt)
%LIK_LGPC_PREDY Returns the predictive mean, variance and density of y
%
% Description
% LPY = LIK_LGPC_PREDY(LIK, EF, VARF YT, ZT)
% Returns also the predictive density of YT, that is
% p(yt | y,zt) = \int p(yt | f, zt) p(f|y) df.
% This requires also the incidence counts YT, expected counts ZT.
% This subfunction is needed when computing posterior predictive
% distributions for future observations.
%
% [LPY, EY, VARY] = LIK_LGPC_PREDY(LIK, EF, VARF) takes a
% likelihood structure LIK, posterior mean EF and posterior
% Variance VARF of the latent variable and returns the
% posterior predictive mean EY and variance VARY of the
% observations related to the latent variables. This subfunction
% is needed when computing posterior predictive distributions for
% future observations.
%
%
% See also
% GPLA_PRED, GPEP_PRED, GPMC_PRED
if isempty(zt)
error(['lik_lgpc -> lik_lgpc_predy: missing zt!'...
'LGPC likelihood needs the expected number of '...
'occurrences as an extra input zt. See, for '...
'example, lik_lgpc and gpla_e. ']);
end
avgE = zt;
lpy = zeros(size(Ef));
Ey = zeros(size(Ef));
EVary = zeros(size(Ef));
VarEy = zeros(size(Ef));
if nargout > 1
% Evaluate Ey and Vary
for i1=1:length(Ef)
%%% With quadrature
myy_i = Ef(i1);
sigm_i = sqrt(Varf(i1));
minf=myy_i-6*sigm_i;
maxf=myy_i+6*sigm_i;
F = @(f) exp(log(avgE(i1))+f+norm_lpdf(f,myy_i,sigm_i));
Ey(i1) = quadgk(F,minf,maxf);
EVary(i1) = Ey(i1);
F3 = @(f) exp(2*log(avgE(i1))+2*f+norm_lpdf(f,myy_i,sigm_i));
VarEy(i1) = quadgk(F3,minf,maxf) - Ey(i1).^2;
end
Vary = EVary + VarEy;
end
% Evaluate the posterior predictive densities of the given observations
for i1=1:length(Ef)
% get a function handle of the likelihood times posterior
% (likelihood * posterior = LGPC * Gaussian)
% and useful integration limits
[pdf,minf,maxf]=init_lgpc_norm(...
yt(i1),Ef(i1),Varf(i1),avgE(i1));
% integrate over the f to get posterior predictive distribution
lpy(i1) = log(quadgk(pdf, minf, maxf));
end
end
function [df,minf,maxf] = init_lgpc_norm(yy,myy_i,sigm2_i,avgE)
%INIT_LGPC_NORM
%
% Description
% Return function handle to a function evaluating LGPC *
% Gaussian which is used for evaluating (likelihood * cavity)
% or (likelihood * posterior) Return also useful limits for
% integration. This is private function for lik_lgpc. This
% subfunction is needed by sufunctions tiltedMoments, siteDeriv
% and predy.
%
% See also
% LIK_LGPC_TILTEDMOMENTS, LIK_LGPC_PREDY
% avoid repetitive evaluation of constant part
ldconst = -gammaln(yy+1) - log(sigm2_i)/2 - log(2*pi)/2;
% Create function handle for the function to be integrated
df = @lgpc_norm;
% use log to avoid underflow, and derivates for faster search
ld = @log_lgpc_norm;
ldg = @log_lgpc_norm_g;
ldg2 = @log_lgpc_norm_g2;
% Set the limits for integration
% LGPC likelihood is log-concave so the lgpc_norm
% function is unimodal, which makes things easier
if yy==0
% with yy==0, the mode of the likelihood is not defined
% use the mode of the Gaussian (cavity or posterior) as a first guess
modef = myy_i;
else
% use precision weighted mean of the Gaussian approximation
% of the LGPC likelihood and Gaussian
mu=log(yy/avgE);
s2=1./(yy+1./sigm2_i);
modef = (myy_i/sigm2_i + mu/s2)/(1/sigm2_i + 1/s2);
end
% find the mode of the integrand using Newton iterations
% a few iterations are enough, since the first guess is in the right direction
niter=3; % number of Newton iterations
mindelta=1e-6; % tolerance in stopping Newton iterations
for ni=1:niter
g=ldg(modef);
h=ldg2(modef);
delta=-g/h;
modef=modef+delta;
if abs(delta)<mindelta
break
end
end
% integrand limits based on Gaussian approximation at mode
modes=sqrt(-1/h);
minf=modef-8*modes;
maxf=modef+8*modes;
modeld=ld(modef);
iter=0;
% check that density at end points is low enough
lddiff=20; % min difference in log-density between mode and end-points
minld=ld(minf);
step=1;
while minld>(modeld-lddiff)
minf=minf-step*modes;
minld=ld(minf);
iter=iter+1;
step=step*2;
if iter>100
error(['lik_lgpc -> init_lgpc_norm: ' ...
'integration interval minimum not found ' ...
'even after looking hard!'])
end
end
maxld=ld(maxf);
step=1;
while maxld>(modeld-lddiff)
maxf=maxf+step*modes;
maxld=ld(maxf);
iter=iter+1;
step=step*2;
if iter>100
error(['lik_lgpc -> init_lgpc_norm: ' ...
'integration interval maximum not found ' ...
'even after looking hard!'])
end
end
function integrand = lgpc_norm(f)
% LGPC * Gaussian
mu = avgE.*exp(f);
integrand = exp(ldconst ...
-mu+yy.*log(mu) ...
-0.5*(f-myy_i).^2./sigm2_i);
end
function log_int = log_lgpc_norm(f)
% log(LGPC * Gaussian)
% log_lgpc_norm is used to avoid underflow when searching
% integration interval
mu = avgE.*exp(f);
log_int = ldconst ...
-mu+yy.*log(mu) ...
-0.5*(f-myy_i).^2./sigm2_i;
end
function g = log_lgpc_norm_g(f)
% d/df log(LGPC * Gaussian)
% derivative of log_lgpc_norm
mu = avgE.*exp(f);
g = -mu+yy...
+ (myy_i - f)./sigm2_i;
end
function g2 = log_lgpc_norm_g2(f)
% d^2/df^2 log(LGPC * Gaussian)
% second derivative of log_lgpc_norm
mu = avgE.*exp(f);
g2 = -mu...
-1/sigm2_i;
end
end
function mu = lik_lgpc_invlink(lik, f, z)
%LIK_LGPC_INVLINK Returns values of inverse link function
%
% Description
% P = LIK_LGPC_INVLINK(LIK, F) takes a likelihood structure LIK and
% latent values F and returns the values MU of inverse link function.
% This subfunction is needed when using function gp_predprctmu.
%
% See also
% LIK_LGPC_LL, LIK_LGPC_PREDY
mu = z.*exp(f);
end
function reclik = lik_lgpc_recappend(reclik, ri, lik)
%RECAPPEND Append the parameters to the record
%
% Description
% RECLIK = LIK_LGPC_RECAPPEND(RECLIK, RI, LIK) takes a
% likelihood record structure RECLIK, record index RI and
% likelihood structure LIK with the current MCMC samples of
% the parameters. Returns RECLIK which contains all the old
% samples and the current samples from LIK. This subfunction
% is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC
if nargin == 2
reclik.type = 'LGPC';
% Set the function handles
reclik.fh.pak = @lik_lgpc_pak;
reclik.fh.unpak = @lik_lgpc_unpak;
reclik.fh.ll = @lik_lgpc_ll;
reclik.fh.llg = @lik_lgpc_llg;
reclik.fh.llg2 = @lik_lgpc_llg2;
reclik.fh.llg3 = @lik_lgpc_llg3;
reclik.fh.tiltedMoments = @lik_lgpc_tiltedMoments;
reclik.fh.predy = @lik_lgpc_predy;
reclik.fh.invlink = @lik_lgpc_invlink;
reclik.fh.recappend = @lik_lgpc_recappend;
return
end
end
| github | lcnbeapp/beapp-master | lik_softmax.m | .m | beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/lik_softmax.m | 10,116 | UNKNOWN | f393d5bbd44c08a0eaf0996ae3fb72f7 |
function lik = lik_softmax(varargin)
%LIK_SOFTMAX Create a softmax likelihood structure
%
% Description
% LIK = LIK_SOFTMAX creates Softmax likelihood for multi-class
% classification problem. The observed class label with C
% classes is given as 1xC vector where C-1 entries are 0 and the
% observed class label is 1.
%
% The likelihood is defined as follows:
% __ n
% p(y^c|f^1, ..., f^C) = || i=1 exp(f_i^C)/(sum^C_c=1 exp(f_i^c))
%
% where y^c is the observation of cth class, f^c is the latent variable
% corresponding to cth class and C is the number of classes.
%
% See also
% GP_SET, LIK_*
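%
% A minimal usage sketch (y is assumed to be an n-by-C matrix of 0/1
% class labels, one column per class; the covariance setup is only an
% illustration):
%   lik = lik_softmax();
%   % pass to gp_set with one covariance function per class latent, e.g.
%   % gp = gp_set('lik', lik, 'cf', {gpcf_sexp() gpcf_sexp() gpcf_sexp()});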
% Copyright (c) 2010 Jaakko Riihimäki, Pasi Jylänki
% Copyright (c) 2010 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'LIK_SOFTMAX';
ip.addOptional('lik', [], @isstruct);
ip.parse(varargin{:});
lik=ip.Results.lik;
if isempty(lik)
init=true;
lik.type = 'Softmax';
lik.nondiagW=true;
else
if ~isfield(lik,'type') || ~isequal(lik.type,'Softmax')
error('First argument does not seem to be a valid likelihood function structure')
end
init=false;
end
if init
% Set the function handles to the subfunctions
lik.fh.pak = @lik_softmax_pak;
lik.fh.unpak = @lik_softmax_unpak;
lik.fh.ll = @lik_softmax_ll;
lik.fh.llg = @lik_softmax_llg;
lik.fh.llg2 = @lik_softmax_llg2;
lik.fh.llg3 = @lik_softmax_llg3;
lik.fh.tiltedMoments = @lik_softmax_tiltedMoments;
lik.fh.predy = @lik_softmax_predy;
lik.fh.recappend = @lik_softmax_recappend;
end
end
function [w,s] = lik_softmax_pak(lik)
%LIK_SOFTMAX_PAK Combine likelihood parameters into one vector.
%
% Description
% W = LIK_SOFTMAX_PAK(LIK) takes a likelihood structure LIK and
% returns an empty vector W. If Softmax likelihood had
% parameters this would combine them into a single row vector
% W (see e.g. lik_negbin). This is a mandatory subfunction used
% for example in energy and gradient computations.
%
%
% See also
% LIK_SOFTMAX_UNPAK, GP_PAK
w = []; s = {};
end
function [lik, w] = lik_softmax_unpak(lik, w)
%LIK_SOFTMAX_UNPAK Extract likelihood parameters from the vector.
%
% Description
% [LIK, W] = LIK_SOFTMAX_UNPAK(LIK, W) doesn't do anything.
%
% If Softmax likelihood had parameters this would extract the
% parameters from the vector W to the LIK structure. This is a
% mandatory subfunction used for example in energy and gradient
% computations.
%
%
% See also
% LIK_SOFTMAX_PAK, GP_UNPAK
lik=lik;
w=w;
end
function ll = lik_softmax_ll(lik, y, f2, z)
%LIK_SOFTMAX_LL Log likelihood
%
% Description
% LL = LIK_SOFTMAX_LL(LIK, Y, F) takes a likelihood structure
% LIK, class labels Y (NxC matrix), and latent values F (NxC
% matrix). Returns the log likelihood, log p(y|f,z). This
% subfunction is needed when using Laplace approximation or
% MCMC for inference with non-Gaussian likelihoods. This
% subfunction is also used in information criteria (DIC, WAIC)
% computations.
%
% See also
% LIK_SOFTMAX_LLG, LIK_SOFTMAX_LLG3, LIK_SOFTMAX_LLG2, GPLA_E
if ~isempty(find(y~=1 & y~=0))
error('lik_softmax: The class labels have to be {0,1}')
end
% Reshape to NxC matrix
f2=reshape(f2,size(y));
% softmax:
ll = y(:)'*f2(:) - sum(log(sum(exp(f2),2)));
end
function llg = lik_softmax_llg(lik, y, f2, param, z)
%LIK_SOFTMAX_LLG Gradient of the log likelihood
%
% Description
% LLG = LIK_SOFTMAX_LLG(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, class labels Y, and latent values F. Returns
% the gradient of the log likelihood with respect to PARAM. At
% the moment PARAM can be 'param' or 'latent'. This subfunction
% is needed when using Laplace approximation or MCMC for inference
% with non-Gaussian likelihoods.
%
% See also
% LIK_SOFTMAX_LL, LIK_SOFTMAX_LLG2, LIK_SOFTMAX_LLG3, GPLA_E
if ~isempty(find(y~=1 & y~=0))
error('lik_softmax: The class labels have to be {0,1}')
end
% Reshape to NxC matrix
f2=reshape(f2,size(y));
expf2 = exp(f2);
pi2 = expf2./(sum(expf2, 2)*ones(1,size(y,2)));
pi_vec=pi2(:);
llg = y(:)-pi_vec;
end
function [pi_vec, pi_mat] = lik_softmax_llg2(lik, y, f2, param, z)
%LIK_SOFTMAX_LLG2 Second gradients of the log likelihood
%
% Description
% LLG2 = LIK_SOFTMAX_LLG2(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, class labels Y, and latent values F. Returns
% the Hessian of the log likelihood with respect to PARAM. At
% the moment PARAM can be only 'latent'. LLG2 is a vector with
% diagonal elements of the Hessian matrix (off diagonals are
% zero). This subfunction is needed when using Laplace
% approximation or EP for inference with non-Gaussian likelihoods.
%
% See also
% LIK_SOFTMAX_LL, LIK_SOFTMAX_LLG, LIK_SOFTMAX_LLG3, GPLA_E
% softmax:
% Reshape to NxC matrix
f2=reshape(f2,size(y));
expf2 = exp(f2);
pi2 = expf2./(sum(expf2, 2)*ones(1,size(y,2)));
pi_vec=pi2(:);
[n,nout]=size(y);
pi_mat=zeros(nout*n, n);
for i1=1:nout
pi_mat((1+(i1-1)*n):(nout*n+1):end)=pi2(:,i1);
end
% D=diag(pi_vec);
% llg2=-D+pi_mat*pi_mat';
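% (The full Hessian of the log likelihood would thus be
% -diag(pi_vec) + pi_mat*pi_mat', as in the commented lines above; only
% its building blocks pi_vec and pi_mat are returned here.)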
end
function dw_mat = lik_softmax_llg3(lik, y, f, param, z)
%LIK_SOFTMAX_LLG3 Third gradients of the log likelihood
%
% Description
% LLG3 = LIK_SOFTMAX_LLG3(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, class labels Y, and latent values F and
% returns the third gradients of the log likelihood with
% respect to PARAM. At the moment PARAM can be only 'latent'.
% LLG3 is a vector with third gradients. This subfunction is
% needed when using Laplace approximation for inference with
% non-Gaussian likelihoods.
%
% See also
% LIK_SOFTMAX_LL, LIK_SOFTMAX_LLG, LIK_SOFTMAX_LLG2, GPLA_E, GPLA_G
if ~isempty(find(y~=1 & y~=0))
error('lik_softmax: The class labels have to be {0,1}')
end
[n,nout] = size(y);
f2 = reshape(f,n,nout);
expf2 = exp(f2);
pi2 = expf2./(sum(expf2, 2)*ones(1,nout));
pi_vec=pi2(:);
dw_mat=zeros(nout,nout,nout,n);
for cc3=1:nout
for ii1=1:n
pic=pi_vec(ii1:n:(nout*n));
for cc1=1:nout
for cc2=1:nout
% multinom third derivatives
cc_sum_tmp=0;
if cc1==cc2 && cc1==cc3 && cc2==cc3
cc_sum_tmp=cc_sum_tmp+pic(cc1);
end
if cc1==cc2
cc_sum_tmp=cc_sum_tmp-pic(cc1)*pic(cc3);
end
if cc2==cc3
cc_sum_tmp=cc_sum_tmp-pic(cc1)*pic(cc2);
end
if cc1==cc3
cc_sum_tmp=cc_sum_tmp-pic(cc1)*pic(cc2);
end
cc_sum_tmp=cc_sum_tmp+2*pic(cc1)*pic(cc2)*pic(cc3);
dw_mat(cc1,cc2,cc3,ii1)=cc_sum_tmp;
end
end
end
end
end
function [logM_0, m_1, sigm2hati1] = lik_softmax_tiltedMoments(lik, y, i1, sigm2_i, myy_i, z)
%LIK_SOFTMAX_TILTEDMOMENTS Tilted moments are not implemented for the
% softmax likelihood; this empty stub only allows the function handle
% to be set in the likelihood structure.
end
function [lpy, Ey, Vary] = lik_softmax_predy(lik, Ef, Varf, yt, zt)
%LIK_SOFTMAX_PREDY Returns the predictive mean, variance and density of
%y
%
% Description
% LPY = LIK_SOFTMAX_PREDY(LIK, EF, VARF YT, ZT)
% Returns logarithm of the predictive density PY of YT, that is
% p(yt | y, zt) = \int p(yt | f, zt) p(f|y) df.
% This requires also the class labels YT (ZT is not used by softmax).
% This subfunction is needed when computing posterior predictive
% distributions for future observations.
%
% [EY, VARY] = LIK_SOFTMAX_PREDY(LIK, EF, VARF) takes a
% likelihood structure LIK, posterior mean EF and posterior
% Variance VARF of the latent variable and returns the
% posterior predictive mean EY and variance VARY of the
% observations related to the latent variables. This
% subfunction is needed when computing posterior predictive
% distributions for future observations.
%
%
% See also
% GPEP_PRED, GPLA_PRED, GPMC_PRED
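% The predictive class probabilities below are Monte Carlo estimates:
% S latent vectors f* are drawn from the Gaussian (or per-sample MCMC)
% posterior approximation with mean Ef and (co)variance Varf, and
% pi = mean(softmax(f*)) is averaged over the draws; lpy is the log of
% the correspondingly averaged class likelihood.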
if ~isempty(find(yt~=1 & yt~=0))
error('lik_softmax: The class labels have to be {0,1}')
end
S=10000;
[ntest,nout]=size(yt);
pi=zeros(ntest,nout);
lpy=zeros(ntest,nout);
Ef=reshape(Ef(:),ntest,nout);
[notused,notused,c] =size(Varf);
if c>1
mcmc=false;
else
mcmc=true;
Varf=reshape(Varf(:),ntest,nout);
end
for i1=1:ntest
if mcmc
Sigm_tmp = (Varf(i1,:));
f_star=bsxfun(@plus, Ef(i1,:), bsxfun(@times, sqrt(Sigm_tmp), ...
randn(S,nout)));
else
Sigm_tmp=(Varf(:,:,i1)'+Varf(:,:,i1))./2;
f_star=mvnrnd(Ef(i1,:), Sigm_tmp, S);
end
tmp = exp(f_star);
tmp = tmp./(sum(tmp, 2)*ones(1,size(tmp,2)));
pi(i1,:)=mean(tmp);
ytmp = repmat(yt(i1,:),S,1);
lpy(i1,:) = log(mean(tmp.^(ytmp).*(1-tmp).^(1-ytmp)));
end
if nargout > 1
Ey = 2*pi-1;
Vary = 1-(2*pi-1).^2;
Ey=Ey(:);
end
lpy=lpy(:);
end
function reclik = lik_softmax_recappend(reclik, ri, lik)
%RECAPPEND Append the parameters to the record
%
% Description
% RECLIK = LIK_SOFTMAX_RECAPPEND(RECLIK, RI, LIK) takes a
% likelihood record structure RECLIK, record index RI and
% likelihood structure LIK with the current MCMC samples of
% the parameters. Returns RECLIK which contains all the old
% samples and the current samples from LIK. This subfunction
% is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC
if nargin == 2
reclik.type = 'Softmax';
reclik.nondiagW = true;
% Set the function handles
reclik.fh.pak = @lik_softmax_pak;
reclik.fh.unpak = @lik_softmax_unpak;
reclik.fh.ll = @lik_softmax_ll;
reclik.fh.llg = @lik_softmax_llg;
reclik.fh.llg2 = @lik_softmax_llg2;
reclik.fh.llg3 = @lik_softmax_llg3;
reclik.fh.tiltedMoments = @lik_softmax_tiltedMoments;
reclik.fh.predy = @lik_softmax_predy;
reclik.fh.recappend = @lik_softmax_recappend;
end
end
| github | lcnbeapp/beapp-master | gpcf_noise.m | .m | beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gpcf_noise.m | 12,126 | utf_8 | 70defae513090d936fda63764c2de76c |
function gpcf = gpcf_noise(varargin)
%GPCF_NOISE Create a independent noise covariance function
%
% Description
% GPCF = GPCF_NOISE('PARAM1',VALUE1,'PARAM2,VALUE2,...) creates
% independent noise covariance function structure in which the
% named parameters have the specified values. Any unspecified
% parameters are set to default values.
%
% GPCF = GPCF_NOISE(GPCF,'PARAM1',VALUE1,'PARAM2,VALUE2,...)
% modify a covariance function structure with the named
% parameters altered with the specified values.
%
% Parameters for independent noise covariance function [default]
% noiseSigma2 - variance of the independent noise [0.1]
% noiseSigma2_prior - prior for noiseSigma2 [prior_logunif]
%
% Note! If the prior is 'prior_fixed' then the parameter in
% question is considered fixed and it is not handled in
% optimization, grid integration, MCMC etc.
%
% See also
% GP_SET, GPCF_*, PRIOR_*
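%
% Example (a sketch; the variance value is arbitrary):
%   cfn = gpcf_noise('noiseSigma2', 0.04, 'noiseSigma2_prior', prior_fixed());
%   % cfn is typically added alongside other covariance functions in gp_set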
% Copyright (c) 2007-2010 Jarno Vanhatalo
% Copyright (c) 2010 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'GPCF_NOISE';
ip.addOptional('gpcf', [], @isstruct);
ip.addParamValue('noiseSigma2',0.1, @(x) isscalar(x) && x>0);
ip.addParamValue('noiseSigma2_prior',prior_logunif, @(x) isstruct(x) || isempty(x));
ip.parse(varargin{:});
gpcf=ip.Results.gpcf;
if isempty(gpcf)
init=true;
gpcf.type = 'gpcf_noise';
else
if ~isfield(gpcf,'type') || ~isequal(gpcf.type,'gpcf_noise')
error('First argument does not seem to be a valid covariance function structure')
end
init=false;
end
% Initialize parameter
if init || ~ismember('noiseSigma2',ip.UsingDefaults)
gpcf.noiseSigma2=ip.Results.noiseSigma2;
end
% Initialize prior structure
if init
gpcf.p=[];
end
if init || ~ismember('noiseSigma2_prior',ip.UsingDefaults)
gpcf.p.noiseSigma2=ip.Results.noiseSigma2_prior;
end
if init
% Set the function handles to the subfunctions
gpcf.fh.pak = @gpcf_noise_pak;
gpcf.fh.unpak = @gpcf_noise_unpak;
gpcf.fh.lp = @gpcf_noise_lp;
gpcf.fh.lpg = @gpcf_noise_lpg;
gpcf.fh.cfg = @gpcf_noise_cfg;
gpcf.fh.ginput = @gpcf_noise_ginput;
gpcf.fh.cov = @gpcf_noise_cov;
gpcf.fh.trcov = @gpcf_noise_trcov;
gpcf.fh.trvar = @gpcf_noise_trvar;
gpcf.fh.recappend = @gpcf_noise_recappend;
end
end
function [w, s] = gpcf_noise_pak(gpcf)
%GPCF_NOISE_PAK Combine GP covariance function parameters into
% one vector.
%
% Description
% W = GPCF_NOISE_PAK(GPCF) takes a covariance function data
% structure GPCF and combines the covariance function
% parameters and their hyperparameters into a single row
% vector W. This is a mandatory subfunction used for example
% in energy and gradient computations.
%
% w = [ log(gpcf.noiseSigma2)
% (hyperparameters of gpcf.noiseSigma2)]'
%
%
% See also
% GPCF_NOISE_UNPAK
w = []; s = {};
if ~isempty(gpcf.p.noiseSigma2)
w(1) = log(gpcf.noiseSigma2);
s = [s 'log(noise.noiseSigma2)'];
% Hyperparameters of noiseSigma2
[wh sh] = gpcf.p.noiseSigma2.fh.pak(gpcf.p.noiseSigma2);
w = [w wh];
s = [s sh];
end
end
function [gpcf, w] = gpcf_noise_unpak(gpcf, w)
%GPCF_NOISE_UNPAK Sets the covariance function parameters
% into the structure
%
% Description
% [GPCF, W] = GPCF_NOISE_UNPAK(GPCF, W) takes a covariance
% function data structure GPCF and a hyper-parameter vector W,
% and returns a covariance function data structure identical
% to the input, except that the covariance hyper-parameters
% have been set to the values in W. Deletes the values set to
% GPCF from W and returns the modified W. This is a mandatory
% subfunction used for example in energy and gradient computations.
%
% Assignment is inverse of
% w = [ log(gpcf.noiseSigma2)
% (hyperparameters of gpcf.noiseSigma2)]'
%
% See also
% GPCF_NOISE_PAK
if ~isempty(gpcf.p.noiseSigma2)
gpcf.noiseSigma2 = exp(w(1));
w = w(2:end);
% Hyperparameters of noiseSigma2
[p, w] = gpcf.p.noiseSigma2.fh.unpak(gpcf.p.noiseSigma2, w);
gpcf.p.noiseSigma2 = p;
end
end
function lp = gpcf_noise_lp(gpcf)
%GPCF_NOISE_LP Evaluate the log prior of covariance function parameters
%
% Description
% LP = GPCF_NOISE_LP(GPCF) takes a covariance function
% structure GPCF and returns log(p(th)), where th collects the
% parameters. This is a mandatory subfunction used for example
% in energy computations.
%
% See also
% GPCF_NOISE_PAK, GPCF_NOISE_UNPAK, GPCF_NOISE_G, GP_E
% Evaluate the prior contribution to the error. The parameters that
% are sampled are from space W = log(w) where w is all the
% "real" samples. On the other hand errors are evaluated in the
% W-space so we need take into account also the Jacobian of
% transformation W -> w = exp(W). See Gelman et.al., 2004,
% Bayesian data Analysis, second edition, p24.
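% Concretely, with w = exp(W) the change of variables gives
%   log p(W) = log p(w) + log|dw/dW| = log p(w) + log(w),
% which is the extra +log(gpcf.noiseSigma2) term added below.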
lp = 0;
gpp=gpcf.p;
if ~isempty(gpcf.p.noiseSigma2)
% Evaluate the prior contribution to the error.
lp = gpp.noiseSigma2.fh.lp(gpcf.noiseSigma2, gpp.noiseSigma2) +log(gpcf.noiseSigma2);
end
end
function lpg = gpcf_noise_lpg(gpcf)
%GPCF_NOISE_LPG Evaluate gradient of the log prior with respect
% to the parameters.
%
% Description
% LPG = GPCF_NOISE_LPG(GPCF) takes a covariance function
% structure GPCF and returns LPG = d log (p(th))/dth, where th
% is the vector of parameters. This is a mandatory subfunction
% used for example in gradient computations.
%
% See also
% GPCF_NOISE_PAK, GPCF_NOISE_UNPAK, GPCF_NOISE_LP, GP_G
lpg = [];
gpp=gpcf.p;
if ~isempty(gpcf.p.noiseSigma2)
lpgs = gpp.noiseSigma2.fh.lpg(gpcf.noiseSigma2, gpp.noiseSigma2);
lpg = [lpg lpgs(1).*gpcf.noiseSigma2+1 lpgs(2:end)];
end
end
function DKff = gpcf_noise_cfg(gpcf, x, x2, mask, i1)
%GPCF_NOISE_CFG Evaluate gradient of covariance function
% with respect to the parameters
%
% Description
% DKff = GPCF_NOISE_CFG(GPCF, X) takes a covariance function
% data structure GPCF, a matrix X of input vectors and returns
% DKff, the gradients of covariance matrix Kff = k(X,X) with
% respect to th (cell array with matrix elements). This is a
% mandatory subfunction used in gradient computations.
%
% DKff = GPCF_NOISE_CFG(GPCF, X, X2) takes a covariance
% function data structure GPCF, a matrix X of input vectors
% and returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using sparse
% approximations (e.g. FIC).
%
% DKff = GPCF_NOISE_CFG(GPCF, X, [], MASK) takes a covariance
% function data structure GPCF, a matrix X of input vectors
% and returns DKff, the diagonal of gradients of covariance
% matrix Kff = k(X,X2) with respect to th (cell array with
% matrix elements). This subfunction is needed when using
% sparse approximations (e.g. FIC).
%
% See also
% GPCF_NOISE_PAK, GPCF_NOISE_UNPAK, GPCF_NOISE_E, GP_G
DKff = {};
if ~isempty(gpcf.p.noiseSigma2)
gpp=gpcf.p;
DKff{1}=gpcf.noiseSigma2;
end
if nargin==4
% Use memory save option
if i1==0
% Return number of hyperparameters
DKff=1;
return
end
DKff=DKff{1};
end
end
function DKff = gpcf_noise_ginput(gpcf, x, t, i1)
%GPCF_NOISE_GINPUT Evaluate gradient of covariance function with
% respect to x
%
% Description
% DKff = GPCF_NOISE_GINPUT(GPCF, X) takes a covariance
% function data structure GPCF, a matrix X of input vectors
% and returns DKff, the gradients of covariance matrix Kff =
% k(X,X) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% DKff = GPCF_NOISE_GINPUT(GPCF, X, X2) takes a covariance
% function data structure GPCF, a matrix X of input vectors
% and returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% See also
% GPCF_NOISE_PAK, GPCF_NOISE_UNPAK, GPCF_NOISE_E, GP_G
end
function C = gpcf_noise_cov(gpcf, x1, x2)
% GP_NOISE_COV Evaluate covariance matrix between two input vectors
%
% Description
% C = GP_NOISE_COV(GP, TX, X) takes in covariance function of
% a Gaussian process GP and two matrixes TX and X that contain
% input vectors to GP. Returns covariance matrix C. Every
% element ij of C contains covariance between inputs i in TX
% and j in X. This is a mandatory subfunction used for example in
% prediction and energy computations.
%
% See also
% GPCF_NOISE_TRCOV, GPCF_NOISE_TRVAR, GP_COV, GP_TRCOV
if isempty(x2)
x2=x1;
end
[n1,m1]=size(x1);
[n2,m2]=size(x2);
if m1~=m2
error('the number of columns of X1 and X2 has to be same')
end
C = sparse([],[],[],n1,n2,0);
end
function C = gpcf_noise_trcov(gpcf, x)
%GP_NOISE_TRCOV Evaluate training covariance matrix of inputs
%
% Description
% C = GP_NOISE_TRCOV(GP, TX) takes in covariance function of a
% Gaussian process GP and matrix TX that contains training
% input vectors. Returns covariance matrix C. Every element ij
% of C contains covariance between inputs i and j in TX. This is
% a mandatory subfunction used for example in prediction and
% energy computations.
%
%
% See also
% GPCF_NOISE_COV, GPCF_NOISE_TRVAR, GP_COV, GP_TRCOV
[n, m] =size(x);
n1=n+1;
C = sparse([],[],[],n,n,0);
C(1:n1:end)=C(1:n1:end)+gpcf.noiseSigma2;
end
function C = gpcf_noise_trvar(gpcf, x)
% GP_NOISE_TRVAR Evaluate training variance vector
%
% Description
% C = GP_NOISE_TRVAR(GPCF, TX) takes in covariance function
% of a Gaussian process GPCF and matrix TX that contains
% training inputs. Returns variance vector C. Every
% element i of C contains variance of input i in TX. This is
% a mandatory subfunction used for example in prediction and
% energy computations.
%
%
% See also
% GPCF_NOISE_COV, GP_COV, GP_TRCOV
[n, m] =size(x);
C=ones(n,1)*gpcf.noiseSigma2;
end
function reccf = gpcf_noise_recappend(reccf, ri, gpcf)
%RECAPPEND Record append
%
% Description
% RECCF = GPCF_NOISE_RECAPPEND(RECCF, RI, GPCF) takes a
% covariance function record structure RECCF, record index RI
% and covariance function structure GPCF with the current MCMC
% samples of the hyperparameters. Returns RECCF which contains
% all the old samples and the current samples from GPCF.
% This subfunction is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC and GP_MC -> RECAPPEND
if nargin == 2
% Initialize the record
reccf.type = 'gpcf_noise';
% Initialize parameters
reccf.noiseSigma2 = [];
% Set the function handles
reccf.fh.pak = @gpcf_noise_pak;
reccf.fh.unpak = @gpcf_noise_unpak;
reccf.fh.e = @gpcf_noise_lp;
reccf.fh.lpg = @gpcf_noise_lpg;
reccf.fh.cfg = @gpcf_noise_cfg;
reccf.fh.cov = @gpcf_noise_cov;
reccf.fh.trcov = @gpcf_noise_trcov;
reccf.fh.trvar = @gpcf_noise_trvar;
% gpcf.fh.sampling = @hmc2;
reccf.sampling_opt = hmc2_opt;
reccf.fh.recappend = @gpcf_noise_recappend;
reccf.p=[];
reccf.p.noiseSigma2=[];
if ~isempty(ri.p.noiseSigma2)
reccf.p.noiseSigma2 = ri.p.noiseSigma2;
end
else
% Append to the record
gpp = gpcf.p;
% record noiseSigma2
reccf.noiseSigma2(ri,:)=gpcf.noiseSigma2;
if isfield(gpp,'noiseSigma2') && ~isempty(gpp.noiseSigma2)
reccf.p.noiseSigma2 = gpp.noiseSigma2.fh.recappend(reccf.p.noiseSigma2, ri, gpcf.p.noiseSigma2);
end
end
end
| github | lcnbeapp/beapp-master | gpcf_scaled.m | .m | beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gpcf_scaled.m | 14,371 | utf_8 | e5089583dd57a67a67f52a293560482a |
function gpcf = gpcf_scaled(varargin)
%GPCF_SCALED Create a scaled covariance function
%
% Description
% GPCF = GPCF_scaled('cf', {GPCF_1, GPCF_2, ...})
% creates a scaled version of a covariance function as follows
% GPCF_scaled = diag(x(:,scaler))*GPCF*diag(x(:,scaler))
% where x is the matrix of inputs (see, e.g. gp_trcov).
%
% Parameters for the scaled covariance function are [default]
% cf - covariance function to be scaled (compulsory)
% scaler - the input that is used for scaling [1]
%
% See also
% GP_SET, GPCF_*
% For more information on models leading to scaled covariance function see,
% for example:
%
% GELFAND, KIM, SIRMANS, and BANERJEE (2003). Spatial Modeling With
% Spatially Varying Coefficient Processes. Journal of the American
% Statistical Association June 2003, Vol. 98, No. 462
%
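% Example (a sketch): scale a squared exponential covariance by the
% values in the third input column
%   cfs = gpcf_scaled('cf', gpcf_sexp(), 'scaler', 3);
%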
% Copyright (c) 2009-2012 Jarno Vanhatalo
% Copyright (c) 2010 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 2 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'GPCF_SCALED';
ip.addOptional('gpcf', [], @isstruct);
ip.addParamValue('cf',[], @isstruct);
ip.addParamValue('scaler',1, @(x) isscalar(x) && x>0);
ip.parse(varargin{:});
gpcf=ip.Results.gpcf;
if isempty(gpcf)
init=true;
gpcf.type = 'gpcf_scaled';
else
if ~isfield(gpcf,'type') || ~isequal(gpcf.type,'gpcf_scaled')
error('First argument does not seem to be a valid covariance function structure')
end
init=false;
end
if init || ~ismember('cf',ip.UsingDefaults)
% Initialize parameters
gpcf.cf = {};
cfs=ip.Results.cf;
if ~isempty(cfs)
gpcf.cf{1} = cfs;
else
error('A covariance function has to be given in cf');
end
end
if init || ~ismember('scaler',ip.UsingDefaults)
gpcf.scaler = ip.Results.scaler;
end
if init
% Set the function handles to the subfunctions
gpcf.fh.pak = @gpcf_scaled_pak;
gpcf.fh.unpak = @gpcf_scaled_unpak;
gpcf.fh.lp = @gpcf_scaled_lp;
gpcf.fh.lpg = @gpcf_scaled_lpg;
gpcf.fh.cfg = @gpcf_scaled_cfg;
gpcf.fh.ginput = @gpcf_scaled_ginput;
gpcf.fh.cov = @gpcf_scaled_cov;
gpcf.fh.trcov = @gpcf_scaled_trcov;
gpcf.fh.trvar = @gpcf_scaled_trvar;
gpcf.fh.recappend = @gpcf_scaled_recappend;
end
end
function [w, s] = gpcf_scaled_pak(gpcf)
%GPCF_scaled_PAK Combine GP covariance function parameters into one vector
%
% Description
% W = GPCF_scaled_PAK(GPCF, W) loops through all the covariance
% functions and packs their parameters into one vector as
% described in the respective functions. This is a mandatory
% subfunction used for example in energy and gradient computations.
%
% See also
% GPCF_scaled_UNPAK
w = []; s = {};
cf = gpcf.cf{1};
[wi si] = feval(cf.fh.pak, cf);
w = [w wi];
s = [s; si];
end
function [gpcf, w] = gpcf_scaled_unpak(gpcf, w)
%GPCF_scaled_UNPAK Sets the covariance function parameters into
% the structures
%
% Description
% [GPCF, W] = GPCF_scaled_UNPAK(GPCF, W) loops through all the
% covariance functions and unpacks their parameters from W to
% each covariance function structure. This is a mandatory
% subfunction used for example in energy and gradient computations.
%
% See also
% GPCF_scaled_PAK
%
cf = gpcf.cf{1};
[cf, w] = feval(cf.fh.unpak, cf, w);
gpcf.cf{1} = cf;
end
function lp = gpcf_scaled_lp(gpcf)
%GPCF_scaled_LP Evaluate the log prior of covariance function parameters
%
% Description
% LP = GPCF_scaled_LP(GPCF, X, T) takes a covariance function
% structure GPCF and returns log(p(th)), where th collects the
% parameters. This is a mandatory subfunction used for example
% in energy computations.
%
% See also
% GPCF_scaled_PAK, GPCF_scaled_UNPAK, GPCF_scaled_LPG, GP_E
lp = 0;
cf = gpcf.cf{1};
lp = lp + feval(cf.fh.lp, cf);
end
function lpg = gpcf_scaled_lpg(gpcf)
%GPCF_scaled_LPG Evaluate gradient of the log prior with respect
% to the parameters.
%
% Description
% LPG = GPCF_scaled_LPG(GPCF) takes a covariance function
% structure GPCF and returns LPG = d log (p(th))/dth, where th
% is the vector of parameters. This is a mandatory subfunction
% used for example in gradient computations.
%
% See also
% GPCF_scaled_PAK, GPCF_scaled_UNPAK, GPCF_scaled_LP, GP_G
lpg = [];
% Evaluate the gradients
cf = gpcf.cf{1};
lpg=[lpg cf.fh.lpg(cf)];
end
function DKff = gpcf_scaled_cfg(gpcf, x, x2, mask, i1)
%GPCF_scaled_CFG Evaluate gradient of covariance function
% with respect to the parameters.
%
% Description
% DKff = GPCF_scaled_CFG(GPCF, X) takes a covariance function
% structure GPCF, a matrix X of input vectors and returns
% DKff, the gradients of covariance matrix Kff = k(X,X) with
% respect to th (cell array with matrix elements). This is a
% mandatory subfunction used in gradient computations.
%
% DKff = GPCF_scaled_CFG(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using sparse
% approximations (e.g. FIC).
%
% DKff = GPCF_scaled_CFG(GPCF, X, [], MASK) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the diagonal of gradients of covariance matrix
% Kff = k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using sparse
% approximations (e.g. FIC).
%
% DKff = GPCF_scaled_CFG(GPCF, X, X2, [], i) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2), or k(X,X) if X2 is empty, with respect to ith
% hyperparameter. This subfunction is needed when using memory
% save option in gp_set.
%
% See also
% GPCF_scaled_PAK, GPCF_scaled_UNPAK, GPCF_scaled_LP, GP_G
[n, m] =size(x);
if nargin==5
% Use memory save option
savememory=1;
if i1==0
% Return number of hyperparameters
DKff=gpcf.cf{1}.fh.cfg(gpcf.cf{1},[],[],[],0);
return
end
else
savememory=0;
end
DKff = {};
% Evaluate: DKff{1} = d Kff / d magnSigma2
% DKff{2} = d Kff / d lengthScale
% NOTE! Here we have already taken into account that the parameters are transformed
% through log() and thus dK/dlog(p) = p * dK/dp
% evaluate the gradient for training covariance
if nargin == 2 || (isempty(x2) && isempty(mask))
scale = sparse(1:n,1:n,x(:,gpcf.scaler),n,n);
% Evaluate the gradients
DKff = {};
cf = gpcf.cf{1};
if ~savememory
DK = cf.fh.cfg(cf, x);
else
DK = {cf.fh.cfg(cf,x,[],[],i1)};
end
for j = 1:length(DK)
DKff{end+1} = scale*DK{j}*scale;
end
% Evaluate the gradient of non-symmetric covariance (e.g. K_fu)
elseif nargin == 3 || isempty(mask)
if size(x,2) ~= size(x2,2)
error('gpcf_scaled -> _ghyper: The number of columns in x and x2 has to be the same. ')
end
scale = sparse(1:n,1:n,x(:,gpcf.scaler),n,n);
n2 = length(x2);
scale2 = sparse(1:n2,1:n2,x2(:,gpcf.scaler),n2,n2);
% Evaluate the gradients
DKff = {};
cf = gpcf.cf{1};
if ~savememory
DK = cf.fh.cfg(cf, x, x2);
else
DK = {cf.fh.cfg(cf,x, x2, [], i1)};
end
for j = 1:length(DK)
DKff{end+1} = scale*DK{j}*scale2;
end
% Evaluate: DKff{1} = d mask(Kff,I) / d magnSigma2
% DKff{2...} = d mask(Kff,I) / d lengthScale
elseif nargin == 4 || nargin == 5
% Evaluate the gradients
DKff = {};
scale = x(:,gpcf.scaler);
cf = gpcf.cf{1};
if ~savememory
DK = cf.fh.cfg(cf, x, [], 1);
else
DK = cf.fh.cfg(cf, x, [], 1, i1);
end
for j = 1:length(DK)
DKff{end+1} = scale.*DK{j}.*scale;
end
end
if savememory
DKff=DKff{1};
end
end
function DKff = gpcf_scaled_ginput(gpcf, x, x2,i1)
%GPCF_scaled_GINPUT Evaluate gradient of covariance function with
% respect to x
%
% Description
% DKff = GPCF_scaled_GINPUT(GPCF, X) takes a covariance function
% structure GPCF, a matrix X of input vectors and returns
% DKff, the gradients of covariance matrix Kff = k(X,X) with
% respect to X (cell array with matrix elements). This subfunction
% is needed when computing gradients with respect to inducing
% inputs in sparse approximations.
%
% DKff = GPCF_scaled_GINPUT(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% DKff = GPCF_scaled_GINPUT(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2), or k(X,X) if X2 is empty, with respect to ith
% covariate in X (cell array with matrix elements). This
% subfunction is needed when using memory save option in
% gp_set.
%
% See also
% GPCF_scaled_PAK, GPCF_scaled_UNPAK, GPCF_scaled_LP, GP_G
[n, m] =size(x);
if nargin==4
% Use memory save option
savememory=1;
if i1==0
% Return number of covariates
if isfield(gpcf,'selectedVariables')
DKff=length(gpcf.selectedVariables);
else
DKff=m;
end
return
end
else
savememory=0;
end
% evaluate the gradient for training covariance
if nargin == 2 || isempty(x2)
scale = sparse(1:n,1:n,x(:,gpcf.scaler),n,n);
DKff = {};
cf = gpcf.cf{1};
if ~savememory
DK = cf.fh.ginput(cf, x);
else
DK = cf.fh.ginput(cf,x,[],i1);
end
for j = 1:length(DK)
DKff{end+1} = scale*DK{j}*scale;
end
% Evaluate the gradient of non-symmetric covariance (e.g. K_fu)
elseif nargin == 3 || nargin == 4
if size(x,2) ~= size(x2,2)
error('gpcf_scaled -> _ghyper: The number of columns in x and x2 has to be the same. ')
end
scale = sparse(1:n,1:n,x(:,gpcf.scaler),n,n);
n2 = length(x2);
scale2 = sparse(1:n2,1:n2,x2(:,gpcf.scaler),n2,n2);
DKff = {};
cf = gpcf.cf{1};
if ~savememory
DK = cf.fh.ginput(cf, x, x2);
else
DK = cf.fh.ginput(cf,x,x2,i1);
end
for j = 1:length(DK)
DKff{end+1} = scale*DK{j}*scale2;
end
end
end
function C = gpcf_scaled_cov(gpcf, x1, x2)
%GP_scaled_COV Evaluate covariance matrix between two input vectors
%
% Description
% C = GP_scaled_COV(GP, TX, X) takes in covariance function of a
% Gaussian process GP and two matrixes TX and X that contain
% input vectors to GP. Returns covariance matrix C. Every
% element ij of C contains covariance between inputs i in TX
% and j in X. This is a mandatory subfunction used for example in
% prediction and energy computations.
%
%
% See also
% GPCF_scaled_TRCOV, GPCF_scaled_TRVAR, GP_COV, GP_TRCOV
if isempty(x2)
x2=x1;
end
[n1,m1]=size(x1);
[n2,m2]=size(x2);
scale = sparse(1:n1,1:n1,x1(:,gpcf.scaler),n1,n1);
scale2 = sparse(1:n2,1:n2,x2(:,gpcf.scaler),n2,n2);
if m1~=m2
error('the number of columns of X1 and X2 has to be same')
end
cf = gpcf.cf{1};
C = scale*feval(cf.fh.cov, cf, x1, x2)*scale2;
end
function C = gpcf_scaled_trcov(gpcf, x)
%GP_scaled_TRCOV Evaluate training covariance matrix of inputs
%
% Description
% C = GP_scaled_TRCOV(GP, TX) takes in covariance function of a
% Gaussian process GP and matrix TX that contains training
% input vectors. Returns covariance matrix C. Every element ij
% of C contains covariance between inputs i and j in TX. This
% is a mandatory subfunction used for example in prediction
% and energy computations.
%
% See also
% GPCF_scaled_COV, GPCF_scaled_TRVAR, GP_COV, GP_TRCOV
n = length(x);
scale = sparse(1:n,1:n,x(:,gpcf.scaler),n,n);
cf = gpcf.cf{1};
C = scale*feval(cf.fh.trcov, cf, x)*scale;
end
function C = gpcf_scaled_trvar(gpcf, x)
% GP_scaled_TRVAR Evaluate training variance vector
%
% Description
% C = GP_scaled_TRVAR(GPCF, TX) takes in covariance function of
% a Gaussian process GPCF and matrix TX that contains training
% inputs. Returns variance vector C. Every element i of C
% contains variance of input i in TX. This is a mandatory
% subfunction used for example in prediction and energy computations.
%
% See also
% GPCF_scaled_COV, GP_COV, GP_TRCOV
cf = gpcf.cf{1};
C = x(:,gpcf.scaler).*feval(cf.fh.trvar, cf, x).*x(:,gpcf.scaler);
end
function reccf = gpcf_scaled_recappend(reccf, ri, gpcf)
%RECAPPEND Record append
%
% Description
% RECCF = GPCF_scaled_RECAPPEND(RECCF, RI, GPCF) takes a
% covariance function record structure RECCF, record index RI
% and covariance function structure GPCF with the current MCMC
% samples of the parameters. Returns RECCF which contains all
% the old samples and the current samples from GPCF. This
% subfunction is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC, GP_MC->RECAPPEND
% Initialize record
if nargin == 2
reccf.type = 'gpcf_scaled';
cf = ri.cf{1};
reccf.cf{1} = feval(cf.fh.recappend, [], ri.cf{1});
reccf.scaler = ri.scaler;
% Set the function handles
reccf.fh.pak = @gpcf_scaled_pak;
reccf.fh.unpak = @gpcf_scaled_unpak;
reccf.fh.e = @gpcf_scaled_lp;
reccf.fh.lpg = @gpcf_scaled_lpg;
reccf.fh.cfg = @gpcf_scaled_cfg;
reccf.fh.cov = @gpcf_scaled_cov;
reccf.fh.trcov = @gpcf_scaled_trcov;
reccf.fh.trvar = @gpcf_scaled_trvar;
reccf.fh.recappend = @gpcf_scaled_recappend;
return
end
%loop over all of the covariance functions
cf = gpcf.cf{1};
reccf.cf{1} = feval(cf.fh.recappend, reccf.cf{1}, ri, cf);
end
| github | lcnbeapp/beapp-master | gpcf_ppcs2.m | .m | beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gpcf_ppcs2.m | 39,334 | utf_8 | edb49e8f28a995e0555b3b9deb81b3e9 |
function gpcf = gpcf_ppcs2(varargin)
%GPCF_PPCS2 Create a piece wise polynomial (q=2) covariance function
%
% Description
% GPCF = GPCF_PPCS2('nin',nin,'PARAM1',VALUE1,'PARAM2,VALUE2,...)
% creates piece wise polynomial (q=2) covariance function
% structure in which the named parameters have the specified
% values. Any unspecified parameters are set to default values.
% Obligatory parameter is 'nin', which tells the dimension
% of input space.
%
% GPCF = GPCF_PPCS2(GPCF,'PARAM1',VALUE1,'PARAM2,VALUE2,...)
% modify a covariance function structure with the named
% parameters altered with the specified values.
%
% Parameters for piece wise polynomial (q=2) covariance function [default]
% magnSigma2 - magnitude (squared) [0.1]
% lengthScale - length scale for each input. [1]
% This can be either scalar corresponding
% to an isotropic function or vector
% defining own length-scale for each input
% direction.
% l_nin - order of the polynomial [floor(nin/2) + 3]
% Has to be greater than or equal to default.
% magnSigma2_prior - prior for magnSigma2 [prior_logunif]
% lengthScale_prior - prior for lengthScale [prior_t]
% metric - metric structure used by the covariance function []
% selectedVariables - vector defining which inputs are used [all]
% selectedVariables is shorthand for using
% metric_euclidean with corresponding components
%
% Note! If the prior is 'prior_fixed' then the parameter in
% question is considered fixed and it is not handled in
% optimization, grid integration, MCMC etc.
%
% The piecewise polynomial function is the following:
%
% k_pp2(x_i, x_j) = ma2*cs^(l+2)*((l^2+4*l+3)*r^2 + (3*l+6)*r +3)
%
% where r = sum( (x_i,d - x_j,d)^2/l^2_d )
% l = floor(l_nin/2) + 3
% cs = max(0,1-r)
% and l_nin must be greater or equal to gpcf.nin
%
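% As a scalar illustration of the formula above (a sketch only, not used
% by the implementation below; xi, xj and ld are 1 x nin vectors):
%   r  = sum(((xi - xj).^2)./(ld.^2));
%   l  = floor(l_nin/2) + 3;
%   cs = max(0, 1 - r);
%   k  = ma2 * cs^(l+2) * ((l^2+4*l+3)*r^2 + (3*l+6)*r + 3);
%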
% NOTE! Use of gpcf_ppcs2 requires that you have installed
% GPstuff with SuiteSparse.
%
% See also
% GP_SET, GPCF_*, PRIOR_*, METRIC_*
% Copyright (c) 2007-2010 Jarno Vanhatalo, Jouni Hartikainen
% Copyright (c) 2010 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
if nargin>0 && ischar(varargin{1}) && ismember(varargin{1},{'init' 'set'})
% remove init and set
varargin(1)=[];
end
ip=inputParser;
ip.FunctionName = 'GPCF_PPCS2';
ip.addOptional('gpcf', [], @isstruct);
ip.addParamValue('nin',[], @(x) isscalar(x) && x>0 && mod(x,1)==0);
ip.addParamValue('magnSigma2',0.1, @(x) isscalar(x) && x>0);
ip.addParamValue('lengthScale',1, @(x) isvector(x) && all(x>0));
ip.addParamValue('l_nin',[], @(x) isscalar(x) && x>0 && mod(x,1)==0);
ip.addParamValue('metric',[], @isstruct);
ip.addParamValue('magnSigma2_prior', prior_logunif(), ...
@(x) isstruct(x) || isempty(x));
ip.addParamValue('lengthScale_prior',prior_t(), ...
@(x) isstruct(x) || isempty(x));
ip.addParamValue('selectedVariables',[], @(x) isempty(x) || ...
(isvector(x) && all(x>0)));
ip.parse(varargin{:});
gpcf=ip.Results.gpcf;
if isempty(gpcf)
% Check that SuiteSparse is available
if ~exist('ldlchol')
error('SuiteSparse is not installed (or it is not in the path). gpcf_ppcs2 cannot be used!')
end
init=true;
gpcf.nin=ip.Results.nin;
if isempty(gpcf.nin)
error('nin has to be given for ppcs: gpcf_ppcs2(''nin'',NIN,...)')
end
gpcf.type = 'gpcf_ppcs2';
% cf is compactly supported
gpcf.cs = 1;
else
if ~isfield(gpcf,'type') || ~isequal(gpcf.type,'gpcf_ppcs2')
error('First argument does not seem to be a valid covariance function structure')
end
init=false;
end
if init
% Set the function handles to the subfunctions
gpcf.fh.pak = @gpcf_ppcs2_pak;
gpcf.fh.unpak = @gpcf_ppcs2_unpak;
gpcf.fh.lp = @gpcf_ppcs2_lp;
gpcf.fh.lpg = @gpcf_ppcs2_lpg;
gpcf.fh.cfg = @gpcf_ppcs2_cfg;
gpcf.fh.ginput = @gpcf_ppcs2_ginput;
gpcf.fh.cov = @gpcf_ppcs2_cov;
gpcf.fh.trcov = @gpcf_ppcs2_trcov;
gpcf.fh.trvar = @gpcf_ppcs2_trvar;
gpcf.fh.recappend = @gpcf_ppcs2_recappend;
end
% Initialize parameters
if init || ~ismember('l_nin',ip.UsingDefaults)
gpcf.l=ip.Results.l_nin;
if isempty(gpcf.l)
gpcf.l = floor(gpcf.nin/2) + 3;
end
if gpcf.l < gpcf.nin
error('The l_nin has to be greater than or equal to the number of inputs!')
end
end
if init || ~ismember('lengthScale',ip.UsingDefaults)
gpcf.lengthScale = ip.Results.lengthScale;
end
if init || ~ismember('magnSigma2',ip.UsingDefaults)
gpcf.magnSigma2 = ip.Results.magnSigma2;
end
% Initialize prior structure
if init
gpcf.p=[];
end
if init || ~ismember('lengthScale_prior',ip.UsingDefaults)
gpcf.p.lengthScale=ip.Results.lengthScale_prior;
end
if init || ~ismember('magnSigma2_prior',ip.UsingDefaults)
gpcf.p.magnSigma2=ip.Results.magnSigma2_prior;
end
%Initialize metric
if ~ismember('metric',ip.UsingDefaults)
if ~isempty(ip.Results.metric)
gpcf.metric = ip.Results.metric;
gpcf = rmfield(gpcf, 'lengthScale');
gpcf.p = rmfield(gpcf.p, 'lengthScale');
elseif isfield(gpcf,'metric')
if ~isfield(gpcf,'lengthScale')
gpcf.lengthScale = gpcf.metric.lengthScale;
end
if ~isfield(gpcf.p,'lengthScale')
gpcf.p.lengthScale = gpcf.metric.p.lengthScale;
end
gpcf = rmfield(gpcf, 'metric');
end
end
% selectedVariables options implemented using metric_euclidean
if ~ismember('selectedVariables',ip.UsingDefaults)
if ~isfield(gpcf,'metric')
if ~isempty(ip.Results.selectedVariables)
gpcf.selectedVariables = ip.Results.selectedVariables;
% gpcf.metric=metric_euclidean('components',...
% num2cell(ip.Results.selectedVariables),...
% 'lengthScale',gpcf.lengthScale,...
% 'lengthScale_prior',gpcf.p.lengthScale);
% gpcf = rmfield(gpcf, 'lengthScale');
% gpcf.p = rmfield(gpcf.p, 'lengthScale');
end
elseif isfield(gpcf,'metric')
if ~isempty(ip.Results.selectedVariables)
gpcf.metric=metric_euclidean(gpcf.metric,...
'components',...
num2cell(ip.Results.selectedVariables));
if ~ismember('lengthScale',ip.UsingDefaults)
gpcf.metric.lengthScale=ip.Results.lengthScale;
gpcf = rmfield(gpcf, 'lengthScale');
end
if ~ismember('lengthScale_prior',ip.UsingDefaults)
gpcf.metric.p.lengthScale=ip.Results.lengthScale_prior;
gpcf.p = rmfield(gpcf.p, 'lengthScale');
end
else
if ~isfield(gpcf,'lengthScale')
gpcf.lengthScale = gpcf.metric.lengthScale;
end
if ~isfield(gpcf.p,'lengthScale')
gpcf.p.lengthScale = gpcf.metric.p.lengthScale;
end
gpcf = rmfield(gpcf, 'metric');
end
end
end
end
function [w,s] = gpcf_ppcs2_pak(gpcf)
%GPCF_PPCS2_PAK Combine GP covariance function parameters into
% one vector
%
% Description
% W = GPCF_PPCS2_PAK(GPCF) takes a covariance function
% structure GPCF and combines the covariance function
% parameters and their hyperparameters into a single row
% vector W. This is a mandatory subfunction used for
% example in energy and gradient computations.
%
% w = [ log(gpcf.magnSigma2)
% (hyperparameters of gpcf.magnSigma2)
% log(gpcf.lengthScale(:))
% (hyperparameters of gpcf.lengthScale)]'
%
% See also
% GPCF_PPCS2_UNPAK
w = []; s = {};
if ~isempty(gpcf.p.magnSigma2)
w = [w log(gpcf.magnSigma2)];
s = [s; 'log(ppcs2.magnSigma2)'];
% Hyperparameters of magnSigma2
[wh sh] = gpcf.p.magnSigma2.fh.pak(gpcf.p.magnSigma2);
w = [w wh];
s = [s; sh];
end
if isfield(gpcf,'metric')
[wh sh]=gpcf.metric.fh.pak(gpcf.metric);
w = [w wh];
s = [s; sh];
else
if ~isempty(gpcf.p.lengthScale)
w = [w log(gpcf.lengthScale)];
if numel(gpcf.lengthScale)>1
s = [s; sprintf('log(ppcs2.lengthScale x %d)',numel(gpcf.lengthScale))];
else
s = [s; 'log(ppcs2.lengthScale)'];
end
% Hyperparameters of lengthScale
[wh sh] = gpcf.p.lengthScale.fh.pak(gpcf.p.lengthScale);
w = [w wh];
s = [s; sh];
end
end
end
function [gpcf, w] = gpcf_ppcs2_unpak(gpcf, w)
%GPCF_PPCS2_UNPAK Sets the covariance function parameters into
% the structure
%
% Description
% [GPCF, W] = GPCF_PPCS2_UNPAK(GPCF, W) takes a covariance
% function structure GPCF and a hyper-parameter vector W,
% and returns a covariance function structure identical
% to the input, except that the covariance hyper-parameters
% have been set to the values in W. Deletes the values set to
% GPCF from W and returns the modified W. This is a mandatory
% subfunction used for example in energy and gradient computations.
%
% Assignment is inverse of
% w = [ log(gpcf.magnSigma2)
% (hyperparameters of gpcf.magnSigma2)
% log(gpcf.lengthScale(:))
% (hyperparameters of gpcf.lengthScale)]'
%
% See also
% GPCF_PPCS2_PAK
gpp=gpcf.p;
if ~isempty(gpp.magnSigma2)
gpcf.magnSigma2 = exp(w(1));
w = w(2:end);
% Hyperparameters of magnSigma2
[p, w] = gpcf.p.magnSigma2.fh.unpak(gpcf.p.magnSigma2, w);
gpcf.p.magnSigma2 = p;
end
if isfield(gpcf,'metric')
[metric, w] = gpcf.metric.fh.unpak(gpcf.metric, w);
gpcf.metric = metric;
else
if ~isempty(gpp.lengthScale)
i1=1;
i2=length(gpcf.lengthScale);
gpcf.lengthScale = exp(w(i1:i2));
w = w(i2+1:end);
% Hyperparameters of lengthScale
[p, w] = gpcf.p.lengthScale.fh.unpak(gpcf.p.lengthScale, w);
gpcf.p.lengthScale = p;
end
end
end
function lp = gpcf_ppcs2_lp(gpcf)
%GPCF_PPCS2_LP Evaluate the log prior of covariance function parameters
%
% Description
% LP = GPCF_PPCS2_LP(GPCF) takes a covariance function
% structure GPCF and returns log(p(th)), where th collects the
% parameters. This is a mandatory subfunction used for example
% in energy computations.
%
% See also
% GPCF_PPCS2_PAK, GPCF_PPCS2_UNPAK, GPCF_PPCS2_LPG, GP_E
% Evaluate the prior contribution to the error. The parameters that
% are sampled are transformed, e.g., W = log(w) where w is all
% the "real" samples. On the other hand errors are evaluated in
% the W-space so we also need to take into account the Jacobian of
% the transformation, e.g., W -> w = exp(W). See Gelman et al., 2004,
% Bayesian Data Analysis, second edition, p. 24.
lp = 0;
gpp=gpcf.p;
if ~isempty(gpcf.p.magnSigma2)
lp = lp +gpp.magnSigma2.fh.lp(gpcf.magnSigma2, ...
gpp.magnSigma2) +log(gpcf.magnSigma2);
end
if isfield(gpcf,'metric')
lp = lp +gpcf.metric.fh.lp(gpcf.metric);
elseif ~isempty(gpp.lengthScale)
lp = lp +gpp.lengthScale.fh.lp(gpcf.lengthScale, ...
gpp.lengthScale) +sum(log(gpcf.lengthScale));
end
end
function lpg = gpcf_ppcs2_lpg(gpcf)
%GPCF_PPCS2_LPG Evaluate gradient of the log prior with respect
% to the parameters.
%
% Description
% LPG = GPCF_PPCS2_LPG(GPCF) takes a covariance function
% structure GPCF and returns LPG = d log (p(th))/dth, where th
% is the vector of parameters. This is a mandatory subfunction
% used for example in gradient computations.
%
% See also
% GPCF_PPCS2_PAK, GPCF_PPCS2_UNPAK, GPCF_PPCS2_LP, GP_G
lpg = [];
gpp=gpcf.p;
if ~isempty(gpcf.p.magnSigma2)
lpgs = gpp.magnSigma2.fh.lpg(gpcf.magnSigma2, gpp.magnSigma2);
lpg = [lpg lpgs(1).*gpcf.magnSigma2+1 lpgs(2:end)];
end
if isfield(gpcf,'metric')
lpg_dist = gpcf.metric.fh.lpg(gpcf.metric);
lpg=[lpg lpg_dist];
else
if ~isempty(gpcf.p.lengthScale)
lll = length(gpcf.lengthScale);
lpgs = gpp.lengthScale.fh.lpg(gpcf.lengthScale, gpp.lengthScale);
lpg = [lpg lpgs(1:lll).*gpcf.lengthScale+1 lpgs(lll+1:end)];
end
end
end
function DKff = gpcf_ppcs2_cfg(gpcf, x, x2, mask, i1)
%GPCF_PPCS2_CFG Evaluate gradient of covariance function
% with respect to the parameters
%
% Description
% DKff = GPCF_PPCS2_CFG(GPCF, X) takes a covariance function
% structure GPCF, a matrix X of input vectors and returns
% DKff, the gradients of covariance matrix Kff = k(X,X) with
% respect to th (cell array with matrix elements). This is a
% mandatory subfunction used in gradient computations.
%
% DKff = GPCF_PPCS2_CFG(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using sparse
% approximations (e.g. FIC).
%
% DKff = GPCF_PPCS2_CFG(GPCF, X, [], MASK) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the diagonal of gradients of covariance matrix
% Kff = k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using sparse
% approximations (e.g. FIC).
%
% DKff = GPCF_PPCS2_CFG(GPCF, X, X2, [], i) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2), or k(X,X) if X2 is empty, with respect to ith
% hyperparameter. This subfunction is needed when using
% memory save option in gp_set.
%
% See also
% GPCF_PPCS2_PAK, GPCF_PPCS2_UNPAK, GPCF_PPCS2_LP, GP_G
gpp=gpcf.p;
i2=1;
DKff = {};
gprior = [];
if nargin==5
% Use memory save option
savememory=1;
if i1==0
% Return number of hyperparameters
i=0;
if ~isempty(gpcf.p.magnSigma2)
i=i+1;
end
if ~isempty(gpcf.p.lengthScale)
i=i+length(gpcf.lengthScale);
end
DKff=i;
return
end
else
savememory=0;
end
% Evaluate: DKff{1} = d Kff / d magnSigma2
% DKff{2} = d Kff / d lengthScale
% NOTE! Here we have already taken into account that the parameters
% are transformed through log() and thus dK/dlog(p) = p * dK/dp
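% For magnSigma2 this means d Kff / d log(magnSigma2) =
% magnSigma2 * (Kff / magnSigma2) = Kff, which is why DKff{1} below is
% simply the covariance matrix itself.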
% evaluate the gradient for training covariance
if nargin == 2 || (isempty(x2) && isempty(mask))
Cdm = gpcf_ppcs2_trcov(gpcf, x);
ii1=0;
if ~isempty(gpcf.p.magnSigma2)
ii1 = ii1 +1;
DKff{ii1} = Cdm;
end
l = gpcf.l;
[I,J] = find(Cdm);
if isfield(gpcf,'metric')
% Compute the sparse distance matrix and its gradient.
[n, m] =size(x);
ntriplets = (nnz(Cdm)-n)./2;
I = zeros(ntriplets,1);
J = zeros(ntriplets,1);
dist = zeros(ntriplets,1);
for jj = 1:length(gpcf.metric.components)
gdist{jj} = zeros(ntriplets,1);
end
ntriplets = 0;
for ii=1:n-1
col_ind = ii + find(Cdm(ii+1:n,ii));
d = gpcf.metric.fh.dist(gpcf.metric, x(col_ind,:), x(ii,:));
gd = gpcf.metric.fh.distg(gpcf.metric, x(col_ind,:), x(ii,:));
ntrip_prev = ntriplets;
ntriplets = ntriplets + length(d);
ind_tr = ntrip_prev+1:ntriplets;
I(ind_tr) = col_ind;
J(ind_tr) = ii;
dist(ind_tr) = d;
for jj = 1:length(gd)
gdist{jj}(ind_tr) = gd{jj};
end
end
ma2 = gpcf.magnSigma2;
cs = 1-dist;
const1 = 2.*l^2+8.*l+6;
const2 = (l+2)*0.5*const1;
const3 = ma2/3.*cs.^(l+1);
Dd = const3.*(cs.*(const1.*dist+3*l+6) - (const2.*dist.^2 + (l+2)*(3*l+6).*dist+(l+2)*3));
for i=1:length(gdist)
ii1 = ii1+1;
D = Dd.*gdist{i};
D = sparse(I,J,D,n,n);
DKff{ii1} = D + D';
end
else
if isfield(gpcf, 'selectedVariables')
x = x(:,gpcf.selectedVariables);
end
[n, m] =size(x);
if ~savememory
i1=1:m;
else
if i1==1
DKff=DKff{1};
return
end
i1=i1-1;
ii1=ii1-1;
end
if ~isempty(gpcf.p.lengthScale)
% loop over all the lengthScales
if length(gpcf.lengthScale) == 1
% In the case of isotropic PPCS2
s2 = 1./gpcf.lengthScale.^2;
ma2 = gpcf.magnSigma2;
% Calculate the sparse distance (lower triangle) matrix
d2 = 0;
for i = 1:m
d2 = d2 + s2.*(x(I,i) - x(J,i)).^2;
end
d = sqrt(d2);
% Create the 'compact support' matrix, that is, (1-R)_+,
% where ()_+ truncates all non-positive inputs to zero.
cs = 1-d;
% Calculate the gradient matrix
const1 = 2.*l^2+8.*l+6;
const2 = l^2+4.*l+3;
D = -ma2.*cs.^(l+1).*d.*(cs.*(const1.*d+3.*l+6)-(l+2).*(const2.*d2+(3.*l+6).*d+3))/3;
D = sparse(I,J,D,n,n);
ii1 = ii1+1;
DKff{ii1} = D;
else
% In the case ARD is used
s2 = 1./gpcf.lengthScale.^2;
ma2 = gpcf.magnSigma2;
% Calculate the sparse distance (lower triangle) matrix
% and the distance matrix for each component
d2 = 0;
d_l2 = [];
for i = 1:m
d_l2(:,i) = s2(i).*(x(I,i) - x(J,i)).^2;
d2 = d2 + d_l2(:,i);
end
d = sqrt(d2);
d_l = d_l2;
% Create the 'compact support' matrix, that is, (1-R)_+,
% where ()_+ truncates all non-positive inputs to zero.
cs = 1-d;
const1 = 2.*l^2+8.*l+6;
const2 = (l+2)*0.5*const1;
const3 = -ma2/3.*cs.^(l+1);
Dd = const3.*(cs.*(const1.*d+3*l+6)-(const2.*d2+(l+2)*(3*l+6).*d+(l+2)*3));
int = d ~= 0;
for i = i1
% Calculate the gradient matrix
D = d_l(:,i).*Dd;
% Divide by r in cases where r is non-zero
D(int) = D(int)./d(int);
D = sparse(I,J,D,n,n);
ii1 = ii1+1;
DKff{ii1} = D;
end
end
end
end
% Evaluate the gradient of non-symmetric covariance (e.g. K_fu)
elseif nargin == 3 || isempty(mask)
if size(x,2) ~= size(x2,2)
error('gpcf_ppcs2 -> _cfg: The number of columns in x and x2 has to be the same. ')
end
ii1=0;
K = gpcf.fh.cov(gpcf, x, x2);
if ~isempty(gpcf.p.magnSigma2)
ii1 = ii1 +1;
DKff{ii1} = K;
end
l = gpcf.l;
if isfield(gpcf,'metric')
% If other than scaled euclidean metric
[n1,m1]=size(x);
[n2,m2]=size(x2);
ma = gpcf.magnSigma2;
% Compute the sparse distance matrix.
ntriplets = nnz(K);
I = zeros(ntriplets,1);
J = zeros(ntriplets,1);
R = zeros(ntriplets,1);
dist = zeros(ntriplets,1);
for jj = 1:length(gpcf.metric.components)
gdist{jj} = zeros(ntriplets,1);
end
ntriplets = 0;
for ii=1:n2
d = zeros(n1,1);
d = gpcf.metric.fh.dist(gpcf.metric, x, x2(ii,:));
gd = gpcf.metric.fh.distg(gpcf.metric, x, x2(ii,:));
gprior_dist = gpcf.metric.fh.lpg(gpcf.metric, x, x2(ii,:));
I0t = find(d==0);
d(d >= 1) = 0;
[I2,J2,R2] = find(d);
len = length(R);
ntrip_prev = ntriplets;
ntriplets = ntriplets + length(R2);
ind_tr = ntrip_prev+1:ntriplets;
I(ind_tr) = I2;
J(ind_tr) = ii;
dist(ind_tr) = R2;
for jj = 1:length(gd)
gdist{jj}(ind_tr) = gd{jj}(I2);
end
end
ma2 = gpcf.magnSigma2;
cs = 1-dist;
const1 = 2.*l^2+8.*l+6;
const2 = (l+2)*0.5*const1;
const3 = ma2/3.*cs.^(l+1);
Dd = const3.*(cs.*(const1.*dist+3*l+6) - (const2.*dist.^2 + (l+2)*(3*l+6).*dist+(l+2)*3));
for i=1:length(gdist)
ii1 = ii1+1;
D = Dd.*gdist{i};
D = sparse(I,J,D,n1,n2);
DKff{ii1} = D;
end
else
if isfield(gpcf, 'selectedVariables')
x = x(:,gpcf.selectedVariables);
x2 = x2(:,gpcf.selectedVariables);
end
[n, m] =size(x);
if ~savememory
i1=1:m;
else
if i1==1
DKff=DKff{1};
return
end
i1=i1-1;
ii1=ii1-1;
end
if ~isempty(gpcf.p.lengthScale)
% loop over all the lengthScales
if length(gpcf.lengthScale) == 1
% In the case of isotropic PPCS2
s2 = 1./gpcf.lengthScale.^2;
ma2 = gpcf.magnSigma2;
% Calculate the sparse distance (lower triangle) matrix
dist1 = 0;
for i=1:m
dist1 = dist1 + s2.*(bsxfun(@minus,x(:,i),x2(:,i)')).^2;
end
d1 = sqrt(dist1);
cs1 = max(1-d1,0);
const1 = 2.*l^2+8.*l+6;
const2 = l^2+4.*l+3;
DK_l = -ma2.*cs1.^(l+1).*d1.*(cs1.*(const1.*d1+3.*l+6)-(l+2).*(const2.*dist1+(3.*l+6).*d1+3))/3;
ii1=ii1+1;
DKff{ii1} = DK_l;
else
% In the case ARD is used
s2 = 1./gpcf.lengthScale.^2;
ma2 = gpcf.magnSigma2;
% Calculate the sparse distance (lower triangle) matrix
% and the distance matrix for each component
dist1 = 0;
d_l1 = [];
for i = 1:m
dist1 = dist1 + s2(i).*bsxfun(@minus,x(:,i),x2(:,i)').^2;
d_l1{i} = s2(i).*(bsxfun(@minus,x(:,i),x2(:,i)')).^2;
end
d1 = sqrt(dist1);
cs1 = max(1-d1,0);
const1 = l^2+4.*l+3;
const2 = 3.*l+6;
for i = i1
% Calculate the gradient matrix
DK_l = ma2.*(l+2).*d_l1{i}.*cs1.^(l+1).*(const1.*dist1 + const2.*d1 + 3)./3;
DK_l = DK_l - ma2.*cs1.^(l+2).*d_l1{i}.*(2.*const1.*d1 + const2)./3;
% Divide by r in cases where r is non-zero
DK_l(d1 ~= 0) = DK_l(d1 ~= 0)./d1(d1 ~= 0);
ii1=ii1+1;
DKff{ii1} = DK_l;
end
end
end
end
% Evaluate: DKff{1} = d mask(Kff,I) / d magnSigma2
% DKff{2...} = d mask(Kff,I) / d lengthScale
elseif nargin == 4 || nargin == 5
ii1=0;
[n, m] =size(x);
if ~isempty(gpcf.p.magnSigma2) && (~savememory || all(i1==1))
ii1 = ii1+1;
DKff{ii1} = gpcf.fh.trvar(gpcf, x); % d mask(Kff,I) / d magnSigma2
end
if isfield(gpcf,'metric')
dist = 0;
distg = gpcf.metric.fh.distg(gpcf.metric, x, [], 1);
gprior_dist = gpcf.metric.fh.lpg(gpcf.metric);
for i=1:length(distg)
ii1 = ii1+1;
DKff{ii1} = 0;
end
else
if ~isempty(gpcf.p.lengthScale)
for i2=1:length(gpcf.lengthScale)
ii1 = ii1+1;
DKff{ii1} = 0; % d mask(Kff,I) / d lengthScale
end
end
end
end
if savememory
DKff=DKff{1};
end
end
function DKff = gpcf_ppcs2_ginput(gpcf, x, x2, i1)
%GPCF_PPCS2_GINPUT Evaluate gradient of covariance function with
% respect to x
%
% Description
% DKff = GPCF_PPCS2_GINPUT(GPCF, X) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% DKff = GPCF_PPCS2_GINPUT(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% DKff = GPCF_PPCS2_GINPUT(GPCF, X, X2, i) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2), or k(X,X) if X2 is empty, with respect to ith
% covariate in X (cell array with matrix elements). This
% subfunction is needed when using memory save option in
% gp_set.
%
% See also
% GPCF_PPCS2_PAK, GPCF_PPCS2_UNPAK, GPCF_PPCS2_LP, GP_G
[n, m] =size(x);
ii1=0;
if nargin<4
i1=1:m;
else
% Use memory save option
if i1==0
% Return number of covariates
if isfield(gpcf,'selectedVariables')
DKff=length(gpcf.selectedVariables);
else
DKff=m;
end
return
end
end
% evaluate the gradient for training covariance
if nargin == 2 || isempty(x2)
K = gpcf_ppcs2_trcov(gpcf, x);
l = gpcf.l;
[I,J] = find(K);
if isfield(gpcf,'metric')
% Compute the sparse distance matrix and its gradient.
ntriplets = (nnz(K)-n)./2;
I = zeros(ntriplets,1);
J = zeros(ntriplets,1);
dist = zeros(ntriplets,1);
for jj = 1:length(gpcf.metric.components)
gdist{jj} = zeros(ntriplets,1);
end
ntriplets = 0;
for ii=1:n-1
col_ind = ii + find(K(ii+1:n,ii));
d = zeros(length(col_ind),1);
d = gpcf.metric.fh.dist(gpcf.metric, x(col_ind,:), x(ii,:));
[gd, gprior_dist] = gpcf.metric.fh.ginput(gpcf.metric, x(col_ind,:), x(ii,:));
ntrip_prev = ntriplets;
ntriplets = ntriplets + length(d);
ind_tr = ntrip_prev+1:ntriplets;
I(ind_tr) = col_ind;
J(ind_tr) = ii;
dist(ind_tr) = d;
for jj = 1:length(gd)
gdist{jj}(ind_tr) = gd{jj};
end
end
ma2 = gpcf.magnSigma2;
cs = 1-dist;
const1 = 2.*l^2+8.*l+6;
const2 = (l+2)*0.5*const1;
const3 = ma2/3.*cs.^(l+1);
Dd = const3.*(cs.*(const1.*dist+3*l+6) - (const2.*dist.^2 + (l+2)*(3*l+6).*dist+(l+2)*3));
for i=1:length(gdist)
ii1 = ii1+1;
D = Dd.*gdist{i};
D = sparse(I,J,D,n,n);
DKff{ii1} = D + D';
end
else
if length(gpcf.lengthScale) == 1
% In the case of an isotropic
s2 = repmat(1./gpcf.lengthScale.^2, 1, m);
else
s2 = 1./gpcf.lengthScale.^2;
end
ma2 = gpcf.magnSigma2;
% Calculate the sparse distance (lower triangle) matrix
% and the distance matrix for each component
d2 = 0;
for i = 1:m
d2 = d2 + s2(i).*(x(I,i) - x(J,i)).^2;
end
d = sqrt(d2);
% Create the 'compact support' matrix, that is, (1-R)_+,
% where ()_+ truncates all non-positive inputs to zero.
cs = 1-d;
const1 = 2.*l^2+8.*l+6;
const2 = (l+2)*0.5*const1;
const3 = ma2/3.*cs.^(l+1);
Dd = const3.*(cs.*(const1.*d+3*l+6) - (const2.*d.^2 + (l+2)*(3*l+6).*d+(l+2)*3));
Dd = sparse(I,J,Dd,n,n);
d = sparse(I,J,d,n,n);
row = ones(n,1);
cols = 1:n;
for i = i1
for j = 1:n
% Calculate the gradient matrix
ind = find(d(:,j));
apu = full(Dd(:,j)).*s2(i).*(x(j,i)-x(:,i));
apu(ind) = apu(ind)./d(ind,j);
D = sparse(row*j, cols, apu, n, n);
D = D+D';
ii1 = ii1+1;
DKff{ii1} = D;
end
end
end
% Evaluate the gradient of non-symmetric covariance (e.g. K_fu)
elseif nargin == 3 || nargin == 4
if size(x,2) ~= size(x2,2)
error('gpcf_ppcs2 -> _ginput: The number of columns in x and x2 has to be the same. ')
end
ii1=0;
K = gpcf.fh.cov(gpcf, x, x2);
n2 = size(x2,1);
l = gpcf.l;
if isfield(gpcf,'metric')
% If other than scaled euclidean metric
[n1,m1]=size(x);
[n2,m2]=size(x2);
ma = gpcf.magnSigma2;
% Compute the sparse distance matrix.
ntriplets = nnz(K);
I = zeros(ntriplets,1);
J = zeros(ntriplets,1);
R = zeros(ntriplets,1);
dist = zeros(ntriplets,1);
for jj = 1:length(gpcf.metric.components)
gdist{jj} = zeros(ntriplets,1);
end
ntriplets = 0;
for ii=1:n2
d = zeros(n1,1);
d = gpcf.metric.fh.dist(gpcf.metric, x, x2(ii,:));
[gd, gprior_dist] = gpcf.metric.fh.ginput(gpcf.metric, x, x2(ii,:));
I0t = find(d==0);
d(d >= 1) = 0;
[I2,J2,R2] = find(d);
len = length(R);
ntrip_prev = ntriplets;
ntriplets = ntriplets + length(R2);
ind_tr = ntrip_prev+1:ntriplets;
I(ind_tr) = I2;
J(ind_tr) = ii;
dist(ind_tr) = R2;
for jj = 1:length(gd)
gdist{jj}(ind_tr) = gd{jj}(I2);
end
end
ma2 = gpcf.magnSigma2;
cs = 1-dist;
const1 = 2.*l^2+8.*l+6;
const2 = (l+2)*0.5*const1;
const3 = ma2/3.*cs.^(l+1);
Dd = const3.*(cs.*(const1.*dist+3*l+6) - (const2.*dist.^2 + (l+2)*(3*l+6).*dist+(l+2)*3));
for i=1:length(gdist)
ii1 = ii1+1;
D = Dd.*gdist{i};
D = sparse(I,J,D,n1,n2);
DKff{ii1} = D;
end
else
if length(gpcf.lengthScale) == 1
% In the case of an isotropic PPCS2
s2 = repmat(1./gpcf.lengthScale.^2, 1, m);
else
s2 = 1./gpcf.lengthScale.^2;
end
ma2 = gpcf.magnSigma2;
% Calculate the sparse distance (lower triangle) matrix
% and the distance matrix for each component
d2 = 0;
for i = 1:m
d2 = d2 + s2(i).*bsxfun(@minus,x(:,i),x2(:,i)').^2;
end
d = sqrt(d2);
cs = max(1-d,0);
const1 = 2.*l^2+8.*l+6;
const2 = (l+2)*0.5*const1;
const3 = ma2/3.*cs.^(l+1);
Dd = const3.*(cs.*(const1.*d+3*l+6) - (const2.*d.^2 + (l+2)*(3*l+6).*d+(l+2)*3));
row = ones(n2,1);
cols = 1:n2;
for i = i1
for j = 1:n
% Calculate the gradient matrix
ind = find(d(j,:));
apu = Dd(j,:).*s2(i).*(x(j,i)-x2(:,i))';
apu(ind) = apu(ind)./d(j,ind);
D = sparse(row*j, cols, apu, n, n2);
ii1 = ii1+1;
DKff{ii1} = D;
end
end
end
end
end
function C = gpcf_ppcs2_cov(gpcf, x1, x2, varargin)
%GP_PPCS2_COV Evaluate covariance matrix between two input vectors
%
% Description
% C = GP_PPCS2_COV(GP, TX, X) takes in covariance function of
% a Gaussian process GP and two matrices TX and X that contain
% input vectors to GP. Returns covariance matrix C. Every
% element ij of C contains covariance between inputs i in TX
% and j in X. This is a mandatory subfunction used for example in
% prediction and energy computations.
%
% See also
% GPCF_PPCS2_TRCOV, GPCF_PPCS2_TRVAR, GP_COV, GP_TRCOV
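%
% Note (derived from the code below): with scaled distance
%   r = sqrt( sum_j (x1_j - x2_j)^2 / lengthScale_j^2 )
% and l = gpcf.l, the compactly supported piecewise polynomial evaluated
% here is
%   k(r) = magnSigma2/3 * max(0,1-r)^(l+2) * ((l^2+4*l+3)*r^2 + (3*l+6)*r + 3),
% which is exactly zero whenever r >= 1; this is what makes the sparse
% triplet construction below worthwhile.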
if isfield(gpcf,'metric')
% If other than scaled euclidean metric
[n1,m1]=size(x1);
[n2,m2]=size(x2);
else
% If scaled euclidean metric
if isfield(gpcf, 'selectedVariables')
x1 = x1(:,gpcf.selectedVariables);
x2 = x2(:,gpcf.selectedVariables);
end
[n1,m1]=size(x1);
[n2,m2]=size(x2);
s = 1./(gpcf.lengthScale);
s2 = s.^2;
if size(s)==1
s2 = repmat(s2,1,m1);
end
end
ma2 = gpcf.magnSigma2;
l = gpcf.l;
% Compute the sparse distance matrix.
ntriplets = max(1,floor(0.03*n1*n2));
I = zeros(ntriplets,1);
J = zeros(ntriplets,1);
R = zeros(ntriplets,1);
ntriplets = 0;
I0=zeros(ntriplets,1);
J0=zeros(ntriplets,1);
nn0=0;
for ii1=1:n2
d = zeros(n1,1);
if isfield(gpcf, 'metric')
d = gpcf.metric.fh.dist(gpcf.metric, x1, x2(ii1,:));
else
for j=1:m1
d = d + s2(j).*(x1(:,j)-x2(ii1,j)).^2;
end
end
%d = sqrt(d);
I0t = find(d==0);
d(d >= 1) = 0;
[I2,J2,R2] = find(d);
R2 = sqrt(R2);
%len = length(R);
ntrip_prev = ntriplets;
ntriplets = ntriplets + length(R2);
I(ntrip_prev+1:ntriplets) = I2;
J(ntrip_prev+1:ntriplets) = ii1;
R(ntrip_prev+1:ntriplets) = R2;
I0(nn0+1:nn0+length(I0t)) = I0t;
J0(nn0+1:nn0+length(I0t)) = ii1;
nn0 = nn0+length(I0t);
end
r = sparse(I(1:ntriplets),J(1:ntriplets),R(1:ntriplets));
[I,J,r] = find(r);
cs = full(sparse(max(0, 1-r)));
C = ma2.*cs.^(l+2).*((l^2+4*l+3).*r.^2+(3*l+6).*r+3)/3;
C = sparse(I,J,C,n1,n2) + sparse(I0,J0,ma2,n1,n2);
end
function C = gpcf_ppcs2_trcov(gpcf, x)
%GP_PPCS2_TRCOV Evaluate training covariance matrix of inputs
%
% Description
% C = GP_PPCS2_TRCOV(GP, TX) takes in covariance function of a
% Gaussian process GP and matrix TX that contains training
% input vectors. Returns covariance matrix C. Every element ij
% of C contains covariance between inputs i and j in TX. This
% is a mandatory subfunction used for example in prediction and
% energy computations.
%
% See also
% GPCF_PPCS2_COV, GPCF_PPCS2_TRVAR, GP_COV, GP_TRCOV
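%
% Note (derived from the code below): because the PPCS2 covariance has
% compact support, only index pairs with scaled distance r < 1 contribute.
% Pairs with 0 < r < 1 and the zero-distance pairs (i ~= j) are collected
% as separate sparse triplets and the result is returned as a sparse matrix.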
if isfield(gpcf,'metric')
% If other than scaled euclidean metric
[n, m] =size(x);
else
% If a scaled euclidean metric try first mex-implementation
% and if there is not such...
C = trcov(gpcf,x);
% ... evaluate the covariance here.
if isnan(C)
if isfield(gpcf, 'selectedVariables')
x = x(:,gpcf.selectedVariables);
end
[n, m] =size(x);
s = 1./(gpcf.lengthScale);
s2 = s.^2;
if size(s)==1
s2 = repmat(s2,1,m);
end
else
return
end
end
ma2 = gpcf.magnSigma2;
l = gpcf.l;
% Compute the sparse distance matrix.
ntriplets = max(1,floor(0.03*n*n));
I = zeros(ntriplets,1);
J = zeros(ntriplets,1);
R = zeros(ntriplets,1);
ntriplets = 0;
ntripletsz = max(1,floor(0.03.^2*n*n));
Iz = zeros(ntripletsz,1);
Jz = zeros(ntripletsz,1);
ntripletsz = 0;
for ii1=1:n-1
d = zeros(n-ii1,1);
col_ind = ii1+1:n;
if isfield(gpcf, 'metric')
d = gpcf.metric.fh.dist(gpcf.metric, x(col_ind,:), x(ii1,:));
else
for ii2=1:m
d = d+s2(ii2).*(x(col_ind,ii2)-x(ii1,ii2)).^2;
end
end
%d = sqrt(d);
% store zero distance index
[I2z,J2z] = find(d==0);
% create triplets for distances 0<d<1
d(d >= 1) = 0;
[I2,J2,R2] = find(d);
len = length(R);
ntrip_prev = ntriplets;
ntriplets = ntriplets + length(R2);
if (ntriplets > len)
I(2*len) = 0;
J(2*len) = 0;
R(2*len) = 0;
end
ind_tr = ntrip_prev+1:ntriplets;
I(ind_tr) = ii1+I2;
J(ind_tr) = ii1;
R(ind_tr) = sqrt(R2);
% create triplets for distances d==0 (i~=j)
lenz = length(Iz);
ntrip_prevz = ntripletsz;
ntripletsz = ntripletsz + length(I2z);
if (ntripletsz > lenz)
Iz(2*lenz) = 0;
Jz(2*lenz) = 0;
end
ind_trz = ntrip_prevz+1:ntripletsz;
Iz(ind_trz) = ii1+I2z;
Jz(ind_trz) = ii1;
end
% create a lower triangular sparse distance matrix from the triplets for distances 0<d<1
R = sparse(I(1:ntriplets),J(1:ntriplets),R(1:ntriplets),n,n);
% create a lower triangular sparse covariance matrix from the
% triplets for distances d==0 (i~=j)
Rz = sparse(Iz(1:ntripletsz),Jz(1:ntripletsz),repmat(ma2,1,ntripletsz),n,n);
% Find the non-zero elements of R.
[I,J,rn] = find(R);
% Compute covariances for distances 0<d<1
const1 = l^2+4*l+3;
const2 = 3*l+6;
cs = max(0,1-rn);
C = ma2.*cs.^(l+2).*(const1.*rn.^2+const2.*rn+3)/3;
% create a lower triangular sparse covariance matrix from the triplets for distances 0<d<1
C = sparse(I,J,C,n,n);
% add the lower triangular covariance matrix for distances d==0 (i~=j)
C = C + Rz;
% form a square covariance matrix and add the covariance matrix for i==j (d==0)
C = C + C' + sparse(1:n,1:n,ma2,n,n);
end
function C = gpcf_ppcs2_trvar(gpcf, x)
%GP_PPCS2_TRVAR Evaluate training variance vector
%
% Description
% C = GP_PPCS2_TRVAR(GPCF, TX) takes in covariance function of
% a Gaussian process GPCF and matrix TX that contains training
% inputs. Returns variance vector C. Every element i of C
% contains variance of input i in TX. This is a mandatory
% subfunction used for example in prediction and energy computations.
%
% See also
% GPCF_PPCS2_COV, GP_COV, GP_TRCOV
[n, m] =size(x);
C = ones(n,1).*gpcf.magnSigma2;
C(C<eps)=0;
end
function reccf = gpcf_ppcs2_recappend(reccf, ri, gpcf)
%RECAPPEND Record append
%
% Description
% RECCF = GPCF_PPCS2_RECAPPEND(RECCF, RI, GPCF) takes a
% covariance function record structure RECCF, record index RI
% and covariance function structure GPCF with the current MCMC
% samples of the parameters. Returns RECCF which contains all
% the old samples and the current samples from GPCF. This
% subfunction is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC and GP_MC -> RECAPPEND
if nargin == 2
% Initialize the record
reccf.type = 'gpcf_ppcs2';
reccf.nin = ri.nin;
reccf.l = floor(reccf.nin/2)+3;
% cf is compactly supported
reccf.cs = 1;
% Initialize parameters
reccf.lengthScale= [];
reccf.magnSigma2 = [];
% Set the function handles
reccf.fh.pak = @gpcf_ppcs2_pak;
reccf.fh.unpak = @gpcf_ppcs2_unpak;
reccf.fh.e = @gpcf_ppcs2_lp;
reccf.fh.lpg = @gpcf_ppcs2_lpg;
reccf.fh.cfg = @gpcf_ppcs2_cfg;
reccf.fh.cov = @gpcf_ppcs2_cov;
reccf.fh.trcov = @gpcf_ppcs2_trcov;
reccf.fh.trvar = @gpcf_ppcs2_trvar;
reccf.fh.recappend = @gpcf_ppcs2_recappend;
reccf.p=[];
reccf.p.lengthScale=[];
reccf.p.magnSigma2=[];
if isfield(ri.p,'lengthScale') && ~isempty(ri.p.lengthScale)
reccf.p.lengthScale = ri.p.lengthScale;
end
if ~isempty(ri.p.magnSigma2)
reccf.p.magnSigma2 = ri.p.magnSigma2;
end
if isfield(ri, 'selectedVariables')
reccf.selectedVariables = ri.selectedVariables;
end
else
% Append to the record
gpp = gpcf.p;
if ~isfield(gpcf,'metric')
% record lengthScale
reccf.lengthScale(ri,:)=gpcf.lengthScale;
if isfield(gpp,'lengthScale') && ~isempty(gpp.lengthScale)
reccf.p.lengthScale = gpp.lengthScale.fh.recappend(reccf.p.lengthScale, ri, gpcf.p.lengthScale);
end
end
% record magnSigma2
reccf.magnSigma2(ri,:)=gpcf.magnSigma2;
if isfield(gpp,'magnSigma2') && ~isempty(gpp.magnSigma2)
reccf.p.magnSigma2 = gpp.magnSigma2.fh.recappend(reccf.p.magnSigma2, ri, gpcf.p.magnSigma2);
end
end
end
|
github
|
lcnbeapp/beapp-master
|
gpcf_sum.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gpcf_sum.m
| 14,153 |
utf_8
|
b881f1be2b12d89b06edcbbfe5dbad0c
|
function gpcf = gpcf_sum(varargin)
%GPCF_SUM Create a sum form covariance function
%
% Description
% GPCF = GPCF_SUM('cf', {GPCF_1, GPCF_2, ...})
% creates a sum form covariance function
% GPCF = GPCF_1 + GPCF_2 + ... + GPCF_N
%
% See also
% GP_SET, GPCF_*
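%
% Example (illustrative sketch; gpcf_sexp and gpcf_matern32 are other
% GPstuff covariance functions and the parameter values are arbitrary):
%
% cf1 = gpcf_sexp('lengthScale', 1, 'magnSigma2', 0.2^2);
% cf2 = gpcf_matern32('lengthScale', 5);
% gpcf = gpcf_sum('cf', {cf1, cf2});
% % for an n-by-d input matrix x, the training covariance is the
% % elementwise sum of the component covariances
% K = gpcf.fh.trcov(gpcf, x);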
% Copyright (c) 2009-2010 Jarno Vanhatalo
% Copyright (c) 2010 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'GPCF_SUM';
ip.addOptional('gpcf', [], @isstruct);
ip.addParamValue('cf',[], @iscell);
ip.parse(varargin{:});
gpcf=ip.Results.gpcf;
if isempty(gpcf)
init=true;
gpcf.type = 'gpcf_sum';
else
if ~isfield(gpcf,'type') || ~isequal(gpcf.type,'gpcf_sum')
error('First argument does not seem to be a valid covariance function structure')
end
init=false;
end
if init || ~ismember('cf',ip.UsingDefaults)
% Initialize parameters
gpcf.cf = {};
cfs=ip.Results.cf;
if ~isempty(cfs)
for i = 1:length(cfs)
gpcf.cf{i} = cfs{i};
end
else
error('At least one covariance function has to be given in cf');
end
end
if init
% Set the function handles to the subfunctions
gpcf.fh.pak = @gpcf_sum_pak;
gpcf.fh.unpak = @gpcf_sum_unpak;
gpcf.fh.lp = @gpcf_sum_lp;
gpcf.fh.lpg = @gpcf_sum_lpg;
gpcf.fh.cfg = @gpcf_sum_cfg;
gpcf.fh.ginput = @gpcf_sum_ginput;
gpcf.fh.cov = @gpcf_sum_cov;
gpcf.fh.trcov = @gpcf_sum_trcov;
gpcf.fh.trvar = @gpcf_sum_trvar;
gpcf.fh.recappend = @gpcf_sum_recappend;
end
end
function [w, s] = gpcf_sum_pak(gpcf)
%GPCF_SUM_PAK Combine GP covariance function parameters into one vector
%
% Description
% W = GPCF_SUM_PAK(GPCF, W) loops through all the covariance
% functions and packs their parameters into one vector as
% described in the respective functions. This is a mandatory
% subfunction used for example in energy and gradient computations.
%
% See also
% GPCF_SUM_UNPAK
ncf = length(gpcf.cf);
w = []; s = {};
for i=1:ncf
cf = gpcf.cf{i};
[wi si] = cf.fh.pak(cf);
w = [w wi];
s = [s; si];
end
end
function [gpcf, w] = gpcf_sum_unpak(gpcf, w)
%GPCF_SUM_UNPAK Sets the covariance function parameters into
% the structures
%
% Description
% [GPCF, W] = GPCF_SUM_UNPAK(GPCF, W) loops through all the
% covariance functions and unpacks their parameters from W to
% each covariance function structure. This is a mandatory
% subfunction used for example in energy and gradient computations.
%
% See also
% GPCF_SUM_PAK
%
ncf = length(gpcf.cf);
for i=1:ncf
cf = gpcf.cf{i};
[cf, w] = cf.fh.unpak(cf, w);
gpcf.cf{i} = cf;
end
end
function lp = gpcf_sum_lp(gpcf)
%GPCF_SUM_LP Evaluate the log prior of covariance function parameters
%
% Description
% LP = GPCF_SUM_LP(GPCF) takes a covariance function
% structure GPCF and returns log(p(th)), where th collects the
% parameters. This is a mandatory subfunction used for example
% in energy computations.
%
% See also
% GPCF_SUM_PAK, GPCF_SUM_UNPAK, GPCF_SUM_LPG, GP_E
lp = 0;
ncf = length(gpcf.cf);
for i=1:ncf
cf = gpcf.cf{i};
lp = lp + cf.fh.lp(cf);
end
end
function lpg = gpcf_sum_lpg(gpcf)
%GPCF_SUM_LPG Evaluate gradient of the log prior with respect
% to the parameters.
%
% Description
% LPG = GPCF_SUM_LPG(GPCF) takes a covariance function
% structure GPCF and returns LPG = d log (p(th))/dth, where th
% is the vector of parameters. This is a mandatory subfunction
% used for example in gradient computations.
%
% See also
% GPCF_SUM_PAK, GPCF_SUM_UNPAK, GPCF_SUM_LP, GP_G
lpg = [];
ncf = length(gpcf.cf);
% Evaluate the gradients
for i=1:ncf
cf = gpcf.cf{i};
lpg=[lpg cf.fh.lpg(cf)];
end
end
function DKff = gpcf_sum_cfg(gpcf, x, x2, mask, i1)
%GPCF_SUM_CFG Evaluate gradient of covariance function
% with respect to the parameters.
%
% Description
% DKff = GPCF_SUM_CFG(GPCF, X) takes a covariance function
% structure GPCF, a matrix X of input vectors and returns
% DKff, the gradients of covariance matrix Kff = k(X,X) with
% respect to th (cell array with matrix elements). This is a
% mandatory subfunction used in gradient computations.
%
% DKff = GPCF_SUM_CFG(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using sparse
% approximations (e.g. FIC).
%
% DKff = GPCF_SUM_CFG(GPCF, X, [], MASK) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the diagonal of gradients of covariance matrix
% Kff = k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using sparse
% approximations (e.g. FIC).
%
% DKff = GPCF_SUM_CFG(GPCF, X, X2, [], i) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2), or k(X,X) if X2 is empty, with respect to ith
% hyperparameter. This subfunction is needed when using
% memory save option in gp_set.
%
% See also
% GPCF_SUM_PAK, GPCF_SUM_UNPAK, GPCF_SUM_LP, GP_G
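%
% Illustrative call (memory save mode): the total number of
% hyperparameters of the sum can be queried with
% nhyp = gpcf.fh.cfg(gpcf, x, [], [], 0);
% where x is the training input matrix (its values are not used when
% the last argument is 0).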
[n, m] =size(x);
ncf = length(gpcf.cf);
DKff = {};
if nargin==5
% Use memory save option
savememory=1;
i3=0;
for k=1:ncf
% Number of hyperparameters for each covariance function
cf=gpcf.cf{k};
i3(k)=cf.fh.cfg(cf,[],[],[],0);
end
if i1==0
% Return number of hyperparameters
DKff=sum(i3);
return
end
% Help indices
i3=cumsum(i3);
ind=find(cumsum(i3 >= i1)==1);
if ind>1
i1=[ind i1-i3(ind-1)];
else
i1=[ind i1];
end
i2=i1(1);
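% Worked illustration of the index split above (hypothetical sizes):
% with two component covariance functions having 3 and 2 hyperparameters,
% i3 = [3 5] after cumsum, so a request for global hyperparameter i1 = 4
% gives ind = 2 and i1 = [2 1], i.e. the 1st hyperparameter of the 2nd
% covariance function.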
else
savememory=0;
i2=1:ncf;
end
% Evaluate: DKff{1} = d Kff / d magnSigma2
% DKff{2} = d Kff / d lengthScale
% NOTE! Here we have already taken into account that the parameters are transformed
% through log() and thus dK/dlog(p) = p * dK/dp
% evaluate the gradient for training covariance
if nargin == 2 || (isempty(x2) && isempty(mask))
% % evaluate the individual covariance functions
% for i=1:ncf
% cf = gpcf.cf{i};
% C{i} = cf.fh.trcov(cf, x);
% end
% Evaluate the gradients
ind = 1:ncf;
DKff = {};
for i=i2
cf = gpcf.cf{i};
if ~savememory
DK = cf.fh.cfg(cf, x);
else
DK{1} = cf.fh.cfg(cf,x,[],[],i1(2));
end
DKff = [DKff DK];
end
% Evaluate the gradient of non-symmetric covariance (e.g. K_fu)
elseif nargin == 3 || isempty(mask)
if size(x,2) ~= size(x2,2)
error('gpcf_sum -> _ghyper: The number of columns in x and x2 has to be the same. ')
end
% Evaluate the gradients
ind = 1:ncf;
DKff = {};
for i=i2
cf = gpcf.cf{i};
if ~savememory
DK = cf.fh.cfg(cf, x, x2);
else
DK{1} = cf.fh.cfg(cf,x,x2,[],i1(2));
end
DKff = [DKff DK];
end
% Evaluate: DKff{1} = d mask(Kff,I) / d magnSigma2
% DKff{2...} = d mask(Kff,I) / d lengthScale
elseif nargin == 4 || nargin == 5
% Evaluate the gradients
ind = 1:ncf;
DKff = {};
for i=i2
cf = gpcf.cf{i};
if savememory
DK = cf.fh.cfg(cf, x, [], 1, i1(2));
else
DK = cf.fh.cfg(cf, x, [], 1);
end
DKff = [DKff DK];
end
end
if savememory
DKff=DKff{1};
end
end
function DKff = gpcf_sum_ginput(gpcf, x, x2, i1)
%GPCF_SUM_GINPUT Evaluate gradient of covariance function with
% respect to x
%
% Description
% DKff = GPCF_SUM_GINPUT(GPCF, X) takes a covariance function
% structure GPCF, a matrix X of input vectors and returns
% DKff, the gradients of covariance matrix Kff = k(X,X) with
% respect to X (cell array with matrix elements). This subfunction
% is needed when computing gradients with respect to inducing
% inputs in sparse approximations.
%
% DKff = GPCF_SUM_GINPUT(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% DKff = GPCF_SUM_GINPUT(GPCF, X, X2, i) takes a covariance
% function structure GPCF, a matrix X of input vectors
% and returns DKff, the gradients of covariance matrix Kff =
% k(X,X2), or k(X,X) if X2 is empty, with respect to ith
% covariate in X. This subfunction is needed when using
% memory save option in gp_set.
%
% See also
% GPCF_SUM_PAK, GPCF_SUM_UNPAK, GPCF_SUM_LP, GP_G
[n, m] =size(x);
ncf = length(gpcf.cf);
if nargin==4
% Use memory save option
savememory=1;
if i1==0
% Return number of covariates
if isfield(gpcf,'selectedVariables')
DKff=length(gpcf.selectedVariables);
else
DKff=m;
end
return
end
else
savememory=0;
end
% evaluate the gradient for training covariance
% (in the cross-covariance case the gradients are n x size(x2,1))
if nargin >= 3 && ~isempty(x2)
n2 = size(x2,1);
else
n2 = n;
end
if ~savememory
DKff=cellfun(@(a) zeros(n,n2), cell(1,numel(x)), 'UniformOutput', 0);
else
DKff=cellfun(@(a) zeros(n,n2), cell(1,n), 'UniformOutput', 0);
end
if nargin == 2 || isempty(x2)
% Evaluate the gradients
ind = 1:ncf;
for i=1:ncf
cf = gpcf.cf{i};
if ~savememory
DK = cf.fh.ginput(cf, x);
else
DK = cf.fh.ginput(cf,x,[],i1);
end
for j=1:length(DK)
DKff{j} = DKff{j} + DK{j};
end
end
% Evaluate the gradient of non-symmetric covariance (e.g. K_fu)
elseif nargin == 3 || nargin == 4
if size(x,2) ~= size(x2,2)
error('gpcf_sum -> _ghyper: The number of columns in x and x2 has to be the same. ')
end
% Evaluate the gradients
for i=1:ncf
cf = gpcf.cf{i};
if ~savememory
DK = cf.fh.ginput(cf, x, x2);
else
DK = cf.fh.ginput(cf,x,x2,i1);
end
for j=1:length(DK)
DKff{j} = DKff{j} + DK{j};
end
end
end
end
function C = gpcf_sum_cov(gpcf, x1, x2)
%GP_SUM_COV Evaluate covariance matrix between two input vectors
%
% Description
% C = GP_SUM_COV(GP, TX, X) takes in covariance function of a
% Gaussian process GP and two matrices TX and X that contain
% input vectors to GP. Returns covariance matrix C. Every
% element ij of C contains covariance between inputs i in TX
% and j in X. This is a mandatory subfunction used for example in
% prediction and energy computations.
%
%
% See also
% GPCF_SUM_TRCOV, GPCF_SUM_TRVAR, GP_COV, GP_TRCOV
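%
% Note: the sum covariance is simply the elementwise sum of the
% component covariances, C = C_1 + ... + C_N, as computed below.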
if isempty(x2)
x2=x1;
end
[n1,m1]=size(x1);
[n2,m2]=size(x2);
if m1~=m2
error('the number of columns of X1 and X2 has to be the same')
end
ncf = length(gpcf.cf);
% evaluate the individual covariance functions
C = 0;
for i=1:ncf
cf = gpcf.cf{i};
C = C + cf.fh.cov(cf, x1, x2);
end
end
function C = gpcf_sum_trcov(gpcf, x)
%GP_SUM_TRCOV Evaluate training covariance matrix of inputs
%
% Description
% C = GP_SUM_TRCOV(GP, TX) takes in covariance function of a
% Gaussian process GP and matrix TX that contains training
% input vectors. Returns covariance matrix C. Every element ij
% of C contains covariance between inputs i and j in TX. This
% is a mandatory subfunction used for example in prediction and
% energy computations.
%
% See also
% GPCF_SUM_COV, GPCF_SUM_TRVAR, GP_COV, GP_TRCOV
ncf = length(gpcf.cf);
% evaluate the individual covariance functions
C = 0;
for i=1:ncf
cf = gpcf.cf{i};
C = C + cf.fh.trcov(cf, x);
end
end
function C = gpcf_sum_trvar(gpcf, x)
% GP_SUM_TRVAR Evaluate training variance vector
%
% Description
% C = GP_SUM_TRVAR(GPCF, TX) takes in covariance function of
% a Gaussian process GPCF and matrix TX that contains training
% inputs. Returns variance vector C. Every element i of C
% contains variance of input i in TX. This is a mandatory
% subfunction used for example in prediction and energy computations.
%
% See also
% GPCF_SUM_COV, GP_COV, GP_TRCOV
ncf = length(gpcf.cf);
% evaluate the individual covariance functions
C = 0;
for i=1:ncf
cf = gpcf.cf{i};
C = C + cf.fh.trvar(cf, x);
end
end
function reccf = gpcf_sum_recappend(reccf, ri, gpcf)
%RECAPPEND Record append
%
% Description
% RECCF = GPCF_SUM_RECAPPEND(RECCF, RI, GPCF) takes a
% covariance function record structure RECCF, record index RI
% and covariance function structure GPCF with the current MCMC
% samples of the parameters. Returns RECCF which contains all
% the old samples and the current samples from GPCF. This
% subfunction is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC, GP_MC->RECAPPEND
if nargin == 2
% Initialize the record
reccf.type = 'gpcf_sum';
% Initialize parameters
ncf = length(ri.cf);
for i=1:ncf
cf = ri.cf{i};
reccf.cf{i} = cf.fh.recappend([], ri.cf{i});
end
% Set the function handles
reccf.fh.pak = @gpcf_sum_pak;
reccf.fh.unpak = @gpcf_sum_unpak;
reccf.fh.e = @gpcf_sum_lp;
reccf.fh.lpg = @gpcf_sum_lpg;
reccf.fh.cfg = @gpcf_sum_cfg;
reccf.fh.cov = @gpcf_sum_cov;
reccf.fh.trcov = @gpcf_sum_trcov;
reccf.fh.trvar = @gpcf_sum_trvar;
reccf.fh.recappend = @gpcf_sum_recappend;
else
% Append to the record
% Loop over all of the covariance functions
ncf = length(gpcf.cf);
for i=1:ncf
cf = gpcf.cf{i};
reccf.cf{i} = cf.fh.recappend(reccf.cf{i}, ri, cf);
end
end
end
|
github
|
lcnbeapp/beapp-master
|
gpcf_matern32.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gpcf_matern32.m
| 27,919 |
utf_8
|
578c80a98a7b515abf2ed96d9a055e23
|
function gpcf = gpcf_matern32(varargin)
%GPCF_MATERN32 Create a Matern nu=3/2 covariance function
%
% Description
% GPCF = GPCF_MATERN32('PARAM1',VALUE1,'PARAM2,VALUE2,...)
% creates Matern nu=3/2 covariance function structure in which
% the named parameters have the specified values. Any
% unspecified parameters are set to default values.
%
% GPCF = GPCF_MATERN32(GPCF,'PARAM1',VALUE1,'PARAM2,VALUE2,...)
% modify a covariance function structure with the named
% parameters altered with the specified values.
%
% Parameters for Matern nu=3/2 covariance function [default]
% magnSigma2 - magnitude (squared) [0.1]
% lengthScale - length scale for each input. [1]
% This can be either scalar corresponding
% to an isotropic function or vector
% defining own length-scale for each input
% direction.
% magnSigma2_prior - prior for magnSigma2 [prior_logunif]
% lengthScale_prior - prior for lengthScale [prior_t]
% metric - metric structure used by the covariance function []
% selectedVariables - vector defining which inputs are used [all]
% selectedVariables is shorthand for using
% metric_euclidean with corresponding components
%
% Note! If the prior is 'prior_fixed' then the parameter in
% question is considered fixed and it is not handled in
% optimization, grid integration, MCMC etc.
%
% See also
% GP_SET, GPCF_*, PRIOR_*, METRIC_*
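%
% Example (illustrative sketch; parameter values are arbitrary):
%
% pl = prior_t(); % Student-t prior, the default for lengthScale
% gpcf = gpcf_matern32('lengthScale', [1 2], 'magnSigma2', 0.5, 'lengthScale_prior', pl);
% gp = gp_set('cf', {gpcf}); % use the covariance in a GP structure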
% Copyright (c) 2007-2010 Jarno Vanhatalo
% Copyright (c) 2010 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
if nargin>0 && ischar(varargin{1}) && ismember(varargin{1},{'init' 'set'})
% remove init and set
varargin(1)=[];
end
ip=inputParser;
ip.FunctionName = 'GPCF_MATERN32';
ip.addOptional('gpcf', [], @isstruct);
ip.addParamValue('magnSigma2',0.1, @(x) isscalar(x) && x>0);
ip.addParamValue('lengthScale',1, @(x) isvector(x) && all(x>0));
ip.addParamValue('metric',[], @isstruct);
ip.addParamValue('magnSigma2_prior', prior_logunif(), ...
@(x) isstruct(x) || isempty(x));
ip.addParamValue('lengthScale_prior',prior_t(), ...
@(x) isstruct(x) || isempty(x));
ip.addParamValue('selectedVariables',[], @(x) isempty(x) || ...
(isvector(x) && all(x>0)));
ip.parse(varargin{:});
gpcf=ip.Results.gpcf;
if isempty(gpcf)
init=true;
gpcf.type = 'gpcf_matern32';
else
if ~isfield(gpcf,'type') || ~isequal(gpcf.type,'gpcf_matern32')
error('First argument does not seem to be a valid covariance function structure')
end
init=false;
end
if init
% Set the function handles to the subfunctions
gpcf.fh.pak = @gpcf_matern32_pak;
gpcf.fh.unpak = @gpcf_matern32_unpak;
gpcf.fh.lp = @gpcf_matern32_lp;
gpcf.fh.lpg = @gpcf_matern32_lpg;
gpcf.fh.cfg = @gpcf_matern32_cfg;
gpcf.fh.ginput = @gpcf_matern32_ginput;
gpcf.fh.cov = @gpcf_matern32_cov;
gpcf.fh.trcov = @gpcf_matern32_trcov;
gpcf.fh.trvar = @gpcf_matern32_trvar;
gpcf.fh.recappend = @gpcf_matern32_recappend;
end
% Initialize parameters
if init || ~ismember('lengthScale',ip.UsingDefaults)
gpcf.lengthScale = ip.Results.lengthScale;
end
if init || ~ismember('magnSigma2',ip.UsingDefaults)
gpcf.magnSigma2 = ip.Results.magnSigma2;
end
% Initialize prior structure
if init
gpcf.p=[];
end
if init || ~ismember('lengthScale_prior',ip.UsingDefaults)
gpcf.p.lengthScale=ip.Results.lengthScale_prior;
end
if init || ~ismember('magnSigma2_prior',ip.UsingDefaults)
gpcf.p.magnSigma2=ip.Results.magnSigma2_prior;
end
%Initialize metric
if ~ismember('metric',ip.UsingDefaults)
if ~isempty(ip.Results.metric)
gpcf.metric = ip.Results.metric;
gpcf = rmfield(gpcf, 'lengthScale');
gpcf.p = rmfield(gpcf.p, 'lengthScale');
elseif isfield(gpcf,'metric')
if ~isfield(gpcf,'lengthScale')
gpcf.lengthScale = gpcf.metric.lengthScale;
end
if ~isfield(gpcf.p,'lengthScale')
gpcf.p.lengthScale = gpcf.metric.p.lengthScale;
end
gpcf = rmfield(gpcf, 'metric');
end
end
% selectedVariables options implemented using metric_euclidean
if ~ismember('selectedVariables',ip.UsingDefaults)
if ~isfield(gpcf,'metric')
if ~isempty(ip.Results.selectedVariables)
gpcf.selectedVariables = ip.Results.selectedVariables;
% gpcf.metric=metric_euclidean('components',...
% num2cell(ip.Results.selectedVariables),...
% 'lengthScale',gpcf.lengthScale,...
% 'lengthScale_prior',gpcf.p.lengthScale);
% gpcf = rmfield(gpcf, 'lengthScale');
% gpcf.p = rmfield(gpcf.p, 'lengthScale');
end
elseif isfield(gpcf,'metric')
if ~isempty(ip.Results.selectedVariables)
gpcf.metric=metric_euclidean(gpcf.metric,...
'components',...
num2cell(ip.Results.selectedVariables));
if ~ismember('lengthScale',ip.UsingDefaults)
gpcf.metric.lengthScale=ip.Results.lengthScale;
gpcf = rmfield(gpcf, 'lengthScale');
end
if ~ismember('lengthScale_prior',ip.UsingDefaults)
gpcf.metric.p.lengthScale=ip.Results.lengthScale_prior;
gpcf.p = rmfield(gpcf.p, 'lengthScale');
end
else
if ~isfield(gpcf,'lengthScale')
gpcf.lengthScale = gpcf.metric.lengthScale;
end
if ~isfield(gpcf.p,'lengthScale')
gpcf.p.lengthScale = gpcf.metric.p.lengthScale;
end
gpcf = rmfield(gpcf, 'metric');
end
end
end
end
function [w,s] = gpcf_matern32_pak(gpcf, w)
%GPCF_MATERN32_PAK Combine GP covariance function hyper-parameters
% into one vector.
%
% Description
% W = GPCF_MATERN32_PAK(GPCF) takes a covariance function
% structure GPCF and combines the covariance function
% parameters and their hyperparameters into a single row
% vector W. This is a mandatory subfunction used for example
% in energy and gradient computations.
%
% w = [ log(gpcf.magnSigma2)
% (hyperparameters of gpcf.magnSigma2)
% log(gpcf.lengthScale(:))
% (hyperparameters of gpcf.lengthScale)]'
%
% See also
% GPCF_MATERN32_UNPAK
w = []; s = {};
if ~isempty(gpcf.p.magnSigma2)
w = [w log(gpcf.magnSigma2)];
s = [s; 'log(matern32.magnSigma2)'];
% Hyperparameters of magnSigma2
[wh sh] = gpcf.p.magnSigma2.fh.pak(gpcf.p.magnSigma2);
w = [w wh];
s = [s; sh];
end
if isfield(gpcf,'metric')
[wm sm] = gpcf.metric.fh.pak(gpcf.metric);
w = [w wm];
s = [s; sm];
else
if ~isempty(gpcf.p.lengthScale)
w = [w log(gpcf.lengthScale)];
if numel(gpcf.lengthScale)>1
s = [s; sprintf('log(matern32.lengthScale x %d)',numel(gpcf.lengthScale))];
else
s = [s; 'log(matern32.lengthScale)'];
end
% Hyperparameters of lengthScale
[wh sh] = gpcf.p.lengthScale.fh.pak(gpcf.p.lengthScale);
w = [w wh];
s = [s; sh];
end
end
end
function [gpcf, w] = gpcf_matern32_unpak(gpcf, w)
%GPCF_MATERN32_UNPAK Sets the covariance function parameters
% into the structure
%
% Description
% [GPCF, W] = GPCF_MATERN32_UNPAK(GPCF, W) takes a covariance
% function structure GPCF and a hyper-parameter vector W,
% and returns a covariance function structure identical to
% the input, except that the covariance hyper-parameters have
% been set to the values in W. Deletes the values set to GPCF
% from W and returns the modified W. This is a mandatory
% subfunction used for example in energy and gradient computations.
%
% Assignment is inverse of
% w = [ log(gpcf.magnSigma2)
% (hyperparameters of gpcf.magnSigma2)
% log(gpcf.lengthScale(:))
% (hyperparameters of gpcf.lengthScale)]'
%
% See also
% GPCF_MATERN32_PAK
gpp=gpcf.p;
if ~isempty(gpp.magnSigma2)
gpcf.magnSigma2 = exp(w(1));
w = w(2:end);
% Hyperparameters of magnSigma2
[p, w] = gpcf.p.magnSigma2.fh.unpak(gpcf.p.magnSigma2, w);
gpcf.p.magnSigma2 = p;
end
if isfield(gpcf,'metric')
[metric, w] = gpcf.metric.fh.unpak(gpcf.metric, w);
gpcf.metric = metric;
else
if ~isempty(gpp.lengthScale)
i1=1;
i2=length(gpcf.lengthScale);
gpcf.lengthScale = exp(w(i1:i2));
w = w(i2+1:end);
% Hyperparameters of lengthScale
[p, w] = gpcf.p.lengthScale.fh.unpak(gpcf.p.lengthScale, w);
gpcf.p.lengthScale = p;
end
end
end
function lp = gpcf_matern32_lp(gpcf)
%GPCF_MATERN32_LP Evaluate the log prior of covariance function parameters
%
% Description
% LP = GPCF_MATERN32_LP(GPCF) takes a covariance function
% structure GPCF and returns log(p(th)), where th collects the
% parameters. The log-Jacobian of the log transformation used when
% packing the parameters is included. (Note that the parameters are
% log transformed when packed.) This is a mandatory subfunction used
% for example in energy computations.
%
% See also
% GPCF_MATERN32_PAK, GPCF_MATERN32_UNPAK, GPCF_MATERN32_LPG, GP_E
%
% Evaluate the prior contribution to the error. The parameters that
% are sampled are transformed, e.g., W = log(w) where w is all
% the "real" samples. On the other hand errors are evaluated in
% the W-space so we also need to take into account the Jacobian of
% the transformation, e.g., W -> w = exp(W). See Gelman et al., 2004,
% Bayesian Data Analysis, second edition, p. 24.
lp = 0;
gpp=gpcf.p;
if ~isempty(gpcf.p.magnSigma2)
lp = lp +gpp.magnSigma2.fh.lp(gpcf.magnSigma2, ...
gpp.magnSigma2) +log(gpcf.magnSigma2);
end
if isfield(gpcf,'metric')
lp = lp +gpcf.metric.fh.lp(gpcf.metric);
elseif ~isempty(gpp.lengthScale)
lp = lp +gpp.lengthScale.fh.lp(gpcf.lengthScale, ...
gpp.lengthScale) +sum(log(gpcf.lengthScale));
end
end
function lpg = gpcf_matern32_lpg(gpcf)
%GPCF_MATERN32_LPG Evaluate gradient of the log prior with respect
% to the parameters.
%
% Description
% LPG = GPCF_MATERN32_LPG(GPCF) takes a covariance function
% structure GPCF and returns LPG = d log (p(th))/dth, where th
% is the vector of parameters. This is a mandatory subfunction
% used for example in gradient computations.
%
% See also
% GPCF_MATERN32_PAK, GPCF_MATERN32_UNPAK, GPCF_MATERN32_LP, GP_G
lpg = [];
gpp=gpcf.p;
if ~isempty(gpcf.p.magnSigma2)
lpgs = gpp.magnSigma2.fh.lpg(gpcf.magnSigma2, gpp.magnSigma2);
lpg = [lpg lpgs(1).*gpcf.magnSigma2+1 lpgs(2:end)];
end
if isfield(gpcf,'metric')
lpg_dist = gpcf.metric.fh.lpg(gpcf.metric);
lpg=[lpg lpg_dist];
else
if ~isempty(gpcf.p.lengthScale)
lll = length(gpcf.lengthScale);
lpgs = gpp.lengthScale.fh.lpg(gpcf.lengthScale, gpp.lengthScale);
lpg = [lpg lpgs(1:lll).*gpcf.lengthScale+1 lpgs(lll+1:end)];
end
end
end
function DKff = gpcf_matern32_cfg(gpcf, x, x2, mask,i1)
%GPCF_MATERN32_CFG Evaluate gradient of covariance function
% hyper-prior with respect to the parameters.
%
% Description
% DKff = GPCF_MATERN32_CFG(GPCF, X) takes a
% covariance function structure GPCF, a matrix X of input
% vectors and returns DKff, the gradients of covariance matrix
% Kff = k(X,X) with respect to th (cell array with matrix
% elements). This is a mandatory subfunction used for example
% in gradient computations.
%
% DKff = GPCF_MATERN32_CFG(GPCF, X, X2) takes a
% covariance function structure GPCF, a matrix X of input
% vectors and returns DKff, the gradients of covariance matrix
% Kff = k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using sparse
% approximations (e.g. FIC).
%
% DKff = GPCF_MATERN32_CFG(GPCF, X, [], MASK)
% takes a covariance function structure GPCF, a matrix X
% of input vectors and returns DKff, the diagonal of gradients
% of covariance matrix Kff = k(X,X2) with respect to th (cell
% array with matrix elements). This subfunction is needed when
% using sparse approximations (e.g. FIC).
%
% DKff = GPCF_MATERN32_CFG(GPCF, X, X2, [], i) takes a
% covariance function structure GPCF, a matrix X of input
% vectors and returns DKff, the gradient of covariance matrix
% Kff = k(X,X2) with respect to ith hyperparameter (matrix).
% 5th input can also be used without X2. This subfunction is
% needed when using memory save option in gp_set.
%
% See also
% GPCF_MATERN32_PAK, GPCF_MATERN32_UNPAK, GPCF_MATERN32_LP, GP_G
gpp=gpcf.p;
i2=1;
DKff = {};
gprior = [];
if nargin==5
% Use memory save option
savememory=1;
if i1==0
% Return number of hyperparameters
i=0;
if ~isempty(gpcf.p.magnSigma2)
i=i+1;
end
if ~isempty(gpcf.p.lengthScale)
i=i+length(gpcf.lengthScale);
end
DKff=i;
return
end
else
savememory=0;
end
% Evaluate: DKff{1} = d Kff / d magnSigma2
% DKff{2} = d Kff / d lengthScale
% NOTE! Here we have already taken into account that the parameters
% are transformed through log() and thus dK/dlog(p) = p * dK/dp
% evaluate the gradient for training covariance
if nargin == 2 || (isempty(x2) && isempty(mask))
Cdm = gpcf_matern32_trcov(gpcf, x);
ii1=0;
if ~isempty(gpcf.p.magnSigma2)
ii1 = ii1 +1;
DKff{ii1} = Cdm;
end
if isfield(gpcf,'metric')
dist = gpcf.metric.fh.dist(gpcf.metric, x);
distg = gpcf.metric.fh.distg(gpcf.metric, x);
gprior_dist = gpcf.metric.fh.lpg(gpcf.metric);
for i=1:length(distg)
ii1 = ii1+1;
DKff{ii1} = -gpcf.magnSigma2.*3.*dist.*distg{i}.*exp(-sqrt(3).*dist);
end
else
if isfield(gpcf,'selectedVariables')
x = x(:,gpcf.selectedVariables);
end
[n, m] =size(x);
if savememory
if i1==1
DKff=DKff{ii1};
return
else
ii1=ii1-1;
i1=i1-1;
end
else
i1=1:m;
end
if ~isempty(gpcf.p.lengthScale)
ma2 = gpcf.magnSigma2;
% loop over all the lengthScales
if length(gpcf.lengthScale) == 1
% In the case of isotropic MATERN32
s = 1./gpcf.lengthScale;
dist = 0;
for i=1:m
D = bsxfun(@minus,x(:,i),x(:,i)');
dist = dist + D.^2;
end
D = ma2.*3.*dist.*s.^2.*exp(-sqrt(3.*dist).*s);
ii1 = ii1+1;
DKff{ii1} = D;
else
% In the case ARD is used
s = 1./gpcf.lengthScale.^2;
dist = 0;
for i=1:m
dist = dist + s(i).*(bsxfun(@minus,x(:,i),x(:,i)')).^2;
end
dist=sqrt(dist);
for i=i1
D = 3.*ma2.*s(i).*(bsxfun(@minus,x(:,i),x(:,i)')).^2.*exp(-sqrt(3).*dist);
ii1 = ii1+1;
DKff{ii1} = D;
end
end
end
end
% Evaluate the gradient of non-symmetric covariance (e.g. K_fu)
elseif nargin == 3 || isempty(mask)
if size(x,2) ~= size(x2,2)
error('gpcf_matern32 -> _ghyper: The number of columns in x and x2 has to be the same. ')
end
ii1=0;
K = gpcf.fh.cov(gpcf, x, x2);
if ~isempty(gpcf.p.magnSigma2)
ii1 = ii1 +1;
DKff{ii1} = K;
end
if isfield(gpcf,'metric')
dist = gpcf.metric.fh.dist(gpcf.metric, x, x2);
distg = gpcf.metric.fh.distg(gpcf.metric, x, x2);
gprior_dist = gpcf.metric.fh.lpg(gpcf.metric);
for i=1:length(distg)
ii1 = ii1+1;
DKff{ii1} = -gpcf.magnSigma2.*3.*dist.*distg{i}.*exp(-sqrt(3).*dist);
end
else
if isfield(gpcf,'selectedVariables')
x = x(:,gpcf.selectedVariables);
x2 = x2(:,gpcf.selectedVariables);
end
[n, m] =size(x);
if savememory
if i1==1
DKff=DKff{ii1};
return
else
ii1=ii1-1;
i1=i1-1;
end
else
i1=1:m;
end
if ~isempty(gpcf.p.lengthScale)
% Evaluate help matrix for calculations of derivatives with respect
% to the lengthScale
if length(gpcf.lengthScale) == 1
% In the case of an isotropic matern32
s = 1./gpcf.lengthScale;
ma2 = gpcf.magnSigma2;
dist = 0;
for i=1:m
dist = dist + (bsxfun(@minus,x(:,i),x2(:,i)')).^2;
end
DK_l = 3.*ma2.*s.^2.*dist.*exp(-s.*sqrt(3.*dist));
ii1=ii1+1;
DKff{ii1} = DK_l;
else
% In the case ARD is used
s = 1./gpcf.lengthScale.^2;
ma2 = gpcf.magnSigma2;
dist = 0;
for i=1:m
dist = dist + s(i).*(bsxfun(@minus,x(:,i),x2(:,i)')).^2;
end
for i=i1
DK_l = 3.*ma2.*s(i).*(bsxfun(@minus,x(:,i),x2(:,i)')).^2.*exp(-sqrt(3.*dist));
ii1=ii1+1;
DKff{ii1} = DK_l;
end
end
end
end
% Evaluate: DKff{1} = d mask(Kff,I) / d magnSigma2
% DKff{2...} = d mask(Kff,I) / d lengthScale
elseif nargin == 4 || nargin == 5
ii1=0;
if ~isempty(gpcf.p.magnSigma2) && (~savememory || all(i1==1))
ii1 = ii1+1;
DKff{ii1} = gpcf.fh.trvar(gpcf, x); % d mask(Kff,I) / d magnSigma2
end
if isfield(gpcf,'metric')
dist = 0;
distg = gpcf.metric.fh.distg(gpcf.metric, x, [], 1);
gprior_dist = gpcf.metric.fh.lpg(gpcf.metric);
for i=1:length(distg)
ii1 = ii1+1;
DKff{ii1} = 0;
end
else
if ~isempty(gpcf.p.lengthScale)
for i2=1:length(gpcf.lengthScale)
ii1 = ii1+1;
DKff{ii1} = 0; % d mask(Kff,I) / d lengthScale
end
end
end
end
if savememory
DKff=DKff{1};
end
end
function DKff = gpcf_matern32_ginput(gpcf, x, x2, i1)
%GPCF_MATERN32_GINPUT Evaluate gradient of covariance function with
% respect to x.
%
% Description
% DKff = GPCF_MATERN32_GINPUT(GPCF, X) takes a covariance
% function structure GPCF, a matrix X of input vectors
% and returns DKff, the gradients of covariance matrix Kff =
% k(X,X) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% DKff = GPCF_MATERN32_GINPUT(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors
% and returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% DKff = GPCF_MATERN32_GINPUT(GPCF, X, X2, i) takes a covariance
% function structure GPCF, a matrix X of input vectors
% and returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to ith covariate in X (matrix). This
% subfunction is needed when using memory save option in gp_set.
%
% See also
% GPCF_MATERN32_PAK, GPCF_MATERN32_UNPAK, GPCF_MATERN32_LP, GP_G
[n, m] =size(x);
ma2 = gpcf.magnSigma2;
ii1 = 0;
if nargin==4
% Use memory save option
savememory=1;
if i1==0
% Return number of covariates
if isfield(gpcf,'selectedVariables')
DKff=length(gpcf.selectedVariables);
else
DKff=m;
end
return
end
else
savememory=0;
end
if nargin == 2 || isempty(x2)
if isfield(gpcf,'metric')
K = gpcf.fh.trcov(gpcf, x);
dist = gpcf.metric.fh.dist(gpcf.metric, x);
gdist = gpcf.metric.fh.ginput(gpcf.metric, x);
for i=1:length(gdist)
ii1 = ii1+1;
DKff{ii1} = -K./(1+sqrt(3)*dist).*3.*dist.*gdist{ii1};
end
else
if length(gpcf.lengthScale) == 1
% In the case of an isotropic
s = repmat(1./gpcf.lengthScale.^2, 1, m);
else
s = 1./gpcf.lengthScale.^2;
end
dist=0;
for i2=1:m
dist = dist + s(i2).*(bsxfun(@minus,x(:,i2),x(:,i2)')).^2;
end
if ~savememory
i1=1:m;
end
for i=i1
for j = 1:n
D1 = zeros(n,n);
D1(j,:) = (s(i)).*bsxfun(@minus,x(j,i),x(:,i)');
D1 = D1 + D1';
DK = -3.*ma2.*exp(-sqrt(3.*dist)).*D1;
ii1 = ii1 + 1;
DKff{ii1} = DK;
end
end
end
elseif nargin == 3 || nargin == 4
if isfield(gpcf,'metric')
K = gpcf.fh.cov(gpcf, x, x2);
dist = gpcf.metric.fh.dist(gpcf.metric, x, x2);
gdist = gpcf.metric.fh.ginput(gpcf.metric, x, x2);
for i=1:length(gdist)
ii1 = ii1+1;
DKff{ii1} = -K./(1+sqrt(3)*dist).*3.*dist.*gdist{ii1};
end
else
[n2, m2] =size(x2);
if length(gpcf.lengthScale) == 1
s = repmat(1./gpcf.lengthScale.^2, 1, m);
else
s = 1./gpcf.lengthScale.^2;
end
dist=0;
for i2=1:m
dist = dist + s(i2).*(bsxfun(@minus,x(:,i2),x2(:,i2)')).^2;
end
if ~savememory
i1=1:m;
end
ii1 = 0;
for i=i1
for j = 1:n
D1 = zeros(n,n2);
D1(j,:) = (s(i)).*bsxfun(@minus,x(j,i),x2(:,i)');
DK = -3.*ma2.*exp(-sqrt(3.*dist)).*D1;
ii1 = ii1 + 1;
DKff{ii1} = DK;
end
end
end
end
end
function C = gpcf_matern32_cov(gpcf, x1, x2)
%GP_MATERN32_COV Evaluate covariance matrix between two input vectors
%
% Description
% C = GP_MATERN32_COV(GP, TX, X) takes in covariance function
% of a Gaussian process GP and two matrices TX and X that
% contain input vectors to GP. Returns covariance matrix C.
% Every element ij of C contains covariance between inputs i
% in TX and j in X. This is a mandatory subfunction used for
% example in prediction and energy computations.
%
%
% See also
% GPCF_MATERN32_TRCOV, GPCF_MATERN32_TRVAR, GP_COV, GP_TRCOV
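%
% Note (derived from the code below): with the scaled Euclidean metric
%   r = sqrt( sum_j (x1_j - x2_j)^2 / lengthScale_j^2 )
% the Matern nu=3/2 covariance evaluated here is
%   k(x1,x2) = magnSigma2 * (1 + sqrt(3)*r) * exp(-sqrt(3)*r).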
if isempty(x2)
x2=x1;
end
if size(x1,2)~=size(x2,2)
error('the number of columns of X1 and X2 has to be the same')
end
if isfield(gpcf,'metric')
dist = gpcf.metric.fh.dist(gpcf.metric, x1, x2);
dist(dist<eps) = 0;
C = gpcf.magnSigma2.*(1+sqrt(3).*dist).*exp(-sqrt(3).*dist);
else
if isfield(gpcf,'selectedVariables')
x1 = x1(:,gpcf.selectedVariables);
x2 = x2(:,gpcf.selectedVariables);
end
[n1,m1]=size(x1);
[n2,m2]=size(x2);
C=zeros(n1,n2);
ma2 = gpcf.magnSigma2;
% Evaluate the covariance
if ~isempty(gpcf.lengthScale)
s2 = 1./gpcf.lengthScale.^2;
% If ARD is not used make s a vector of
% equal elements
if size(s2)==1
s2 = repmat(s2,1,m1);
end
dist=zeros(n1,n2);
for j=1:m1
dist = dist + s2(j).*(bsxfun(@minus,x1(:,j),x2(:,j)')).^2;
end
dist = sqrt(dist);
C = ma2.*(1+sqrt(3).*dist).*exp(-sqrt(3).*dist);
end
C(C<eps)=0;
end
end
function C = gpcf_matern32_trcov(gpcf, x)
%GP_MATERN32_TRCOV Evaluate training covariance matrix of inputs
%
% Description
% C = GP_MATERN32_TRCOV(GP, TX) takes in covariance function
% of a Gaussian process GP and matrix TX that contains
% training input vectors. Returns covariance matrix C. Every
% element ij of C contains covariance between inputs i and j
% in TX. This is a mandatory subfunction used for example in
% prediction and energy computations.
%
% See also
% GPCF_MATERN32_COV, GPCF_MATERN32_TRVAR, GP_COV, GP_TRCOV
if isfield(gpcf,'metric')
ma2 = gpcf.magnSigma2;
dist = gpcf.metric.fh.dist(gpcf.metric, x);
C = ma2.*(1+sqrt(3).*dist).*exp(-sqrt(3).*dist);
else
% Try to use the C-implementation
C = trcov(gpcf,x);
if isnan(C)
% If there is no C-implementation, compute the covariance here
if isfield(gpcf, 'selectedVariables')
x = x(:,gpcf.selectedVariables);
end
[n, m] =size(x);
s2 = 1./(gpcf.lengthScale).^2;
if size(s2)==1
s2 = repmat(s2,1,m);
end
ma2 = gpcf.magnSigma2;
% Here we take advantage of the
% symmetry of covariance matrix
C=zeros(n,n);
for i1=2:n
i1n=(i1-1)*n;
for i2=1:i1-1
ii=i1+(i2-1)*n;
for i3=1:m
C(ii)=C(ii)+s2(i3).*(x(i1,i3)-x(i2,i3)).^2;         % accumulate the scaled squared distance
end
C(i1n+i2)=C(ii);
end
end
dist = sqrt(C);
C = ma2.*(1+sqrt(3).*dist).*exp(-sqrt(3).*dist);
C(C<eps)=0;
end
end
end
function C = gpcf_matern32_trvar(gpcf, x)
%GP_MATERN32_TRVAR Evaluate training variance vector
%
% Description
% C = GP_MATERN32_TRVAR(GPCF, TX) takes in covariance function
% of a Gaussian process GPCF and matrix TX that contains
% training inputs. Returns variance vector C. Every element i
% of C contains variance of input i in TX. This is a mandatory
% subfunction used for example in prediction and energy computations.
%
%
% See also
% GPCF_MATERN32_COV, GP_COV, GP_TRCOV
[n, m] =size(x);
C = ones(n,1).*gpcf.magnSigma2;
C(C<eps)=0;
end
function reccf = gpcf_matern32_recappend(reccf, ri, gpcf)
%RECAPPEND Record append
%
% Description
% RECCF = GPCF_MATERN32_RECAPPEND(RECCF, RI, GPCF) takes a
% covariance function record structure RECCF, record index RI
% and covariance function structure GPCF with the current MCMC
% samples of the parameters. Returns RECCF which contains
% all the old samples and the current samples from GPCF.
% This subfunction is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC and GP_MC -> RECAPPEND
if nargin == 2
% Initialize the record
reccf.type = 'gpcf_matern32';
% Initialize parameters
reccf.lengthScale= [];
reccf.magnSigma2 = [];
% Set the function handles
reccf.fh.pak = @gpcf_matern32_pak;
reccf.fh.unpak = @gpcf_matern32_unpak;
reccf.fh.e = @gpcf_matern32_lp;
reccf.fh.lpg = @gpcf_matern32_lpg;
reccf.fh.cfg = @gpcf_matern32_cfg;
reccf.fh.cov = @gpcf_matern32_cov;
reccf.fh.trcov = @gpcf_matern32_trcov;
reccf.fh.trvar = @gpcf_matern32_trvar;
reccf.fh.recappend = @gpcf_matern32_recappend;
reccf.p=[];
reccf.p.lengthScale=[];
reccf.p.magnSigma2=[];
if isfield(ri.p,'lengthScale') && ~isempty(ri.p.lengthScale)
reccf.p.lengthScale = ri.p.lengthScale;
end
if ~isempty(ri.p.magnSigma2)
reccf.p.magnSigma2 = ri.p.magnSigma2;
end
if isfield(ri, 'selectedVariables')
reccf.selectedVariables = ri.selectedVariables;
end
else
% Append to the record
gpp = gpcf.p;
if ~isfield(gpcf,'metric')
% record lengthScale
reccf.lengthScale(ri,:)=gpcf.lengthScale;
if isfield(gpp,'lengthScale') && ~isempty(gpp.lengthScale)
reccf.p.lengthScale = gpp.lengthScale.fh.recappend(reccf.p.lengthScale, ri, gpcf.p.lengthScale);
end
end
% record magnSigma2
reccf.magnSigma2(ri,:)=gpcf.magnSigma2;
if isfield(gpp,'magnSigma2') && ~isempty(gpp.magnSigma2)
reccf.p.magnSigma2 = gpp.magnSigma2.fh.recappend(reccf.p.magnSigma2, ri, gpcf.p.magnSigma2);
end
end
end
|
github
|
lcnbeapp/beapp-master
|
lik_logit.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/lik_logit.m
| 14,344 |
utf_8
|
c16705d8ddef02a29ecefd128787faef
|
function lik = lik_logit(varargin)
%LIK_LOGIT Create a Logit likelihood structure
%
% Description
% LIK = LIK_LOGIT creates Logit likelihood for classification
% problem with class labels {-1,1}.
%
% The likelihood is defined as follows:
% __ n
% p(y|f) = || i=1 1/(1 + exp(-y_i*f_i) )
% where f is the latent value vector.
%
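%  A minimal usage sketch (illustrative only; the covariance function,
%  the optimisation step and the data x, y, xt are assumptions, not part
%  of this file):
%
%      lik = lik_logit();
%      gp  = gp_set('lik', lik, 'cf', gpcf_sexp(), ...
%                   'latent_method', 'Laplace');
%      gp  = gp_optim(gp, x, y);    % y contains class labels in {-1,1}
%      [Eft, Varft] = gp_pred(gp, x, y, xt);
%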
% See also
% GP_SET, LIK_*
%
% Copyright (c) 2008-2010 Jarno Vanhatalo
% Copyright (c) 2010 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'LIK_LOGIT';
ip.addOptional('lik', [], @isstruct);
ip.parse(varargin{:});
lik=ip.Results.lik;
if isempty(lik)
init=true;
lik.type = 'Logit';
else
if ~isfield(lik,'type') || ~isequal(lik.type,'Logit')
error('First argument does not seem to be a valid likelihood function structure')
end
init=false;
end
if init
% Set the function handles to the subfunctions
lik.fh.pak = @lik_logit_pak;
lik.fh.unpak = @lik_logit_unpak;
lik.fh.ll = @lik_logit_ll;
lik.fh.llg = @lik_logit_llg;
lik.fh.llg2 = @lik_logit_llg2;
lik.fh.llg3 = @lik_logit_llg3;
lik.fh.tiltedMoments = @lik_logit_tiltedMoments;
lik.fh.predy = @lik_logit_predy;
lik.fh.invlink = @lik_logit_invlink;
lik.fh.recappend = @lik_logit_recappend;
end
end
function [w,s] = lik_logit_pak(lik)
%LIK_LOGIT_PAK Combine likelihood parameters into one vector.
%
% Description
% W = LIK_LOGIT_PAK(LIK) takes a likelihood structure LIK and
% returns an empty verctor W. If Logit likelihood had
% parameters this would combine them into a single row vector
% W (see e.g. lik_negbin). This is a mandatory subfunction used
% for example in energy and gradient computations.
%
% See also
% LIK_NEGBIN_UNPAK, GP_PAK
w = []; s = {};
end
function [lik, w] = lik_logit_unpak(lik, w)
%LIK_LOGIT_UNPAK Extract likelihood parameters from the vector.
%
% Description
% W = LIK_LOGIT_UNPAK(W, LIK) Doesn't do anything.
%
% If Logit likelihood had parameters this would extract the
% parameters from the vector W to the LIK structure. This is a
% mandatory subfunction used for example in energy and gradient
% computations.
%
% See also
% LIK_LOGIT_PAK, GP_UNPAK
lik=lik;
w=w;
end
function ll = lik_logit_ll(lik, y, f, z)
%LIK_LOGIT_LL Log likelihood
%
% Description
% E = LIK_LOGIT_LL(LIK, Y, F) takes a likelihood structure
% LIK, class labels Y, and latent values F. Returns the log
% likelihood, log p(y|f,z). This subfunction is also used in
% information criteria (DIC, WAIC) computations.
%
% See also
% LIK_LOGIT_LLG, LIK_LOGIT_LLG3, LIK_LOGIT_LLG2, GPLA_E
if ~isempty(find(abs(y)~=1))
error('lik_logit: The class labels have to be {-1,1}')
end
ll = sum(-log(1+exp(-y.*f)));
end
function llg = lik_logit_llg(lik, y, f, param, z)
%LIK_LOGIT_LLG Gradient of the log likelihood
%
% Description
% G = LIK_LOGIT_LLG(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, class labels Y, and latent values F. Returns
% the gradient of the log likelihood with respect to PARAM. At the
% moment PARAM can be 'param' or 'latent'. This subfunction is
% needed when using Laplace approximation or MCMC for inference
% with non-Gaussian likelihoods.
%
% See also
% LIK_LOGIT_LL, LIK_LOGIT_LLG2, LIK_LOGIT_LLG3, GPLA_E
if ~isempty(find(abs(y)~=1))
error('lik_logit: The class labels have to be {-1,1}')
end
t = (y+1)/2;
PI = 1./(1+exp(-f));
llg = t - PI;
%llg = (y+1)/2 - 1./(1+exp(-f));
end
function llg2 = lik_logit_llg2(lik, y, f, param, z)
%LIK_LOGIT_LLG2 Second gradients of the log likelihood
%
% Description
% LLG2 = LIK_LOGIT_LLG2(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, class labels Y, and latent values F. Returns
% the Hessian of the log likelihood with respect to PARAM. At
% the moment PARAM can be only 'latent'. LLG2 is a vector with
% diagonal elements of the Hessian matrix (off diagonals are
% zero). This subfunction is needed when using Laplace approximation
% or EP for inference with non-Gaussian likelihoods.
%
% See also
% LIK_LOGIT_LL, LIK_LOGIT_LLG, LIK_LOGIT_LLG3, GPLA_E
PI = 1./(1+exp(-f));
llg2 = -PI.*(1-PI);
end
function llg3 = lik_logit_llg3(lik, y, f, param, z)
%LIK_LOGIT_LLG3 Third gradients of the log likelihood
%
% Description
% LLG3 = LIK_LOGIT_LLG3(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, class labels Y, and latent values F and
% returns the third gradients of the log likelihood with
% respect to PARAM. At the moment PARAM can be only 'latent'.
% LLG3 is a vector with third gradients. This subfunction is
% needed when using Laplace approximation for inference with
% non-Gaussian likelihoods.
%
% See also
% LIK_LOGIT_LL, LIK_LOGIT_LLG, LIK_LOGIT_LLG2, GPLA_E, GPLA_G
if ~isempty(find(abs(y)~=1))
error('lik_logit: The class labels have to be {-1,1}')
end
t = (y+1)/2;
PI = 1./(1+exp(-f));
llg3 = -PI.*(1-PI).*(1-2*PI);
end
function [logM_0, m_1, sigm2hati1] = lik_logit_tiltedMoments(lik, y, i1, sigm2_i, myy_i, z)
%LIK_LOGIT_TILTEDMOMENTS Returns the marginal moments for EP algorithm
%
% Description
% [M_0, M_1, M2] = LIK_LOGIT_TILTEDMOMENTS(LIK, Y, I, S2, MYY)
% takes a likelihood structure LIK, class labels Y, index I
% and cavity variance S2 and mean MYY. Returns the zeroth
% moment M_0, mean M_1 and variance M_2 of the posterior
% marginal (see Rasmussen and Williams (2006): Gaussian
% processes for Machine Learning, page 55). This subfunction
% is needed when using EP for inference with non-Gaussian
% likelihoods.
%
% See also
% GPEP_E
% don't check this here, because this function is called so often by EP
% if ~isempty(find(abs(y)~=1))
% error('lik_logit: The class labels have to be {-1,1}')
% end
yy = y(i1);
logM_0=zeros(size(yy));
m_1=zeros(size(yy));
sigm2hati1=zeros(size(yy));
for i=1:length(i1)
% get a function handle of an unnormalized tilted distribution
% (likelihood * cavity = Logit * Gaussian)
% and useful integration limits
[tf,minf,maxf]=init_logit_norm(yy(i),myy_i(i),sigm2_i(i));
if isnan(minf) || isnan(maxf)
logM_0(i)=NaN; m_1(i)=NaN; sigm2hati1(i)=NaN;
continue
end
% Integrate with an adaptive Gauss-Kronrod quadrature
% (Rasmussen and Nickisch use in GPML interpolation between
% a cumulative Gaussian scale mixture and linear tail
% approximation, which could be faster, but quadrature also
% takes only a fraction of the time EP uses overall, so no
% need to change...)
RTOL = 1.e-6;
ATOL = 1.e-10;
[m_0, m_1(i), m_2] = quad_moments(tf, minf, maxf, RTOL, ATOL);
sigm2hati1(i) = m_2 - m_1(i).^2;
% If the computed second central moment is not smaller than the
% cavity variance, integrate more precisely. Theoretically should be
% sigm2hati1 < sigm2_i.
if sigm2hati1(i) >= sigm2_i(i)
ATOL = ATOL.^2;
RTOL = RTOL.^2;
[m_0, m_1(i), m_2] = quad_moments(tf, minf, maxf, RTOL, ATOL);
sigm2hati1(i) = m_2 - m_1(i).^2;
if sigm2hati1(i) >= sigm2_i(i)
%warning('lik_logit_tilted_moments: sigm2hati1 >= sigm2_i');
sigm2hati1(i)=sigm2_i(i)-eps;
end
end
logM_0(i) = log(m_0);
end
end
function [lpy, Ey, Vary] = lik_logit_predy(lik, Ef, Varf, yt, zt)
%LIK_LOGIT_PREDY Returns the predictive mean, variance and density of y
%
% Description
% LPY = LIK_LOGIT_PREDY(LIK, EF, VARF, YT)
% Returns logarithm of the predictive density of YT, that is
% p(yt | y) = \int p(yt | f) p(f|y) df.
% This requires also the class labels YT. This subfunction
% is needed when computing posterior predictive distributions
% for future observations.
%
% [LPY, EY, VARY] = LIK_LOGIT_PREDY(LIK, EF, VARF) takes a
% likelihood structure LIK, posterior mean EF and posterior
% Variance VARF of the latent variable and returns also the
% posterior predictive mean EY and variance VARY of the
% observations related to the latent variables. This subfunction
% is needed when computing posterior predictive distributions for
% future observations.
%
% See also
% GPLA_PRED, GPEP_PRED, GPMC_PRED
if nargout > 1
py1 = zeros(length(Ef),1);
for i1=1:length(Ef)
myy_i = Ef(i1);
sigm_i = sqrt(Varf(i1));
minf=myy_i-6*sigm_i;
maxf=myy_i+6*sigm_i;
F = @(f)1./(1+exp(-f)).*norm_pdf(f,myy_i,sigm_i);
py1(i1) = quadgk(F,minf,maxf);
end
Ey = 2*py1-1;
Vary = 1-(2*py1-1).^2;
end
if ~isempty(find(abs(yt)~=1))
error('lik_logit: The class labels have to be {-1,1}')
end
% Quadrature integration
lpy = zeros(length(yt),1);
for i1 = 1:length(yt)
% get a function handle of the likelihood times posterior
% (likelihood * posterior = Logit * Gaussian)
% and useful integration limits
[pdf,minf,maxf]=init_logit_norm(...
yt(i1),Ef(i1),Varf(i1));
% integrate over the f to get posterior predictive distribution
lpy(i1) = log(quadgk(pdf, minf, maxf));
end
end
function [df,minf,maxf] = init_logit_norm(yy,myy_i,sigm2_i)
%INIT_LOGIT_NORM
%
% Description
% Return function handle to a function evaluating Logit *
% Gaussian which is used for evaluating (likelihood * cavity)
% or (likelihood * posterior) Return also useful limits for
% integration. This is private function for lik_logit. This
% subfunction is needed by subfunctions tiltedMoments, siteDeriv
% and predy.
%
% See also
% LIK_LOGIT_TILTEDMOMENTS, LIK_LOGIT_PREDY
% avoid repetitive evaluation of constant part
ldconst = -log(sigm2_i)/2 -log(2*pi)/2;
% Create function handle for the function to be integrated
df = @logit_norm;
% use log to avoid underflow, and derivatives for faster search
ld = @log_logit_norm;
ldg = @log_logit_norm_g;
ldg2 = @log_logit_norm_g2;
% Set the limits for integration
% Logit likelihood is log-concave so the logit_norm
% function is unimodal, which makes things easier
% approximate guess for the location of the mode
if sign(myy_i)==sign(yy)
% the log likelihood is flat on this side
modef = myy_i;
else
% the log likelihood is approximately yy*f on this side
modef=sign(myy_i)*max(abs(myy_i)-sigm2_i,0);
end
% find the mode of the integrand using Newton iterations
% a few iterations are enough, since the first guess is in the right direction
niter=2; % number of Newton iterations
mindelta=1e-6; % tolerance in stopping Newton iterations
for ni=1:niter
g=ldg(modef);
h=ldg2(modef);
delta=-g/h;
modef=modef+delta;
if abs(delta)<mindelta
break
end
end
% integrand limits based on Gaussian approximation at mode
modes=sqrt(-1/h);
minf=modef-8*modes;
maxf=modef+8*modes;
modeld=ld(modef);
if isinf(modeld) || isnan(modeld)
minf=NaN;maxf=NaN;
return
end
iter=0;
% check that density at end points is low enough
lddiff=20; % min difference in log-density between mode and end-points
minld=ld(minf);
step=1;
while minld>(modeld-lddiff)
minf=minf-step*modes;
minld=ld(minf);
iter=iter+1;
step=step*2;
if iter>100
error(['lik_logit -> init_logit_norm: ' ...
'integration interval minimum not found ' ...
'even after looking hard!'])
end
end
maxld=ld(maxf);
step=1;
while maxld>(modeld-lddiff)
maxf=maxf+step*modes;
maxld=ld(maxf);
iter=iter+1;
step=step*2;
if iter>100
error(['lik_logit -> init_logit_norm: ' ...
'integration interval maximum not found ' ...
'even after looking hard!'])
end
end
function integrand = logit_norm(f)
% Logit * Gaussian
integrand = exp(ldconst ...
-log(1+exp(-yy.*f)) ...
-0.5*(f-myy_i).^2./sigm2_i);
end
function log_int = log_logit_norm(f)
% log(Logit * Gaussian)
% log_logit_norm is used to avoid underflow when searching
% integration interval
log_int = ldconst ...
-log(1+exp(-yy.*f)) ...
-0.5*(f-myy_i).^2./sigm2_i;
end
function g = log_logit_norm_g(f)
% d/df log(Logit * Gaussian)
% derivative of log_logit_norm
g = yy./(exp(f*yy)+1)...
+ (myy_i - f)./sigm2_i;
end
function g2 = log_logit_norm_g2(f)
% d^2/df^2 log(Logit * Gaussian)
% second derivative of log_logit_norm
a=exp(f*yy);
g2 = -a*(yy./(a+1)).^2 ...
-1/sigm2_i;
end
end
function p = lik_logit_invlink(lik, f, z)
%LIK_LOGIT_INVLINK Returns values of inverse link function
%
% Description
% P = LIK_LOGIT_INVLINK(LIK, F) takes a likelihood structure LIK and
% latent values F and returns the values of inverse link function P.
% This subfunction is needed when using function gp_predprctmu.
%
% See also
% LIK_LOGIT_LL, LIK_LOGIT_PREDY
p = logitinv(f);
end
function reclik = lik_logit_recappend(reclik, ri, lik)
%RECAPPEND Append the parameters to the record
%
% Description
% RECLIK = LIK_LOGIT_RECAPPEND(RECLIK, RI, LIK) takes a
% likelihood record structure RECLIK, record index RI and
% likelihood structure LIK with the current MCMC samples of
% the parameters. Returns RECLIK which contains all the old
% samples and the current samples from LIK. This subfunction
% is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC
if nargin == 2
reclik.type = 'Logit';
% Set the function handles
reclik.fh.pak = @lik_logit_pak;
reclik.fh.unpak = @lik_logit_unpak;
reclik.fh.ll = @lik_logit_ll;
reclik.fh.llg = @lik_logit_llg;
reclik.fh.llg2 = @lik_logit_llg2;
reclik.fh.llg3 = @lik_logit_llg3;
reclik.fh.tiltedMoments = @lik_logit_tiltedMoments;
reclik.fh.predy = @lik_logit_predy;
reclik.fh.invlink = @lik_logit_invlink;
reclik.fh.recappend = @lik_logit_recappend;
end
end
|
github
|
lcnbeapp/beapp-master
|
lik_loggaussian.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/lik_loggaussian.m
| 25,546 |
utf_8
|
0e6db76a65c1b277dbce5a945a7538f4
|
function lik = lik_loggaussian(varargin)
%LIK_LOGGAUSSIAN Create a right censored log-Gaussian likelihood structure
%
% Description
% LIK = LIK_LOGGAUSSIAN('PARAM1',VALUE1,'PARAM2,VALUE2,...)
% creates a likelihood structure for right censored log-Gaussian
% survival model in which the named parameters have the
% specified values. Any unspecified parameters are set to
% default values.
%
% LIK = LIK_LOGGAUSSIAN(LIK,'PARAM1',VALUE1,'PARAM2,VALUE2,...)
% modify a likelihood structure with the named parameters
% altered with the specified values.
%
% Parameters for log-Gaussian likelihood [default]
% sigma2 - variance [1]
% sigma2_prior - prior for sigma2 [prior_logunif]
%
% Note! If the prior is 'prior_fixed' then the parameter in
% question is considered fixed and it is not handled in
% optimization, grid integration, MCMC etc.
%
% The likelihood is defined as follows:
% __ n
% p(y|f, z) = || i=1 [ (2*pi*s^2)^(-(1-z_i)/2)*y_i^-(1-z_i)
% *exp(-1/(2*s^2)*(1-z_i)*(log(y_i) - f_i)^2)
% *(1-norm_cdf((log(y_i)-f_i)/s))^z_i ]
%
%
% where s is the standard deviation of the log-Gaussian distribution.
% z is a vector of censoring indicators with z = 0 for uncensored event
% and z = 1 for right censored event.
%
% When using the log-Gaussian likelihood you need to give the vector z
% as an extra parameter to each function that requires also y.
% For example, you should call gpla_e as follows: gpla_e(w, gp,
% x, y, 'z', z)
%
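%  A minimal usage sketch (illustrative only; the covariance function,
%  the optimisation step and the data x, y, z, xt are assumptions, not
%  part of this file):
%
%      lik = lik_loggaussian('sigma2', 0.5);
%      gp  = gp_set('lik', lik, 'cf', gpcf_sexp(), ...
%                   'latent_method', 'Laplace');
%      gp  = gp_optim(gp, x, y, 'z', z);  % y survival times, z censoring
%      [Eft, Varft] = gp_pred(gp, x, y, xt, 'z', z);
%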
% See also
% GP_SET, LIK_*, PRIOR_*
%
% Copyright (c) 2012 Ville Tolvanen
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'LIK_LOGGAUSSIAN';
ip.addOptional('lik', [], @isstruct);
ip.addParamValue('sigma2',1, @(x) isscalar(x) && x>0);
ip.addParamValue('sigma2_prior',prior_logunif(), @(x) isstruct(x) || isempty(x));
ip.parse(varargin{:});
lik=ip.Results.lik;
if isempty(lik)
init=true;
lik.type = 'Log-Gaussian';
else
if ~isfield(lik,'type') || ~isequal(lik.type,'Log-Gaussian')
error('First argument does not seem to be a valid likelihood function structure')
end
init=false;
end
% Initialize parameters
if init || ~ismember('sigma2',ip.UsingDefaults)
lik.sigma2 = ip.Results.sigma2;
end
% Initialize prior structure
if init
lik.p=[];
end
if init || ~ismember('sigma2_prior',ip.UsingDefaults)
lik.p.sigma2=ip.Results.sigma2_prior;
end
if init
% Set the function handles to the subfunctions
lik.fh.pak = @lik_loggaussian_pak;
lik.fh.unpak = @lik_loggaussian_unpak;
lik.fh.lp = @lik_loggaussian_lp;
lik.fh.lpg = @lik_loggaussian_lpg;
lik.fh.ll = @lik_loggaussian_ll;
lik.fh.llg = @lik_loggaussian_llg;
lik.fh.llg2 = @lik_loggaussian_llg2;
lik.fh.llg3 = @lik_loggaussian_llg3;
lik.fh.tiltedMoments = @lik_loggaussian_tiltedMoments;
lik.fh.siteDeriv = @lik_loggaussian_siteDeriv;
lik.fh.invlink = @lik_loggaussian_invlink;
lik.fh.predy = @lik_loggaussian_predy;
lik.fh.recappend = @lik_loggaussian_recappend;
lik.fh.predcdf = @lik_loggaussian_predcdf;
end
end
function [w,s] = lik_loggaussian_pak(lik)
%LIK_LOGGAUSSIAN_PAK Combine likelihood parameters into one vector.
%
% Description
% W = LIK_LOGGAUSSIAN_PAK(LIK) takes a likelihood structure LIK and
% combines the parameters into a single row vector W. This is a
% mandatory subfunction used for example in energy and gradient
% computations.
%
% w = log(lik.sigma2)
%
% See also
% LIK_LOGGAUSSIAN_UNPAK, GP_PAK
w=[];s={};
if ~isempty(lik.p.sigma2)
w = log(lik.sigma2);
s = [s; 'log(loggaussian.sigma2)'];
[wh sh] = lik.p.sigma2.fh.pak(lik.p.sigma2);
w = [w wh];
s = [s; sh];
end
end
function [lik, w] = lik_loggaussian_unpak(lik, w)
%LIK_LOGGAUSSIAN_UNPAK Extract likelihood parameters from the vector.
%
% Description
% [LIK, W] = LIK_LOGGAUSSIAN_UNPAK(W, LIK) takes a likelihood
% structure LIK and extracts the parameters from the vector W
% to the LIK structure. This is a mandatory subfunction used
% for example in energy and gradient computations.
%
% Assignment is inverse of
% w = log(lik.sigma2)
%
% See also
% LIK_LOGGAUSSIAN_PAK, GP_UNPAK
if ~isempty(lik.p.sigma2)
lik.sigma2 = exp(w(1));
w = w(2:end);
[p, w] = lik.p.sigma2.fh.unpak(lik.p.sigma2, w);
lik.p.sigma2 = p;
end
end
function lp = lik_loggaussian_lp(lik, varargin)
%LIK_LOGGAUSSIAN_LP log(prior) of the likelihood parameters
%
% Description
% LP = LIK_LOGGAUSSIAN_LP(LIK) takes a likelihood structure LIK and
% returns log(p(th)), where th collects the parameters. This subfunction
% is needed when there are likelihood parameters.
%
% See also
% LIK_LOGGAUSSIAN_LLG, LIK_LOGGAUSSIAN_LLG3, LIK_LOGGAUSSIAN_LLG2, GPLA_E
% If prior for sigma2 parameter, add its contribution
lp=0;
if ~isempty(lik.p.sigma2)
lp = lik.p.sigma2.fh.lp(lik.sigma2, lik.p.sigma2) +log(lik.sigma2);
end
end
function lpg = lik_loggaussian_lpg(lik)
%LIK_LOGGAUSSIAN_LPG d log(prior)/dth of the likelihood
% parameters th
%
% Description
% E = LIK_LOGGAUSSIAN_LPG(LIK) takes a likelihood structure LIK and
% returns d log(p(th))/dth, where th collects the parameters. This
% subfunction is needed when there are likelihood parameters.
%
% See also
% LIK_LOGGAUSSIAN_LLG, LIK_LOGGAUSSIAN_LLG3, LIK_LOGGAUSSIAN_LLG2, GPLA_G
lpg=[];
if ~isempty(lik.p.sigma2)
% Evaluate the gprior with respect to sigma2
ggs = lik.p.sigma2.fh.lpg(lik.sigma2, lik.p.sigma2);
lpg = ggs(1).*lik.sigma2 + 1;
if length(ggs) > 1
lpg = [lpg ggs(2:end)];
end
end
end
function ll = lik_loggaussian_ll(lik, y, f, z)
%LIK_LOGGAUSSIAN_LL Log likelihood
%
% Description
% LL = LIK_LOGGAUSSIAN_LL(LIK, Y, F, Z) takes a likelihood
% structure LIK, survival times Y, censoring indicators Z, and
% latent values F. Returns the log likelihood, log p(y|f,z).
% This subfunction is needed when using Laplace approximation
% or MCMC for inference with non-Gaussian likelihoods. This
% subfunction is also used in information criteria (DIC, WAIC)
% computations.
%
% See also
% LIK_LOGGAUSSIAN_LLG, LIK_LOGGAUSSIAN_LLG3, LIK_LOGGAUSSIAN_LLG2, GPLA_E
if isempty(z)
error(['lik_loggaussian -> lik_loggaussian_ll: missing z! '...
'loggaussian likelihood needs the censoring '...
'indicators as an extra input z. See, for '...
'example, lik_loggaussian and gpla_e. ']);
end
s2 = lik.sigma2;
ll = sum(-(1-z)./2*log(2*pi*s2) - (1-z).*log(y) - (1-z)./(2*s2).*(log(y)-f).^2 ...
+ z.*log(1-norm_cdf((log(y)-f)./sqrt(s2))));
end
function llg = lik_loggaussian_llg(lik, y, f, param, z)
%LIK_LOGGAUSSIAN_LLG Gradient of the log likelihood
%
% Description
% LLG = LIK_LOGGAUSSIAN_LLG(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, survival times Y, censoring indicators Z and
% latent values F. Returns the gradient of the log likelihood
% with respect to PARAM. At the moment PARAM can be 'param' or
% 'latent'. This subfunction is needed when using Laplace
% approximation or MCMC for inference with non-Gaussian likelihoods.
%
% See also
% LIK_LOGGAUSSIAN_LL, LIK_LOGGAUSSIAN_LLG2, LIK_LOGGAUSSIAN_LLG3, GPLA_E
if isempty(z)
error(['lik_loggaussian -> lik_loggaussian_llg: missing z! '...
'loggaussian likelihood needs the censoring '...
'indicators as an extra input z. See, for '...
'example, lik_loggaussian and gpla_e. ']);
end
s2 = lik.sigma2;
r = log(y)-f;
switch param
case 'param'
llg = sum(-(1-z)./(2.*s2) + (1-z).*r.^2./(2.*s2^2) + z./(1-norm_cdf(r/sqrt(s2))) ...
.* (r./(sqrt(2.*pi).*2.*s2.^(3/2)).*exp(-1/(2.*s2).*r.^2)));
% correction for the log transformation
llg = llg.*lik.sigma2;
case 'latent'
llg = (1-z)./s2.*r + z./(1-norm_cdf(r/sqrt(s2))).*(1/sqrt(2*pi*s2) .* exp(-1/(2.*s2).*r.^2));
end
end
function llg2 = lik_loggaussian_llg2(lik, y, f, param, z)
%LIK_LOGGAUSSIAN_LLG2 Second gradients of the log likelihood
%
% Description
% LLG2 = LIK_LOGGAUSSIAN_LLG2(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, survival times Y, censoring indicators Z, and
% latent values F. Returns the hessian of the log likelihood
% with respect to PARAM. At the moment PARAM can be only
% 'latent'. LLG2 is a vector with diagonal elements of the
% Hessian matrix (off diagonals are zero). This subfunction
% is needed when using Laplace approximation or EP for
% inference with non-Gaussian likelihoods.
%
% See also
% LIK_LOGGAUSSIAN_LL, LIK_LOGGAUSSIAN_LLG, LIK_LOGGAUSSIAN_LLG3, GPLA_E
if isempty(z)
error(['lik_loggaussian -> lik_loggaussian_llg2: missing z! '...
'loggaussian likelihood needs the censoring '...
'indicators as an extra input z. See, for '...
'example, lik_loggaussian and gpla_e. ']);
end
s2 = lik.sigma2;
r = log(y)-f;
switch param
case 'param'
case 'latent'
llg2 = (z-1)./s2 + z.*(-exp(-r.^2/s2)./(2*pi*s2.*(1-norm_cdf(r/sqrt(s2))).^2) ...
+ r./(sqrt(2*pi).*s2^(3/2).*(1-norm_cdf(r/sqrt(s2)))).*exp(-r.^2./(2*s2)));
case 'latent+param'
llg2 = -(1-z)./s2^2.*(log(y)-f) + z.*(-r./(4*pi*s2^2.*(1-norm_cdf(r/sqrt(s2))).^2) ...
.* exp(-r.^2./s2) + (-1 + r.^2/s2)./(1-norm_cdf(r/sqrt(s2))).*1./(sqrt(2*pi)*2*s2^(3/2)).*exp(-r.^2./(2*s2)));
% correction due to the log transformation
llg2 = llg2.*s2;
end
end
function llg3 = lik_loggaussian_llg3(lik, y, f, param, z)
%LIK_LOGGAUSSIAN_LLG3 Third gradients of the log likelihood
%
% Description
% LLG3 = LIK_LOGGAUSSIAN_LLG3(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, survival times Y, censoring indicators Z and
% latent values F and returns the third gradients of the log
% likelihood with respect to PARAM. At the moment PARAM can be
% only 'latent'. LLG3 is a vector with third gradients. This
% subfunction is needed when using Laplace approximation for
% inference with non-Gaussian likelihoods.
%
% See also
% LIK_LOGGAUSSIAN_LL, LIK_LOGGAUSSIAN_LLG, LIK_LOGGAUSSIAN_LLG2, GPLA_E, GPLA_G
if isempty(z)
error(['lik_loggaussian -> lik_loggaussian_llg3: missing z! '...
'loggaussian likelihood needs the censoring '...
'indicators as an extra input z. See, for '...
'example, lik_loggaussian and gpla_e. ']);
end
s2 = lik.sigma2;
r = log(y) - f;
switch param
case 'param'
case 'latent'
llg3 = 2.*z./(1-norm_cdf(r/sqrt(s2))).^3.*1./(2*pi*s2)^(3/2).*exp(-3/(2*s2)*r.^2) ...
- z./(1-norm_cdf(r/sqrt(s2))).^2.*r./(pi*s2^2).*exp(-r.^2./s2) ...
- z./(1-norm_cdf(r/sqrt(s2))).^2.*r./(2*pi*s2^2).*exp(-r.^2/s2) ...
- z./(1-norm_cdf(r/sqrt(s2))).^1.*1./(s2^(3/2)*sqrt(2*pi)).*exp(-r.^2/(2*s2)) ...
+ z./(1-norm_cdf(r/sqrt(s2))).^1.*r.^2./(sqrt(2*pi*s2)*s2^2).*exp(-r.^2/(2*s2));
case 'latent2+param'
llg3 = (1-z)./s2^2 + z.*(1./(1-norm_cdf(r/sqrt(s2))).^3.*r./(sqrt(8*pi^3).*s2.^(5/2)).*exp(-3/(2.*s2).*r.^2) ...
+ 1./(1-norm_cdf(r./sqrt(s2))).^2.*1./(4.*pi.*s2^2).*exp(-r.^2./s2) ...
- 1./(1-norm_cdf(r./sqrt(s2))).^2.*r.^2./(2*pi*s2^3).*exp(-r.^2./s2) ...
+ 1./(1-norm_cdf(r./sqrt(s2))).^2.*1./(4*pi*s2^2).*exp(-r.^2/s2) ...
- 1./(1-norm_cdf(r./sqrt(s2))).^1.*r./(sqrt(2*pi)*2*s2^(5/2)).*exp(-r.^2/(2*s2)) ...
- 1./(1-norm_cdf(r./sqrt(s2))).^2.*r.^2./(4*pi*s2^3).*exp(-r.^2/s2) ...
- 1./(1-norm_cdf(r./sqrt(s2))).^1.*r./(sqrt(2*pi)*s2^(5/2)).*exp(-r.^2/(2*s2)) ...
+ 1./(1-norm_cdf(r./sqrt(s2))).^1.*r.^3./(sqrt(2*pi)*2*s2^(7/2)).*exp(-r.^2/(2*s2)));
% correction due to the log transformation
llg3 = llg3.*lik.sigma2;
end
end
function [logM_0, m_1, sigm2hati1] = lik_loggaussian_tiltedMoments(lik, y, i1, sigm2_i, myy_i, z)
%LIK_LOGGAUSSIAN_TILTEDMOMENTS Returns the marginal moments for EP algorithm
%
% Description
% [M_0, M_1, M2] = LIK_LOGGAUSSIAN_TILTEDMOMENTS(LIK, Y, I, S2,
% MYY, Z) takes a likelihood structure LIK, survival times
% Y, censoring indicators Z, index I and cavity variance S2 and
% mean MYY. Returns the zeroth moment M_0, mean M_1 and
% variance M_2 of the posterior marginal (see Rasmussen and
% Williams (2006): Gaussian processes for Machine Learning,
% page 55). This subfunction is needed when using EP for
% inference with non-Gaussian likelihoods.
%
% See also
% GPEP_E
if isempty(z)
error(['lik_loggaussian -> lik_loggaussian_tiltedMoments: missing z!'...
'loggaussian likelihood needs the censoring '...
'indicators as an extra input z. See, for '...
'example, lik_loggaussian and gpep_e. ']);
end
yy = y(i1);
yc = 1-z(i1);
s2 = lik.sigma2;
logM_0=zeros(size(yy));
m_1=zeros(size(yy));
sigm2hati1=zeros(size(yy));
for i=1:length(i1)
% get a function handle of an unnormalized tilted distribution
% (likelihood * cavity = log-Gaussian * Gaussian)
% and useful integration limits
[tf,minf,maxf]=init_loggaussian_norm(yy(i),myy_i(i),sigm2_i(i),yc(i),s2);
% Integrate with quadrature
RTOL = 1.e-6;
ATOL = 1.e-10;
[m_0, m_1(i), m_2] = quad_moments(tf, minf, maxf, RTOL, ATOL);
sigm2hati1(i) = m_2 - m_1(i).^2;
% If the computed second central moment is not smaller than the
% cavity variance, integrate more precisely. Theoretically for log-concave
% likelihood should be sigm2hati1 < sigm2_i.
if sigm2hati1(i) >= sigm2_i(i)
ATOL = ATOL.^2;
RTOL = RTOL.^2;
[m_0, m_1(i), m_2] = quad_moments(tf, minf, maxf, RTOL, ATOL);
sigm2hati1(i) = m_2 - m_1(i).^2;
if sigm2hati1(i) >= sigm2_i(i)
error('lik_loggaussian_tilted_moments: sigm2hati1 >= sigm2_i');
end
end
logM_0(i) = log(m_0);
end
end
function [g_i] = lik_loggaussian_siteDeriv(lik, y, i1, sigm2_i, myy_i, z)
%LIK_LOGGAUSSIAN_SITEDERIV Evaluate the expectation of the gradient
% of the log likelihood term with respect
% to the likelihood parameters for EP
%
% Description [M_0, M_1, M2] =
% LIK_LOGGAUSSIAN_SITEDERIV(LIK, Y, I, S2, MYY, Z) takes a
% likelihood structure LIK, survival times Y, censoring
% indicators Z, index I and cavity variance S2 and mean MYY.
% Returns E_f [d log p(y_i|f_i) /d a], where a is the
% likelihood parameter and the expectation is over the
% marginal posterior. This term is needed when evaluating the
% gradients of the marginal likelihood estimate Z_EP with
% respect to the likelihood parameters (see Seeger (2008):
% Expectation propagation for exponential families). This
% subfunction is needed when using EP for inference with
% non-Gaussian likelihoods and there are likelihood parameters.
%
% See also
% GPEP_G
if isempty(z)
error(['lik_loggaussian -> lik_loggaussian_siteDeriv: missing z!'...
'loggaussian likelihood needs the censoring '...
'indicators as an extra input z. See, for '...
'example, lik_loggaussian and gpla_e. ']);
end
yy = y(i1);
yc = 1-z(i1);
s2 = lik.sigma2;
% get a function handle of an unnormalized tilted distribution
% (likelihood * cavity = Log-Gaussian * Gaussian)
% and useful integration limits
[tf,minf,maxf]=init_loggaussian_norm(yy,myy_i,sigm2_i,yc,s2);
% additionally get function handle for the derivative
td = @deriv;
% Integrate with quadgk
[m_0, fhncnt] = quadgk(tf, minf, maxf);
[g_i, fhncnt] = quadgk(@(f) td(f).*tf(f)./m_0, minf, maxf);
g_i = g_i.*s2;
function g = deriv(f)
r=log(yy)-f;
g = -yc./(2.*s2) + yc.*r.^2./(2.*s2^2) + (1-yc)./(1-norm_cdf(r/sqrt(s2))) ...
.* (r./(sqrt(2.*pi).*2.*s2.^(3/2)).*exp(-1/(2.*s2).*r.^2));
end
end
function [lpy, Ey, Vary] = lik_loggaussian_predy(lik, Ef, Varf, yt, zt)
%LIK_LOGGAUSSIAN_PREDY Returns the predictive mean, variance and density of y
%
% Description
% LPY = LIK_LOGGAUSSIAN_PREDY(LIK, EF, VARF YT, ZT)
% Returns logarithm of the predictive density PY of YT, that is
% p(yt | zt) = \int p(yt | f, zt) p(f|y) df.
% This requires also the survival times YT, censoring indicators ZT.
% This subfunction is needed when computing posterior predictive
% distributions for future observations.
%
% [LPY, EY, VARY] = LIK_LOGGAUSSIAN_PREDY(LIK, EF, VARF) takes a
% likelihood structure LIK, posterior mean EF and posterior
% Variance VARF of the latent variable and returns the
% posterior predictive mean EY and variance VARY of the
% observations related to the latent variables. This subfunction
% is needed when computing posterior predictive distributions for
% future observations.
%
%
% See also
% GPLA_PRED, GPEP_PRED, GPMC_PRED
if isempty(zt)
error(['lik_loggaussian -> lik_loggaussian_predy: missing zt!'...
'loggaussian likelihood needs the censoring '...
'indicators as an extra input zt. See, for '...
'example, lik_loggaussian and gpla_e. ']);
end
yc = 1-zt;
s2 = lik.sigma2;
Ey=[];
Vary=[];
% Evaluate the posterior predictive densities of the given observations
lpy = zeros(length(yt),1);
for i1=1:length(yt)
if abs(Ef(i1))>700
lpy(i1) = NaN;
else
% get a function handle of the likelihood times posterior
% (likelihood * posterior = log-Gaussian * Gaussian)
% and useful integration limits
[pdf,minf,maxf]=init_loggaussian_norm(...
yt(i1),Ef(i1),Varf(i1),yc(i1),s2);
% integrate over the f to get posterior predictive distribution
lpy(i1) = log(quadgk(pdf, minf, maxf));
end
end
end
function [df,minf,maxf] = init_loggaussian_norm(yy,myy_i,sigm2_i,yc,s2)
%INIT_LOGGAUSSIAN_NORM
%
% Description
% Return function handle to a function evaluating
% loggaussian * Gaussian which is used for evaluating
% (likelihood * cavity) or (likelihood * posterior) Return
% also useful limits for integration. This is private function
% for lik_loggaussian. This subfunction is needed by subfunctions
% tiltedMoments, siteDeriv and predy.
%
% See also
% LIK_LOGGAUSSIAN_TILTEDMOMENTS, LIK_LOGGAUSSIAN_SITEDERIV,
% LIK_LOGGAUSSIAN_PREDY
% avoid repetitive evaluation of constant part
ldconst = -yc./2.*log(2*pi*s2) -yc.*log(yy) ...
- log(sigm2_i)/2 - log(2*pi)/2;
% Create function handle for the function to be integrated
df = @loggaussian_norm;
% use log to avoid underflow, and derivatives for faster search
ld = @log_loggaussian_norm;
ldg = @log_loggaussian_norm_g;
ldg2 = @log_loggaussian_norm_g2;
% Set the limits for integration
if yc==0
% for a right censored observation (yc==0) the likelihood has no mode
% use the mode of the Gaussian (cavity or posterior) as a first guess
modef = myy_i;
else
% use precision weighted mean of the Gaussian approximation
% of the loggaussian likelihood and Gaussian
mu=log(yy);
%s2=1./(yc+1./sigm2_i);
% s2=s2;
modef = (myy_i/sigm2_i + mu/s2)/(1/sigm2_i + 1/s2);
end
% find the mode of the integrand using Newton iterations
% a few iterations are enough, since the first guess is in the right direction
niter=4; % number of Newton iterations
mindelta=1e-6; % tolerance in stopping Newton iterations
for ni=1:niter
g=ldg(modef);
h=ldg2(modef);
delta=-g/h;
modef=modef+delta;
if abs(delta)<mindelta
break
end
end
% integrand limits based on Gaussian approximation at mode
modes=sqrt(-1/h);
minf=modef-8*modes;
maxf=modef+8*modes;
modeld=ld(modef);
iter=0;
% check that density at end points is low enough
lddiff=20; % min difference in log-density between mode and end-points
minld=ld(minf);
step=1;
while minld>(modeld-lddiff)
minf=minf-step*modes;
minld=ld(minf);
iter=iter+1;
step=step*2;
if iter>100
error(['lik_loggaussian -> init_loggaussian_norm: ' ...
'integration interval minimum not found ' ...
'even after looking hard!'])
end
end
maxld=ld(maxf);
step=1;
while maxld>(modeld-lddiff)
maxf=maxf+step*modes;
maxld=ld(maxf);
iter=iter+1;
step=step*2;
if iter>100
error(['lik_loggaussian -> init_loggaussian_norm: ' ...
'integration interval maximum not found ' ...
'even after looking hard!'])
end
end
function integrand = loggaussian_norm(f)
% loggaussian * Gaussian
integrand = exp(ldconst ...
- yc./(2*s2).*(log(yy)-f).^2 + (1-yc).*log(1-norm_cdf((log(yy)-f)/sqrt(s2))) ...
-0.5*(f-myy_i).^2./sigm2_i);
end
function log_int = log_loggaussian_norm(f)
% log(loggaussian * Gaussian)
% log_loggaussian_norm is used to avoid underflow when searching
% integration interval
log_int = ldconst ...
-yc./(2*s2).*(log(yy)-f).^2 + (1-yc).*log(1-norm_cdf((log(yy)-f)/sqrt(s2))) ...
-0.5*(f-myy_i).^2./sigm2_i;
end
function g = log_loggaussian_norm_g(f)
% d/df log(loggaussian * Gaussian)
% derivative of log_loggaussian_norm
g = yc./s2.*(log(yy)-f) + (1-yc)./(1-norm_cdf((log(yy)-f)/sqrt(s2))).*1/sqrt(2*pi*s2)*exp(-(log(yy)-f).^2./(2*s2)) ...
+ (myy_i - f)./sigm2_i;
end
function g2 = log_loggaussian_norm_g2(f)
% d^2/df^2 log(loggaussian * Gaussian)
% second derivative of log_loggaussian_norm
g2 = -yc./s2 + (1-yc).*(-exp(-(log(yy)-f).^2/s2)./(2*pi*s2.*(1-norm_cdf((log(yy)-f)/sqrt(s2))).^2) ...
+ (log(yy)-f)./(sqrt(2*pi).*s2^(3/2).*(1-norm_cdf((log(yy)-f)/sqrt(s2)))).*exp(-(log(yy)-f).^2./(2*s2))) ...
-1/sigm2_i;
end
end
function cdf = lik_loggaussian_predcdf(lik, Ef, Varf, yt)
%LIK_LOGGAUSSIAN_PREDCDF Returns the predictive cdf evaluated at yt
%
% Description
% CDF = LIK_LOGGAUSSIAN_PREDCDF(LIK, EF, VARF, YT)
% Returns the predictive cdf evaluated at YT given likelihood
% structure LIK, posterior mean EF and posterior Variance VARF
% of the latent variable. This subfunction is needed when using
% functions gp_predcdf or gp_kfcv_cdf.
%
% See also
% GP_PREDCDF
s2 = lik.sigma2;
% Evaluate the posterior predictive densities of the given observations
cdf = zeros(length(yt),1);
for i1=1:length(yt)
% Get a function handle of the likelihood times posterior
% (likelihood * posterior = log-Gaussian * Gaussian)
% and useful integration limits.
% yc=0 when evaluating predictive cdf
[pdf,minf,maxf]=init_loggaussian_norm(...
yt(i1),Ef(i1),Varf(i1),0,s2);
% integrate over the f to get posterior predictive distribution
cdf(i1) = 1-quadgk(pdf, minf, maxf);
end
end
function p = lik_loggaussian_invlink(lik, f)
%LIK_LOGGAUSSIAN Returns values of inverse link function
%
% Description
% P = LIK_LOGGAUSSIAN_INVLINK(LIK, F) takes a likelihood structure LIK and
% latent values F and returns the values of inverse link function P.
% This subfunction is needed when using function gp_predprctmu.
%
% See also
% LIK_LOGGAUSSIAN_LL, LIK_LOGGAUSSIAN_PREDY
p = exp(f);
end
function reclik = lik_loggaussian_recappend(reclik, ri, lik)
%RECAPPEND Append the parameters to the record
%
% Description
% RECLIK = GPCF_LOGGAUSSIAN_RECAPPEND(RECLIK, RI, LIK) takes a
% likelihood record structure RECLIK, record index RI and
% likelihood structure LIK with the current MCMC samples of
% the parameters. Returns RECLIK which contains all the old
% samples and the current samples from LIK. This subfunction
% is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC
if nargin == 2
% Initialize the record
reclik.type = 'Log-Gaussian';
% Initialize parameter
reclik.sigma2 = [];
% Set the function handles
reclik.fh.pak = @lik_loggaussian_pak;
reclik.fh.unpak = @lik_loggaussian_unpak;
reclik.fh.lp = @lik_loggaussian_lp;
reclik.fh.lpg = @lik_loggaussian_lpg;
reclik.fh.ll = @lik_loggaussian_ll;
reclik.fh.llg = @lik_loggaussian_llg;
reclik.fh.llg2 = @lik_loggaussian_llg2;
reclik.fh.llg3 = @lik_loggaussian_llg3;
reclik.fh.tiltedMoments = @lik_loggaussian_tiltedMoments;
reclik.fh.invlink = @lik_loggaussian_invlink;
reclik.fh.predy = @lik_loggaussian_predy;
reclik.fh.predcdf = @lik_loggaussian_predcdf;
reclik.fh.recappend = @lik_loggaussian_recappend;
reclik.p=[];
reclik.p.sigma2=[];
if ~isempty(ri.p.sigma2)
reclik.p.sigma2 = ri.p.sigma2;
end
else
% Append to the record
reclik.sigma2(ri,:)=lik.sigma2;
if ~isempty(lik.p)
reclik.p.sigma2 = lik.p.sigma2.fh.recappend(reclik.p.sigma2, ri, lik.p.sigma2);
end
end
end
|
github
|
lcnbeapp/beapp-master
|
gpmf_constant.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gpmf_constant.m
| 8,340 |
utf_8
|
d28568c446e6b59df03d7a7809de643f
|
function gpmf = gpmf_constant(varargin)
%GPMF_CONSTANT Create a constant mean function
%
% Description
% GPMF = GPMF_CONSTANT('PARAM1',VALUE1,'PARAM2,VALUE2,...)
% creates constant mean function structure in which the named
% parameters have the specified values. Any unspecified
% parameters are set to default values.
%
% GPMF = GPMF_CONSTANT(GPMF,'PARAM1',VALUE1,'PARAM2,VALUE2,...)
% modify a mean function structure with the named parameters
% altered with the specified values.
%
% Parameters for constant mean function
% constant - constant value for the constant
% base function (default 1)
% prior_mean - prior mean (scalar or vector) for base
% functions' weight prior (default 0)
% prior_cov - prior covariances (scalar or vector)
% for base functions' prior corresponding
% each selected input dimension. In
% multiple dimension case prior_cov is a
% struct containing scalars or vectors.
% The covariances must all be either
% scalars (diagonal cov.matrix) or
% vectors (for non-diagonal cov.matrix)
% (default 100)
%
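%  A minimal usage sketch (illustrative only; the covariance function,
%  the 'meanf' option of GP_SET and the data x, y are assumptions, not
%  part of this file):
%
%      gpmf = gpmf_constant('prior_mean', 0, 'prior_cov', 30);
%      gp   = gp_set('cf', gpcf_sexp(), 'meanf', {gpmf});
%      gp   = gp_optim(gp, x, y);
%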
% See also
% GP_SET, GPMF_LINEAR, GPMF_SQUARED
%
% Copyright (c) 2010 Tuomas Nikoskinen
% Copyright (c) 2011 Jarno Vanhatalo
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'GPMF_CONSTANT';
ip.addOptional('gpmf', [], @isstruct);
ip.addParamValue('constant',1, @(x) isvector(x) && all(x>0));
ip.addParamValue('prior_mean',0, @(x) isvector(x));
ip.addParamValue('prior_cov',100, @(x) isvector(x));
ip.addParamValue('mean_prior', [], @isstruct);
ip.addParamValue('cov_prior', [], @isstruct);
ip.parse(varargin{:});
gpmf=ip.Results.gpmf;
if isempty(gpmf)
% Initialize a mean function
init=true;
gpmf.type = 'gpmf_constant';
else
% Modify a mean function
if ~isfield(gpmf,'type') || ~isequal(gpmf.type,'gpmf_constant')
error('First argument does not seem to be a constant mean function')
end
init=false;
end
% Initialize parameters
if init || ~ismember('constant',ip.UsingDefaults)
gpmf.constant = ip.Results.constant;
end
if init || ~ismember('prior_mean',ip.UsingDefaults)
gpmf.b=ip.Results.prior_mean(:)';
end
if init || ~ismember('prior_cov',ip.UsingDefaults)
gpmf.B=ip.Results.prior_cov(:)';
end
if init || ~ismember('mean_prior',ip.UsingDefaults)
gpmf.p.b=ip.Results.mean_prior;
end
if init || ~ismember('cov_prior',ip.UsingDefaults)
gpmf.p.B=ip.Results.cov_prior;
end
if init
% Set the function handles to the nested functions
gpmf.fh.geth = @gpmf_geth;
gpmf.fh.pak = @gpmf_pak;
gpmf.fh.unpak = @gpmf_unpak;
gpmf.fh.lp = @gpmf_lp;
gpmf.fh.lpg = @gpmf_lpg;
gpmf.fh.recappend = @gpmf_recappend;
end
end
function h = gpmf_geth(gpmf, x)
%GPMF_GETH Calculate the base function values for a given input.
%
% Description
% H = GPMF_GETH(GPMF,X) takes in a mean function structure
% GPMF and inputs X. The function returns a row vector of
% length(X) containing the constant value which is by default
% 1.
constant=gpmf.constant;
h = repmat(constant,1,length(x(:,1)));
end
function [w, s] = gpmf_pak(gpmf, w)
%GPMF_PAK Combine GP mean function parameters into one vector
%
% Description
% W = GPMF_PAK(GPMF) takes a mean function structure GPMF and
% combines the mean function parameters and their
% hyperparameters into a single row vector W.
%
%    w = [ gpmf.b
%          (hyperparameters of gpmf.b)
%          log(gpmf.B)
%          (hyperparameters of gpmf.B)]'
%
% See also
% GPMF_UNPAK
w = []; s = {};
if ~isempty(gpmf.p.b)
w = gpmf.b;
if numel(gpmf.b)>1
s = [s; sprintf('gpmf_constant.b x %d',numel(gpmf.b))];
else
s = [s; 'gpmf_constant.b'];
end
% Hyperparameters of coeffSigma2
[wh sh] = gpmf.p.b.fh.pak(gpmf.p.b);
w = [w wh];
s = [s; sh];
end
if ~isempty(gpmf.p.B)
w = [w log(gpmf.B)];
if numel(gpmf.B)>1
s = [s; sprintf('log(gpmf_constant.B x %d)',numel(gpmf.B))];
else
s = [s; 'log(gpmf_constant.B)'];
end
% Hyperparameters of coeffSigma2
[wh sh] = gpmf.p.B.fh.pak(gpmf.p.B);
w = [w wh];
s = [s; sh];
end
end
function [gpmf, w] = gpmf_unpak(gpmf, w)
%GPMF_UNPAK  Sets the mean function parameters
% into the structure
%
% Description
% [GPMF, W] = GPMF_UNPAK(GPMF, W) takes a mean function
% structure GPMF and a parameter vector W, and returns a mean
% function structure identical to the input, except that the
% parameters have been set to the values in W. Deletes the
% values set to GPMF from W and returns the modified W.
%
% Assignment is inverse of
%    w = [ gpmf.b
%          (hyperparameters of gpmf.b)
%          log(gpmf.B)
%          (hyperparameters of gpmf.B)]'
%
% See also
% GPMF_PAK
gpp=gpmf.p;
if ~isempty(gpp.b)
i2=length(gpmf.b);
i1=1;
gpmf.b = w(i1:i2);
w = w(i2+1:end);
% Hyperparameters of coeffSigma2
[p, w] = gpmf.p.b.fh.unpak(gpmf.p.b, w);
gpmf.p.b = p;
end
if ~isempty(gpp.B)
i2=length(gpmf.B);
i1=1;
gpmf.B = exp(w(i1:i2));
w = w(i2+1:end);
% Hyperparameters of coeffSigma2
[p, w] = gpmf.p.B.fh.unpak(gpmf.p.B, w);
gpmf.p.B = p;
end
end
function lp = gpmf_lp(gpmf)
%GPMF_LP  Evaluate the log prior of mean function parameters
%
% Description
% LP = GPMF_LP(GPMF) takes a mean function structure GPMF and
% returns log(p(th)), where th collects the parameters.
%
% See also
% GPMF_PAK, GPMF_UNPAK, GPMF_LPG
% Evaluate the prior contribution to the error. The parameters that
% are sampled are transformed, e.g., W = log(w) where w is all
% the "real" samples. On the other hand errors are evaluated in
% the W-space so we need to take into account also the Jacobian of
% the transformation, e.g., W -> w = exp(W). See Gelman et al., 2004,
% Bayesian Data Analysis, second edition, p. 24.
lp = 0;
gpp=gpmf.p;
if ~isempty(gpmf.p.b)
lp = lp + gpp.b.fh.lp(gpmf.b, ...
gpp.b);
end
if ~isempty(gpp.B)
lp = lp + gpp.B.fh.lp(gpmf.B, ...
gpp.B) +sum(log(gpmf.B));
end
end
function [lpg_b, lpg_B] = gpmf_lpg(gpmf)
%GPMF_LPG  Evaluate gradient of the log prior with respect
% to the parameters.
%
% Description
% [LPG_b, LPG_B] = GPMF_LPG(GPMF) takes a mean function
% structure GPMF and returns the gradients d log(p(th))/dth of
% the log prior: LPG_b for the prior mean parameters b and
% LPG_B for the prior covariance parameters B.
%
% See also
% GPMF_PAK, GPMF_UNPAK, GPMF_LP, GP_G
lpg_b=[]; lpg_B=[];
gpp=gpmf.p;
if ~isempty(gpmf.p.b)
lll = length(gpmf.b);
lpgs = gpp.b.fh.lpg(gpmf.b, gpp.b);
lpg_b = [lpgs(1:lll) lpgs(lll+1:end)]; %.*gpmf.b+1
end
if ~isempty(gpmf.p.B)
lll = length(gpmf.B);
lpgs = gpp.B.fh.lpg(gpmf.B, gpp.B);
lpg_B = [lpgs(1:lll).*gpmf.B+1 lpgs(lll+1:end)];
end
end
function recmf = gpmf_recappend(recmf, ri, gpmf)
%RECAPPEND Record append
%
% Description
%
% See also
% GP_MC and GP_MC -> RECAPPEND
% Initialize record
if nargin == 2
recmf.type = 'gpmf_constant';
% Initialize parameters
recmf.b= [];
recmf.B = [];
% Set the function handles
recmf.fh.geth = @gpmf_geth;
recmf.fh.pak = @gpmf_pak;
recmf.fh.unpak = @gpmf_unpak;
recmf.fh.lp = @gpmf_lp;
recmf.fh.lpg = @gpmf_lpg;
recmf.fh.recappend = @gpmf_recappend;
recmf.p=[];
recmf.p.b=[];
recmf.p.B=[];
if isfield(ri.p,'b') && ~isempty(ri.p.b)
recmf.p.b = ri.p.b;
end
if ~isempty(ri.p.B)
recmf.p.B = ri.p.B;
end
return
end
gpp = gpmf.p;
% record b
if ~isempty(gpmf.b)
recmf.b(ri,:)=gpmf.b;
if ~isempty(recmf.p.b)
recmf.p.b = gpp.b.fh.recappend(recmf.p.b, ri, gpmf.p.b);
end
elseif ri==1
recmf.b=[];
end
if ~isempty(gpmf.B)
recmf.B(ri,:)=gpmf.B;
if ~isempty(recmf.p.B)
recmf.p.B = gpp.B.fh.recappend(recmf.p.B, ri, gpmf.p.B);
end
elseif ri==1
recmf.B=[];
end
end
|
github
|
lcnbeapp/beapp-master
|
gpla_loopred.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gpla_loopred.m
| 10,429 |
UNKNOWN
|
430225391c518e3110e0a4535af20ba2
|
function [Eft, Varft, lpyt, Eyt, Varyt] = gpla_loopred(gp, x, y, varargin)
%GPLA_LOOPRED Leave-one-out predictions with Laplace approximation
%
% Description
% [EFT, VARFT, LPYT, EYT, VARYT] = GPLA_LOOPRED(GP, X, Y, OPTIONS)
% takes a Gaussian process structure GP together with a matrix X
% of training inputs and vector Y of training targets, and
% evaluates the leave-one-out predictive distribution at inputs
% X and returns means EFT and variances VARFT of latent
% variables, the logarithm of the predictive densities PYT, and
% the predictive means EYT and variances VARYT of observations
% at input locations X.
%
% OPTIONS is optional parameter-value pair
% z - optional observed quantity in triplet (x_i,y_i,z_i)
% Some likelihoods may use this. For example, in case of
% Poisson likelihood we have z_i=E_i, that is, expected value
% for ith case.
%
% Laplace leave-one-out is approximated in linear response style
% by expressing the solutions for LOO problem in terms of
% solution for the full problem. The computationally cheap
% solution can be obtained by making the assumption that the
% difference between these two solutions is small, so that their
% difference may be approximated by a Taylor expansion truncated at
% first order (Winther et al 2012, in progress).
%
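% A minimal usage sketch (illustrative only; the model below is an
% assumption; any GP with a non-Gaussian likelihood and the Laplace
% latent method can be used in the same way):
%
%    gp = gp_set('lik', lik_logit(), 'cf', gpcf_sexp(), ...
%                'latent_method', 'Laplace');
%    gp = gp_optim(gp, x, y);
%    [Eft, Varft, lpyt] = gpla_loopred(gp, x, y);
%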
% See also
% GP_LOOPRED, GP_PRED
% Copyright (c) 2011-2012 Aki Vehtari, Ville Tolvanen
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'GPLA_LOOPRED';
ip.addRequired('gp', @(x) isstruct(x));
ip.addRequired('x', @(x) ~isempty(x) && isreal(x) && all(isfinite(x(:))))
ip.addRequired('y', @(x) ~isempty(x) && isreal(x) && all(isfinite(x(:))))
ip.addParamValue('z', [], @(x) isreal(x) && all(isfinite(x(:))))
ip.addParamValue('method', 'lrs', @(x) ismember(x, {'lrs' 'cavity' 'inla'}))
ip.parse(gp, x, y, varargin{:});
z=ip.Results.z;
method = ip.Results.method;
[tn,nin] = size(x);
switch method
case 'lrs'
% Manfred Opper and Ole Winther (2000). Gaussian Processes for
% Classification: Mean-Field Algorithms. In Neural
% Computation, 12(11):2655-2684.
%
% Ole Winther et al (2012). Work in progress.
% latent posterior
[f, sigm2ii] = gpla_pred(gp, x, y, 'z', z, 'tstind', []);
deriv = gp.lik.fh.llg(gp.lik, y, f, 'latent', z);
La = 1./-gp.lik.fh.llg2(gp.lik, y, f, 'latent', z);
% really large values don't contribute, but make variance
% computation unstable. 2e15 approx 1/(2*eps)
La = min(La,2e15);
switch gp.type
case 'FULL'
% FULL GP (and compact support GP)
K = gp_trcov(gp,x);
Varft=1./diag(inv(K+diag(La)))-La;
case 'FIC'
% FIC
% Use inverse lemma for FIC low rank covariance matrix approximation
% Code adapted from gp_pred
u = gp.X_u;
m = size(u,1);
% Turn the inducing inputs in the right direction
if size(u,2) ~= size(x,2)
u=u';
end
[Kv_ff, Cv_ff] = gp_trvar(gp, x); % 1 x f vector
K_fu = gp_cov(gp, x, u); % f x u
K_uu = gp_trcov(gp, u);    % u x u, noiseless covariance K_uu
Luu = chol(K_uu,'lower');
B=Luu\(K_fu');
Qv_ff=sum(B.^2)';
% Add also La to the vector of diagonal elements
Lav = Cv_ff-Qv_ff + La; % 1 x f, Vector of diagonal elements
% iLaKfu = diag(inv(Lav))*K_fu = inv(La)*K_fu
iLaKfu = zeros(size(K_fu)); % f x u,
n=size(x,1);
for i=1:n
iLaKfu(i,:) = K_fu(i,:)./Lav(i); % f x u
end
A = K_uu+K_fu'*iLaKfu;
A = (A+A')./2;
L = iLaKfu/chol(A);
%Varft=1./diag(inv(K+diag(La)))-La;
Varft=1./(1./Lav - sum(L.^2,2))-La;
case {'PIC' 'PIC_BLOCK'}
% PIC
% Use inverse lemma for PIC low rank covariance matrix approximation
% Code adapted from gp_pred (here Lab is the same as La in gp_pred)
u = gp.X_u;
ind = gp.tr_index;
if size(u,2) ~= size(x,2)
% Turn the inducing inputs in the right direction
u=u';
end
% Calculate some help matrices
[Kv_ff, Cv_ff] = gp_trvar(gp, x); % 1 x f vector
K_fu = gp_cov(gp, x, u); % f x u
K_uu = gp_trcov(gp, u);    % u x u, noiseless covariance K_uu
Luu = chol(K_uu)';
% Evaluate the Lambda (La) for specific model
% Q_ff = K_fu*inv(K_uu)*K_fu'
% Here we need only the diag(Q_ff), which is evaluated below
B=Luu\K_fu';
iLaKfu = zeros(size(K_fu)); % f x u
for i=1:length(ind)
Qbl_ff = B(:,ind{i})'*B(:,ind{i});
[Kbl_ff, Cbl_ff] = gp_trcov(gp, x(ind{i},:));
% Add also La to the diagonal
Lab{i} = Cbl_ff - Qbl_ff + diag(La(ind{i}));
iLaKfu(ind{i},:) = Lab{i}\K_fu(ind{i},:);
end
A = K_uu+K_fu'*iLaKfu;
A = (A+A')./2; % Ensure symmetry
L = iLaKfu/chol(A);
% From this on evaluate the prediction
% See Snelson and Ghahramani (2007) for details
n=size(y,1);
iCv=zeros(n,1);
for i=1:length(ind)
iCv(ind{i},:) = diag(inv(Lab{i}));
end
%Varft=1./diag(inv(K+diag(La)))-La;
Varft=1./(iCv - sum(L.^2,2))-La;
case 'CS+FIC'
% CS+FIC
% Use inverse lemma for CS+FIC
% Code adapted from gp_pred (here Las is the same as La in gp_pred)
u = gp.X_u;
if size(u,2) ~= size(x,2)
% Turn the inducing inputs in the right direction
u=u';
end
n = size(x,1);
m = size(u,1);
ncf = length(gp.cf);
% Indexes to all non-compact support and compact support covariances.
cf1 = [];
cf2 = [];
% Loop through all covariance functions
for i1 = 1:ncf
if ~isfield(gp.cf{i1},'cs')
% Non-CS covariances
cf1 = [cf1 i1];
else
% CS-covariances
cf2 = [cf2 i1];
end
end
% First evaluate needed covariance matrices
% v defines that parameter is a vector
[Kv_ff, Cv_ff] = gp_trvar(gp, x, cf1); % f x 1 vector
K_fu = gp_cov(gp, x, u, cf1); % f x u
K_uu = gp_trcov(gp, u, cf1);   % u x u, noiseless covariance K_uu
K_uu = (K_uu+K_uu')./2; % ensure the symmetry of K_uu
Luu = chol(K_uu)';
% Evaluate the Lambda (La)
% Q_ff = K_fu*inv(K_uu)*K_fu'
B=Luu\(K_fu'); % u x f
Qv_ff=sum(B.^2)';
% Add also La to the vector of diagonal elements
Lav = Cv_ff-Qv_ff + La; % f x 1, Vector of diagonal elements
K_cs = gp_trcov(gp,x,cf2);
Las = sparse(1:n,1:n,Lav,n,n) + K_cs;
iLaKfu = Las\K_fu;
A = K_uu+K_fu'*iLaKfu;
A = (A+A')./2; % Ensure symmetry
L = iLaKfu/chol(A);
%Varft=1./diag(inv(K+diag(La)))-La;
Varft=1./(diag(inv(Las)) - sum(L.^2,2))-La;
otherwise
error('Unknown type of Gaussian process')
end
% check if cavity variances are negative
ii=find(Varft<0);
if ~isempty(ii)
warning('gpla_loopred: some LOO latent variances are negative');
Varft(ii) = gp.jitterSigma2;
end
Eft=f-Varft.*deriv;
if nargout==3
lpyt = gp.lik.fh.predy(gp.lik, Eft, Varft, y, z);
elseif nargout>3
[lpyt,Eyt,Varyt] = gp.lik.fh.predy(gp.lik, Eft, Varft, y, z);
end
case 'cavity'
% using EP equations
% latent posterior
[f, sigm2ii] = gpla_pred(gp, x, y, 'z', z, 'tstind', []);
% "site parameters"
W = -gp.lik.fh.llg2(gp.lik, y, f, 'latent', z);
deriv = gp.lik.fh.llg(gp.lik, y, f, 'latent', z);
sigm2_t = 1./W;
mu_t = f + sigm2_t.*deriv;
% "cavity parameters"
sigma2_i = 1./(1./sigm2ii-1./sigm2_t);
myy_i = sigma2_i.*(f./sigm2ii-mu_t./sigm2_t);
% check if cavity variances are negative
ii=find(sigma2_i<0);
if ~isempty(ii)
warning('gpla_loopred: some cavity variances are negative');
sigma2_i(ii) = sigm2ii(ii);
myy_i(ii) = f(ii);
end
% leave-one-out predictions
Eft=myy_i;
Varft=sigma2_i;
if nargout==3
lpyt = gp.lik.fh.predy(gp.lik, Eft, Varft, y, z);
elseif nargout>3
[lpyt,Eyt,Varyt] = gp.lik.fh.predy(gp.lik, Eft, Varft, y, z);
end
case 'inla'
% Leonhard Held and Birgit Schrödle and Håvard Rue (2010)
% Posterior and Cross-validatory Predictive Checks: A
% Comparison of MCMC and INLA. In (eds) Thomas Kneib and
% Gerhard Tutz, Statistical Modelling and Regression
% Structures, pp. 91-110. Springer.
% latent posterior
[f, sigm2ii, lp] = gpla_pred(gp, x, y, 'z', z, 'tstind', []);
Eft = zeros(tn,1);
Varft = zeros(tn,1);
lpyt = zeros(tn,1);
minf = f-6.*sqrt(sigm2ii);
maxf = f+6.*sqrt(sigm2ii);
for i=1:tn
if isempty(z)
z1 = [];
else
z1 = z(i);
end
[m0, m1, m2] = quad_moments(@(x) norm_pdf(x, f(i), sqrt(sigm2ii(i)))./llvec(gp.lik,y(i),x,z1), minf(i), maxf(i));
Eft(i) = m1;
Varft(i) = m2-Eft(i)^2;
lpyt(i) = -log(m0);
end
if nargout>3
[tmp,Eyt,Varyt] = gp.lik.fh.predy(gp.lik, Eft, Varft, y, z);
end
if sum(abs(lpyt)./abs(lp) > 5) > 0.1*tn
warning('Very bad predictive densities, gpla_loopred might not be reliable, check results!');
end
end
end
function expll = llvec(gplik, y, f, z)
for i=1:size(f,2)
expll(i) = exp(gplik.fh.ll(gplik, y, f(i), z));
end
end
|
github
|
lcnbeapp/beapp-master
|
lgpdens.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/lgpdens.m
| 18,497 |
windows_1250
|
c32ced344bd076d227b9aded6cc11400
|
function [p,pq,xx] = lgpdens(x,varargin)
%LGPDENS Logistic-Gaussian Process density estimate for 1D and 2D data
%
% Description
% LGPDENS(X,OPTIONS) Compute and plot LGP density estimate. X is
% 1D or 2D point data. For 1D data plot the mean and 95% region.
% For 2D data plot the density contours.
%
% [P,PQ,XT] = LGPDENS(X,OPTIONS) Compute LGP density estimate
% and return mean density P, 2.5% and 97.5% percentiles PQ, and
% grid locations.
%
% [P,PQ,XT] = LGPDENS(X,XT,OPTIONS) Compute LGP density estimate
% in the given grid locations XT.
%
% OPTIONS is optional parameter-value pair
% gridn - optional number of grid points used in each axis direction
% default is 400 for 1D, 20 for 2D.
% range - tells the estimation range, default is
% [min(min(x),mean(x)-3*std(x)), max(max(x),mean(x)+3*std(x))]
% for 1D [XMIN XMAX]
% for 2D [X1MIN X1MAX X2MIN X2MAX]
% gpcf - optional function handle of a GPstuff covariance function
% (default is @gpcf_sexp)
% latent_method - optional 'Laplace' (default) or 'MCMC'
% int_method - optional 'mode' (default), 'CCD' or 'grid'
% if latent_method is 'MCMC' then int_method is 'MCMC'
% display - defines if messages are displayed.
% 'off' (default) displays no output
% 'on' gives some output
% 'iter' displays output at each iteration
% speedup - defines if speed-up is used.
% 'off' (default) no speed-up is used
% 'on' With SEXP or EXP covariance function in 2D case
% uses Kronecker product structure and approximates the
% full posterior with a low-rank approximation. Otherwise
% with SEXP, EXP, MATERN32 and MATERN52 covariance
% functions in 1D and 2D cases uses FFT/FFT2 matrix-vector
% multiplication speed-up in the Newton's algorithm.
% cond_dens - defines if conditional density estimate is computed.
% 'off' (default) no conditional density
% 'on' computes for 2D the conditional median density
% estimate p(x2|x1) when the matrix [x1 x2] is given as
% input.
% basis_function - defines if basis functions are used.
% 'on' (default) uses linear and quadratic basis
% functions
% 'off' no basis functions
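%
%  Illustrative usage (a sketch only; the randn data below is a stand-in
%  for the user's own sample):
%
%    x = randn(500,1);                          % 1D data
%    lgpdens(x);                                % plot mean and 95% region
%    [p,pq,xt] = lgpdens(x,'gridn',200,'range',[-5 5]);
%
%    x2 = randn(500,2);                         % 2D data
%    lgpdens(x2,'gridn',25,'latent_method','MCMC');
%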
% Copyright (c) 2011-2012 Jaakko Riihimäki and Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'LGPDENS';
ip.addRequired('x', @(x) isnumeric(x) && (size(x,2)==1 || size(x,2)==2));
ip.addOptional('xt',NaN, @(x) isnumeric(x) && (size(x,2)==1 || size(x,2)==2));
ip.addParamValue('gridn',[], @(x) isnumeric(x));
ip.addParamValue('range',[], @(x) isempty(x)||isreal(x)&&(length(x)==2||length(x)==4));
ip.addParamValue('gpcf',@gpcf_sexp,@(x) ischar(x) || isa(x,'function_handle'));
ip.addParamValue('latent_method','Laplace', @(x) ismember(x,{'EP' 'Laplace' 'MCMC'}))
%ip.addParamValue('latent_method','Laplace', @(x) ismember(x,{'EP' 'Laplace'}))
ip.addParamValue('int_method','mode', @(x) ismember(x,{'mode' 'CCD', 'grid'}))
ip.addParamValue('normalize',false, @islogical);
ip.addParamValue('display', 'off', @(x) islogical(x) || ...
ismember(x,{'on' 'off' 'iter'}))
ip.addParamValue('speedup',[], @(x) ismember(x,{'on' 'off'}));
ip.addParamValue('cond_dens',[], @(x) ismember(x,{'on' 'off'}));
ip.addParamValue('basis_function',[], @(x) ismember(x,{'on' 'off'}));
ip.parse(x,varargin{:});
x=ip.Results.x;
xt=ip.Results.xt;
gridn=ip.Results.gridn;
xrange=ip.Results.range;
gpcf=ip.Results.gpcf;
latent_method=ip.Results.latent_method;
int_method=ip.Results.int_method;
normalize=ip.Results.normalize;
display=ip.Results.display;
speedup=ip.Results.speedup;
cond_dens=ip.Results.cond_dens;
basis_function=ip.Results.basis_function;
[n,m]=size(x);
switch m
case 1 % 1D
if ~isempty(cond_dens) && strcmpi(cond_dens,'on')
error('LGPDENS: the input x must be 2D if cond_dens option is ''on''.')
end
% Parameters for a grid
if isempty(gridn)
% number of points
gridn=400;
end
xmin=min(x);xmax=max(x);
if ~isempty(xrange)
% extend given range to include min(x) and max(x)
xmin=min(xmin,xrange(1));
xmax=max(xmax,xrange(2));
elseif ~isnan(xt)
% use xt to define range and
% extend it to include min(x) and max(x)
xmin=min(xmin,min(xt));
xmax=max(xmax,max(xt));
else
xmin=min(xmin,mean(x)-3*std(x));
xmax=max(xmax,mean(x)+3*std(x));
end
% Discretize the data
if isnan(xt)
xx=linspace(xmin,xmax,gridn)';
else
xx=xt;
gridn=numel(xt);
end
xd=xx(2)-xx(1);
yy=hist(x,xx)';
% normalise, so that same prior is ok for different scales
xxn=(xx-mean(xx))./std(xx);
%[Ef,Covf]=gpsmooth(xxn,yy,[xxn; xtn],gpcf,latent_method,int_method);
[Ef,Covf]=gpsmooth(xxn,yy,xxn,gpcf,latent_method,int_method,display,speedup,gridn,cond_dens,basis_function);
if strcmpi(latent_method,'MCMC')
PJR=zeros(size(Ef,1),size(Covf,3));
for i1=1:size(Covf,3)
qr=bsxfun(@plus,randn(1000,size(Ef,1))*chol(Covf(:,:,i1),'upper'),Ef(:,i1)');
qjr=exp(qr)';
pjr=bsxfun(@rdivide,qjr,sum(qjr));
pjr=pjr./xd;
PJR(:,i1)=mean(pjr,2);
end
pjr=PJR;
else
qr=bsxfun(@plus,randn(1000,size(Ef,1))*chol(Covf,'upper'),Ef');
qjr=exp(qr)';
pjr=bsxfun(@rdivide,qjr,sum(qjr(1:gridn,:)));
pjr=pjr./xd;
end
pp=mean(pjr')';
ppq=prctile(pjr',[2.5 97.5])';
if nargout<1
% no output, do the plot thing
newplot
hp=patch([xx; xx(end:-1:1)],[ppq(:,1); ppq(end:-1:1,2)],[.8 .8 .8]);
set(hp,'edgecolor',[.8 .8 .8])
xlim([xmin xmax])
line(xx,pp,'linewidth',2);
else
p=pp;
pq=ppq;
end
case 2 % 2D
if ~isempty(cond_dens) && strcmpi(cond_dens,'on') && ~isempty(speedup) && strcmp(speedup, 'on')
warning('No speed-up option available with the cond_dens option. Using full covariance instead.')
speedup='off';
end
% Find unique points
[xu,I,J]=unique(x,'rows');
% and count number of repeated x's
counts=crosstab(J);
nu=length(xu);
% Parameters for a grid
if isempty(gridn)
% number of points in each direction
gridn=20;
end
if numel(gridn)==1
gridn(2)=gridn(1);
end
x1min=min(x(:,1));x1max=max(x(:,1));
x2min=min(x(:,2));x2max=max(x(:,2));
if ~isempty(xrange)
% extend given range to include min(x) and max(x)
x1min=min(x1min,xrange(1));
x1max=max(x1max,xrange(2));
x2min=min(x2min,xrange(3));
x2max=max(x2max,xrange(4));
elseif ~isnan(xt)
% use xt to define range and
% extend it to include min(x) and max(x)
x1min=min(x1min,min(xt(:,1)));
x1max=max(x1max,max(xt(:,1)));
x2min=min(x2min,min(xt(:,2)));
x2max=max(x2max,max(xt(:,2)));
else
x1min=min(x1min,mean(x(:,1))-3*std(x(:,1)));
x1max=max(x1max,mean(x(:,1))+3*std(x(:,1)));
x2min=min(x2min,mean(x(:,2))-3*std(x(:,2)));
x2max=max(x2max,mean(x(:,2))+3*std(x(:,2)));
end
% Discretize the data
if isnan(xt)
% Form regular grid to discretize the data
zz1=linspace(x1min,x1max,gridn(1))';
zz2=linspace(x2min,x2max,gridn(2))';
[z1,z2]=meshgrid(zz1,zz2);
z=[z1(:),z2(:)];
nz=length(z);
xx=z;
if ~isempty(cond_dens) && strcmpi(cond_dens,'on')
% use ntx2 times more grid points for predictions
if gridn(2)>10
ntx2=3;
else
ntx2=10;
end
zzt1=linspace(x1min,x1max,gridn(1))';
zzt2=linspace(x2min,x2max,gridn(2)*ntx2)';
[zt1,zt2]=meshgrid(zzt1,zzt2);
zt=[zt1(:),zt2(:)];
%nzt=length(zt);
xt=zt;
end
else
xx=xt;
gridn=[length(unique(xx(:,1))) length(unique(xx(:,2)))];
end
yy=zeros(nz,1);
zi=interp2(z1,z2,reshape(1:nz,gridn(2),gridn(1)),xu(:,1),xu(:,2),'nearest');
for i1=1:nu
yy(zi(i1),1)=yy(zi(i1),1)+counts(i1);
end
%ye=ones(nz,1)./nz.*n;
unx1=unique(xx(:,1));
unx2=unique(xx(:,2));
xd=(unx1(2)-unx1(1))*(unx2(2)-unx2(1));
% normalise, so that same prior is ok for different scales
xxn=bsxfun(@rdivide,bsxfun(@minus,xx,mean(xx,1)),std(xx,1));
if ~isempty(cond_dens) && strcmpi(cond_dens,'on')
xxtn=bsxfun(@rdivide,bsxfun(@minus,xt,mean(xx,1)),std(xx,1));
end
% [Ef,Covf]=gpsmooth(xxn,yy,[xxn; xtn],gpcf,latent_method,int_method);
if ~isempty(cond_dens) && strcmpi(cond_dens,'on')
[Ef,Covf]=gpsmooth(xxn,yy,xxtn,gpcf,latent_method,int_method,display,speedup,gridn,cond_dens,basis_function);
else
[Ef,Covf]=gpsmooth(xxn,yy,xxn,gpcf,latent_method,int_method,display,speedup,gridn,cond_dens,basis_function);
end
if strcmpi(latent_method,'MCMC')
if ~isempty(cond_dens) && strcmpi(cond_dens,'on')
unx2=(unique(xt(:,2)));
xd2=(unx2(2)-unx2(1));
PJR=zeros(size(Ef,1),size(Covf,3));
for i1=1:size(Covf,3)
qr=bsxfun(@plus,randn(1000,size(Ef,1))*chol(Covf(:,:,i1),'upper'),Ef(:,i1)');
qjr=exp(qr)';
%pjr=bsxfun(@rdivide,qjr,sum(qjr));
pjr=qjr;
pjr2=reshape(pjr,[gridn(2)*ntx2 gridn(1) size(pjr,2)]);
for j1=1:size(pjr2,3)
pjr2(:,:,j1)=bsxfun(@rdivide,pjr2(:,:,j1),sum(pjr2(:,:,j1)))./xd2;
end
pjr=reshape(pjr2,[gridn(2)*ntx2*gridn(1) size(pjr,2)]);
PJR(:,i1)=mean(pjr,2);
end
pjr=PJR;
%qp=median(pjr2,3);
%qp=bsxfun(@rdivide,qp,sum(qp,1));
else
PJR=zeros(size(Ef,1),size(Covf,3));
for i1=1:size(Covf,3)
qr=bsxfun(@plus,randn(1000,size(Ef,1))*chol(Covf(:,:,i1),'upper'),Ef(:,i1)');
qjr=exp(qr)';
pjr=bsxfun(@rdivide,qjr,sum(qjr));
pjr=pjr./xd;
PJR(:,i1)=mean(pjr,2);
end
pjr=PJR;
%pjr=mean(PJR,2);
end
else
if strcmpi(speedup,'on') && length(Covf)==2
qr1=bsxfun(@plus,bsxfun(@times,randn(1000,size(Ef,1)),sqrt(Covf{1})'),Ef');
qr2=randn(1000,size(Covf{2},1))*Covf{2};
qr=qr1+qr2;
else
qr=bsxfun(@plus,randn(1000,size(Ef,1))*chol(Covf,'upper'),Ef');
end
qjr=exp(qr)';
if ~isempty(cond_dens) && strcmpi(cond_dens,'on')
pjr=zeros(size(qjr));
unx2=unique(xt(:,2));
xd2=(unx2(2)-unx2(1));
for k1=1:size(qjr,2)
qjrtmp=reshape(qjr(:,k1),[gridn(2)*ntx2 gridn(1)]);
qjrtmp=bsxfun(@rdivide,qjrtmp,sum(qjrtmp));
qjrtmp=qjrtmp./xd2;
pjr(:,k1)=qjrtmp(:);
end
else
pjr=bsxfun(@rdivide,qjr,sum(qjr));
pjr=pjr./xd;
end
end
%if ~isempty(cond_dens) && strcmpi(cond_dens,'on')
% pp=median(pjr')';
%else
pp=mean(pjr')';
%end
ppq=prctile(pjr',[2.5 97.5])';
if nargout<1
% no output, do the plot thing
if ~isempty(cond_dens) && strcmpi(cond_dens,'on')
pjr2=reshape(pjr,[gridn(2)*ntx2 gridn(1) size(pjr,2)]);
%qp=median(pjr2,3);
qp=mean(pjr2,3);
qp=bsxfun(@rdivide,qp,sum(qp,1));
qpc=cumsum(qp,1);
PL=[.05 .1 .2 .5 .8 .9 .95];
for i1=1:gridn(1)
pc=qpc(:,i1);
for pli=1:numel(PL)
qi(pli)=find(pc>PL(pli),1);
end
ql(:,i1)=unx2(qi);
end
hold on
h1=plot(zz1,ql(4,:)','-', 'color', [0 0 255]./255,'linewidth',2);
h2=plot(zz1,ql([3 5],:)','--', 'color', [0 127 0]./255,'linewidth',1);
h3=plot(zz1,ql([2 6],:)','-.', 'color', [255 0 0]./255,'linewidth',1);
h4=plot(zz1,ql([1 7],:)',':', 'color', [0 0 0]./255,'linewidth',1);
hold off
legend([h1 h2(1) h3(1) h4(1)],'.5','.2/.8','.1/.9','.05/.95')
%plot(zz1,ql','linewidth',1)
%legend('.05','.1','.2','.5','.8','.9','.95')
xlim([x1min x1max])
ylim([x2min x2max])
else
G=zeros(size(z1));
G(:)=prctile(pjr',50);
%contour(z1,z2,G);
pp=G(:);
p1=pp./sum(pp);
pu=sort(p1,'ascend');
pc=cumsum(pu);
PL=[.05 .1 .2 .5 .8 .9 .95];
qi=[];
for pli=1:numel(PL)
qi(pli)=find(pc>PL(pli),1);
end
pl=pu(qi).*sum(pp);
contour(z1,z2,G,pl);
%hold on, plot(x(:,1),x(:,2),'kx')
%colorbar
end
else
p=pp;
pq=ppq;
end
otherwise
error('X has to be Nx1 or Nx2')
end
end
function [Ef,Covf] = gpsmooth(xx,yy,xxt,gpcf,latent_method,int_method,display,speedup,gridn,cond_dens,basis_function)
% Make inference with log Gaussian process and EP or Laplace approximation
% gp_mc and gp_ia still uses numeric display option
if strcmp(display,'off')
displ=0;
else
displ=1;
end
nin = size(xx,2);
% init gp
if ~isempty(strfind(func2str(gpcf),'ppcs'))
% ppcs still have nin parameter...
gpcf1 = gpcf('nin',nin);
else
gpcf1 = gpcf();
end
% weakly informative prior
pm = prior_logunif();
pl = prior_t('s2', 10^2, 'nu', 4);
pa = prior_t('s2', 10^2, 'nu', 4);
%pm = prior_sqrtt('s2', 10^2, 'nu', 4);
%pl = prior_t('s2', 1^2, 'nu', 4);
%pa = prior_t('s2', 10^2, 'nu', 4);
% different covariance functions have different parameters
if isfield(gpcf1,'magnSigma2')
gpcf1 = gpcf(gpcf1, 'magnSigma2', .5, 'magnSigma2_prior', pm);
end
if isfield(gpcf1,'lengthScale')
gpcf1 = gpcf(gpcf1, 'lengthScale', .5, 'lengthScale_prior', pl);
end
if isfield(gpcf1,'alpha')
gpcf1 = gpcf(gpcf1, 'alpha', 20, 'alpha_prior', pa);
end
if isfield(gpcf1,'biasSigma2')
gpcf1 = gpcf(gpcf1, 'biasSigma2', 10, 'weightSigma2', 10,'biasSigma2_prior',prior_logunif(),'weightSigma2_prior',prior_logunif());
end
if ~isempty(cond_dens) && strcmp(cond_dens, 'on')
lik=lik_lgpc;
lik.gridn=gridn;
else
lik=lik_lgp;
end
% Create the GP structure
if ~isempty(basis_function) && strcmp(basis_function, 'off')
gp = gp_set('lik', lik, 'cf', {gpcf1}, 'jitterSigma2', 1e-4);
else
%gpmfco = gpmf_constant('prior_mean',0,'prior_cov',100);
gpmflin = gpmf_linear('prior_mean',0,'prior_cov',100);
gpmfsq = gpmf_squared('prior_mean',0,'prior_cov',100);
gp = gp_set('lik', lik, 'cf', {gpcf1}, 'jitterSigma2', 1e-4, 'meanf', {gpmflin,gpmfsq});
end
% First optimise hyperparameters using Laplace approximation
gp = gp_set(gp, 'latent_method', 'Laplace');
opt=optimset('TolFun',1e-2,'TolX',1e-3,'Display',display);
if ~isempty(speedup) && strcmp(speedup, 'on')
gp.latent_opt.gridn=gridn;
gp.latent_opt.pcg_tol=1e-12;
if size(xx,2)==2 && (strcmp(gp.cf{1}.type,'gpcf_sexp') || strcmp(gp.cf{1}.type,'gpcf_exp'))
% exclude eigenvalues smaller than 1e-6 or take 50%
% eigenvalues at most
gp.latent_opt.eig_tol=1e-6;
gp.latent_opt.eig_prct=0.5;
gp.latent_opt.kron=1;
opt.LargeScale='off';
if norm(xx-xxt)~=0
warning('In the low-rank approximation the grid locations xx are used instead of xxt in predictions.')
xxt=xx;
end
elseif strcmp(gp.cf{1}.type,'gpcf_sexp') || strcmp(gp.cf{1}.type,'gpcf_exp') || strcmp(gp.cf{1}.type,'gpcf_matern32') || strcmp(gp.cf{1}.type,'gpcf_matern52')
gp.latent_opt.fft=1;
end
end
if exist('fminunc')
gp=gp_optim(gp,xx,yy,'opt',opt, 'optimf', @fminunc);
else
gp=gp_optim(gp,xx,yy,'opt',opt, 'optimf', @fminlbfgs);
end
%gradcheck(gp_pak(gp), @gpla_nd_e, @gpla_nd_g, gp, xx, yy);
if strcmpi(latent_method,'MCMC')
gp = gp_set(gp, 'latent_method', 'MCMC');
%if ~isempty(cond_dens) && strcmpi(cond_dens,'on')
if size(xx,2)==2
% add more jitter for 2D cases with MCMC
gp = gp_set(gp, 'jitterSigma2', 1e-2);
%error('LGPDENS: MCMC is not implemented if cond_dens option is ''on''.')
end
% Here we use two stage sampling to get faster convergence
hmc_opt=hmc2_opt;
hmc_opt.steps=10;
hmc_opt.stepadj=0.05;
hmc_opt.nsamples=1;
latent_opt.display=0;
latent_opt.repeat = 20;
latent_opt.sample_latent_scale = 0.5;
hmc2('state', sum(100*clock))
% The first stage sampling
[r,g,opt]=gp_mc(gp, xx, yy, 'hmc_opt', hmc_opt, 'latent_opt', latent_opt, 'nsamples', 1, 'repeat', 15, 'display', displ);
%[r,g,opt]=gp_mc(gp, xx, yy, 'latent_opt', latent_opt, 'nsamples', 1, 'repeat', 15);
% re-set some of the sampling options
hmc_opt.steps=4;
hmc_opt.stepadj=0.05;
%latent_opt.repeat = 5;
hmc2('state', sum(100*clock));
% The second stage sampling
% Notice that previous record r is given as an argument
[rgp,g,opt]=gp_mc(gp, xx, yy, 'hmc_opt', hmc_opt, 'nsamples', 500,'latent_opt', latent_opt, 'record', r, 'display', displ);
% Remove burn-in
rgp=thin(rgp,102,4);
[Ef, Covf] = gpmc_jpreds(rgp, xx, yy, xxt);
else
if strcmpi(int_method,'mode')
% Just make prediction for the test points
[Ef,Covf] = gp_pred(gp, xx, yy, xxt);
else
% integrate over the hyperparameters
%[~, ~, ~, Ef, Covf] = gp_ia(opt, gp, xx, yy, xt, param);
gpia=gp_ia(gp, xx, yy, 'int_method', int_method, 'display', displ);
[Ef, Covf]=gpia_jpred(gpia, xx, yy, xxt);
end
end
end
|
github
|
lcnbeapp/beapp-master
|
gp_waic.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gp_waic.m
| 22,950 |
utf_8
|
224efafdd64d00db4c610107413a075d
|
function waic = gp_waic(gp, x, y, varargin)
%GP_WAIC The widely applicable information criterion (WAIC) for GP model
%
% Description
% WAIC = GP_WAIC(GP, X, Y) evaluates WAIC defined by
% Watanabe(2010) given a Gaussian process model GP, training
% inputs X and training outputs Y. Instead of Bayes loss we
% compute the Bayes utility which is just the negative of loss
% used by Watanabe.
%
% WAIC is evaluated as follows when using the variance form
%
% WAIC(n) = BUt(n) - V/n
%
% where BUt(n) is Bayesian training utility, V is functional variance
% and n is the number of training inputs.
%
% BUt = mean(log(p(yt | xt, x, y)))
% V = sum(E[log(p(y|th))^2] - E[log(p(y|th))]^2)
%
% When using the Gibbs training loss, WAIC is evaluated as follows
%
% WAIC(n) = BUt(n) - 2*(BUt(n) - GUt(n))
%
% where BUt(n) is as above and GUt is Gibbs training utility
%
% GUt(n) = E_th[mean(log(p(y|th)))].
%
% GP can be a Gaussian process structure, a record structure
% from GP_MC or an array of GPs from GP_IA.
%
% OPTIONS is optional parameter-value pair
% method - Method to evaluate waic, 'V' = Variance method, 'G' = Gibbs
% training utility method (default = 'V')
% form - Return form, 'mean' returns the mean value and 'all'
% returns the values for all data points (default = 'mean')
% z - optional observed quantity in triplet (x_i,y_i,z_i)
% Some likelihoods may use this. For example, in case of
% Poisson likelihood we have z_i=E_i, that is, expected value
% for ith case.
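%
% Illustrative usage (a sketch; gp, x and y stand for an already
% constructed GPstuff model structure and its training data):
%
% waic = gp_waic(gp, x, y); % variance form, mean over data
% waicG = gp_waic(gp, x, y, 'method', 'G'); % Gibbs training utility form
% waics = gp_waic(gp, x, y, 'form', 'all'); % per-observation values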
%
% See also
% GP_DIC, DEMO_MODELASSESMENT1, DEMO_MODELASSESMENT2
%
% References
%
% Watanabe(2010). Equations of states in singular statistical
% estimation. Neural Networks 23 (2010), 20-34
%
% Watanabe(2010). Asymptotic Equivalence of Bayes Cross Validation and
% Widely applicable Information Criterion in Singular Learning Theory.
% Journal of Machine Learning Research 11 (2010), 3571-3594.
%
%
% Copyright (c) 2011-2013 Ville Tolvanen
ip=inputParser;
ip.FunctionName = 'GP_WAIC';
ip.addRequired('gp',@(x) isstruct(x) || iscell(x));
ip.addRequired('x', @(x) ~isempty(x) && isreal(x) && all(isfinite(x(:))))
ip.addRequired('y', @(x) ~isempty(x) && isreal(x) && all(isfinite(x(:))))
ip.addParamValue('method', 'V', @(x) ismember(x,{'V' 'G'}))
ip.addParamValue('form', 'mean', @(x) ismember(x,{'mean','all'}))
ip.addParamValue('z', [], @(x) isreal(x) && all(isfinite(x(:))))
ip.parse(gp, x, y, varargin{:});
method=ip.Results.method;
form=ip.Results.form;
% pass these forward
options=struct();
z = ip.Results.z;
if ~isempty(ip.Results.z)
options.zt=ip.Results.z;
options.z=ip.Results.z;
end
[tn, nin] = size(x);
% ====================================================
if isstruct(gp) % Single GP or MCMC solution
switch gp.type
case {'FULL' 'VAR' 'DTC' 'SOR'}
tstind = [];
case {'FIC' 'CS+FIC'}
tstind = 1:tn;
case 'PIC'
tstind = gp.tr_index;
end
if isfield(gp, 'etr')
% MCMC solution
[Ef, Varf, BUt] = gpmc_preds(gp,x,y, x, 'yt', y, 'tstind', tstind, options);
BUt=log(mean(exp(BUt),2));
GUt = zeros(tn,1);
Elog = zeros(tn,1);
Elog2 = zeros(tn,1);
nsamples = length(gp.edata);
if strcmp(gp.type, 'PIC')
tr_index = gp.tr_index;
gp = rmfield(gp, 'tr_index');
else
tr_index = [];
end
%Ef = zeros(tn, nsamples);
%Varf = zeros(tn, nsamples);
sigma2 = zeros(tn, nsamples);
for j = 1:nsamples
Gp = take_nth(gp,j);
if strcmp(gp.type, 'FIC') || strcmp(gp.type, 'PIC') || strcmp(gp.type, 'CS+FIC') || strcmp(gp.type, 'VAR') || strcmp(gp.type, 'DTC') || strcmp(gp.type, 'SOR')
Gp.X_u = reshape(Gp.X_u,length(Gp.X_u)/nin,nin);
end
Gp.tr_index = tr_index;
gp_array{j} = Gp;
%[Ef(:,j), Varf(:,j)] = gp_pred(Gp, x, y, x, 'yt', y, 'tstind', tstind, options);
if isfield(gp.lik.fh,'trcov')
sigma2(:,j) = repmat(Gp.lik.sigma2,1,tn);
end
end
if isequal(method,'V')
% Evaluate WAIC using the Variance method
if isfield(gp.lik.fh,'trcov')
% Gaussian likelihood
for i=1:tn
% fmin = mean(Ef(i,:) - 9*sqrt(Varf(i,:)));
% fmax = mean(Ef(i,:) + 9*sqrt(Varf(i,:)));
% Elog(i) = quadgk(@(f) mean(multi_npdf(f,Ef(i,:),(Varf(i,:))) ...
% .*bsxfun(@minus,-bsxfun(@rdivide,(repmat((y(i)-f),nsamples,1)).^2,(2.*sigma2(i,:))'), 0.5*log(2*pi*sigma2(i,:))').^2), fmin, fmax);
% Elog2(i) = quadgk(@(f) mean(multi_npdf(f,Ef(i,:),(Varf(i,:))) ...
% .*bsxfun(@minus,-bsxfun(@rdivide,(repmat((y(i)-f),nsamples,1)).^2,(2.*sigma2(i,:))'), 0.5*log(2*pi*sigma2(i,:))')), fmin, fmax);
%
m = Ef(i,:);
s2 = Varf(i,:);
m0 = 1; m1 = m; m2 = m.^2 + s2; m3 = m.*(m.^2+3*s2);
m4 = m.^4+6.*m.^2.*s2+3*s2.^2;
Elog2(i) = mean((-0.5.*log(2.*pi.*sigma2(i,:)) - y(i).^2./(2.*sigma2(i,:))).*m0 - 1./(2.*sigma2(i,:)) .* m2 + y(i)./sigma2(i,:) .* m1);
Elog(i) = mean((1/4 .* m4 - y(i) .* m3 + (3.*y(i).^2./2+0.5.*log(2.*pi.*sigma2(i,:)).*sigma2(i,:)) .* m2 ...
- (y(i).^3 + y(i).*log(2.*pi.*sigma2(i,:)).*sigma2(i,:)) .* m1 + (y(i).^4./4 + 0.5.*y(i).^2.*log(2.*pi.*sigma2(i,:)).*sigma2(i,:) ...
+ 0.25.*log(2.*pi.*sigma2(i,:)).^2.*sigma2(i,:).^2) .* m0) ./ sigma2(i,:).^2);
end
Elog2 = Elog2.^2;
Vn = (Elog-Elog2);
if strcmp(form, 'mean')
Vn = mean(Vn);
BUt = mean(BUt);
end
waic = BUt - Vn;
else
% non-Gaussian likelihood
for i=1:tn
if ~isempty(z)
z1 = z(i);
else
z1 = [];
end
if ~isequal(gp.lik.type, 'Coxph')
fmin = mean(Ef(i,:) - 9*sqrt(Varf(i,:)));
fmax = mean(Ef(i,:) + 9*sqrt(Varf(i,:)));
Elog(i) = quadgk(@(f) mean(multi_npdf(f,Ef(i,:),(Varf(i,:))) ...
.*llvec(gp_array, y(i), f, z1).^2), fmin, fmax);
Elog2(i) = quadgk(@(f) mean(multi_npdf(f,Ef(i,:),(Varf(i,:))) ...
.*llvec(gp_array, y(i), f, z1)), fmin, fmax);
else
ntime = size(gp.lik.xtime,1);
for i2=1:nsamples
% Use MC to integrate over latents
ns = 10000;
Sigma_tmp = diag(Varf([1:ntime ntime+i],i2));
f = mvnrnd(Ef([1:ntime ntime+i],i2), Sigma_tmp, ns);
tmp2(i2) = 1/ns * sum(llvec(gp_array{i2}, y(i,:), f', z1));
tmp(i2) = 1/ns * sum((llvec(gp_array{i2}, y(i,:), f', z1)).^2);
end
Elog2(i)=mean(tmp2);
Elog(i)=mean(tmp);
end
end
Elog2 = Elog2.^2;
Vn = (Elog-Elog2);
if strcmp(form, 'mean')
Vn = mean(Vn);
BUt = mean(BUt);
end
waic = BUt - Vn;
end
else
% Evaluate WAIC using the expected value form via Gibbs training
% loss
if isfield(gp.lik.fh,'trcov')
% Gaussian likelihood
for i=1:tn
fmin = mean(Ef(i,:) - 9*sqrt(Varf(i,:)));
fmax = mean(Ef(i,:) + 9*sqrt(Varf(i,:)));
GUt(i) = quadgk(@(f) mean(multi_npdf(f,Ef(i,:),(Varf(i,:))) ...
.*bsxfun(@minus,-bsxfun(@rdivide,(repmat((y(i)-f),nsamples,1)).^2,(2.*sigma2(i,:))'), 0.5*log(2*pi*sigma2(i,:))')), fmin, fmax);
end
if strcmp(form, 'mean')
GUt = mean(GUt);
BUt = mean(BUt);
end
waic = BUt-2*(BUt-GUt);
else
% non-Gaussian likelihood
for i=1:tn
if ~isempty(z)
z1 = z(i);
else
z1 = [];
end
fmin = mean(Ef(i,:) - 9*sqrt(Varf(i,:)));
fmax = mean(Ef(i,:) + 9*sqrt(Varf(i,:)));
GUt(i) = quadgk(@(f) mean(multi_npdf(f,Ef(i,:),(Varf(i,:))) ...
.*llvec(gp_array, y(i), f, z1)), fmin, fmax);
end
if strcmp(form, 'mean')
GUt = mean(GUt);
BUt = mean(BUt);
end
waic = BUt-2*(BUt-GUt);
end
end
else
% A single GP solution
[Ef, Varf, BUt] = gp_pred(gp, x, y, x, 'yt', y, 'tstind', tstind, options);
GUt = zeros(tn,1);
Elog = zeros(tn,1);
Elog2 = zeros(tn,1);
if isequal(method,'V')
% Estimate WAIC with variance form
if isfield(gp.lik.fh,'trcov')
% Gaussian likelihood
sigma2 = gp.lik.sigma2;
for i=1:tn
% Analytical moments for Gaussian distribution
m0 = 1; m1 = Ef(i); m2 = Ef(i)^2 + Varf(i); m3 = Ef(i)*(Ef(i)^2+3*Varf(i));
m4 = Ef(i)^4+6*Ef(i)^2*Varf(i)+3*Varf(i)^2;
Elog2(i) = (-0.5*log(2*pi*sigma2) - y(i).^2./(2.*sigma2))*m0 - 1./(2.*sigma2) * m2 + y(i)./sigma2 * m1;
Elog(i) = (1/4 * m4 - y(i) * m3 + (3*y(i).^2./2+0.5*log(2*pi*sigma2).*sigma2) * m2 ...
- (y(i).^3 + y(i).*log(2*pi*sigma2).*sigma2) * m1 + (y(i).^4/4 + 0.5*y(i).^2*log(2*pi*sigma2).*sigma2 ...
+ 0.25*log(2*pi*sigma2).^2.*sigma2.^2) * m0) ./ sigma2.^2;
end
Elog2 = Elog2.^2;
Vn = Elog-Elog2;
if strcmp(form,'mean')
BUt = mean(BUt);
Vn = mean(Vn);
end
waic = BUt - Vn;
else
% Non-Gaussian likelihood
for i=1:tn
if ~isempty(z)
z1 = z(i);
else
z1 = [];
end
if ~isequal(gp.lik.type, 'Coxph')
fmin = Ef(i)-9*sqrt(Varf(i));
fmax = Ef(i)+9*sqrt(Varf(i));
Elog(i) = quadgk(@(f) norm_pdf(f, Ef(i), sqrt(Varf(i))).*llvec(gp, y(i), f, z1).^2 ,...
fmin, fmax);
Elog2(i) = quadgk(@(f) norm_pdf(f, Ef(i), sqrt(Varf(i))).*llvec(gp, y(i), f, z1) ,...
fmin, fmax);
else
% Use MC to integrate over latents
ntime = size(gp.lik.xtime,1);
ns = 10000;
Sigma_tmp = Varf([1:ntime ntime+i], [1:ntime ntime+i]);
Sigma_tmp = (Sigma_tmp + Sigma_tmp') ./ 2;
f = mvnrnd(Ef([1:ntime ntime+i]), Sigma_tmp, ns);
Elog2(i) = 1/ns * sum(llvec(gp, y(i,:), f', z1));
Elog(i) = 1/ns * sum((llvec(gp, y(i,:), f', z1)).^2);
end
end
Elog2 = Elog2.^2;
Vn = Elog-Elog2;
if strcmp(form, 'mean')
Vn = mean(Vn);
BUt = mean(BUt);
end
waic = BUt - Vn;
end
else
% WAIC using the expected value form via Gibbs training loss GUt
if isfield(gp.lik.fh,'trcov')
% Gaussian likelihood
sigma2 = gp.lik.sigma2;
for i=1:tn
if Varf(i)<eps
GUt(i)=(-0.5*log(2*pi*sigma2)- (y(i) - Ef(i)).^2/(2.*sigma2));
else
% GUt(i) = quadgk(@(f) norm_pdf(f,Ef(i),sqrt(Varf(i))).*(-0.5*log(2*pi*sigma2)- (y(i) - f).^2/(2.*sigma2)), fmin, fmax);
m0 = 1; m1 = Ef(i); m2 = Ef(i)^2 + Varf(i);
GUt(i) = (-0.5*log(2*pi*sigma2) - y(i).^2./(2.*sigma2))*m0 - 1./(2.*sigma2) * m2 + y(i)./sigma2 * m1;
end
end
if strcmp(form,'mean')
GUt = mean(GUt);
BUt = mean(BUt);
end
waic = BUt-2*(BUt-GUt);
else
% Non-Gaussian likelihood
for i=1:tn
if ~isempty(z)
z1 = z(i);
else
z1 = [];
end
if ~isequal(gp.lik.type, 'Coxph')
fmin = Ef(i)-9*sqrt(Varf(i));
fmax = Ef(i)+9*sqrt(Varf(i));
GUt(i) = quadgk(@(f) norm_pdf(f, Ef(i), sqrt(Varf(i))).*llvec(gp, y(i), f, z1) ,...
fmin, fmax);
else
% If likelihood coxph use mc to integrate over latents
ntime = size(gp.lik.xtime,1);
ns = 10000;
Sigma_tmp = Varf([1:ntime ntime+i], [1:ntime ntime+i]);
Sigma_tmp = (Sigma_tmp + Sigma_tmp') ./ 2;
f = mvnrnd(Ef([1:ntime ntime+i]), Sigma_tmp, ns);
GUt(i) = 1/ns * sum(llvec(gp, y(i), f', z1));
end
end
if strcmp(form,'mean')
GUt = mean(GUt);
BUt = mean(BUt);
end
waic = BUt-2*(BUt-GUt);
end
end
end
elseif iscell(gp)
% gp_ia solution
switch gp{1}.type
case {'FULL' 'VAR' 'DTC' 'SOR'}
tstind = [];
case {'FIC' 'CS+FIC'}
tstind = 1:tn;
case 'PIC'
tstind = gp{1}.tr_index;
end
[tmp, tmp, BUt] = gp_pred(gp,x,y, x, 'yt', y, 'tstind', tstind, options);
GUt = zeros(tn,1);
Elog = zeros(tn,1);
Elog2 = zeros(tn,1);
nsamples = length(gp);
for j = 1:nsamples
Gp = gp{j};
weight(j) = Gp.ia_weight;
w(j,:) = gp_pak(Gp);
[Ef(:,j), Varf(:,j)] = gp_pred(Gp, x, y, x, 'yt', y, 'tstind', tstind, options);
if isfield(Gp.lik.fh,'trcov')
sigma2(:,j) = repmat(Gp.lik.sigma2,1,tn);
end
end
if isequal(method,'V')
% Evaluate WAIC using the variance form
if isfield(gp{1}.lik.fh,'trcov')
% Gaussian likelihood
for i=1:tn
fmin = sum(weight.*Ef(i,:) - 9*weight.*sqrt(Varf(i,:)));
fmax = sum(weight.*Ef(i,:) + 9*weight.*sqrt(Varf(i,:)));
Elog(i) = quadgk(@(f) sum(bsxfun(@times, multi_npdf(f,Ef(i,:),(Varf(i,:))),weight') ...
.*bsxfun(@minus,-bsxfun(@rdivide,(repmat((y(i)-f),nsamples,1)).^2,(2.*sigma2(i,:))'), 0.5*log(2*pi*sigma2(i,:))').^2), fmin, fmax);
Elog2(i) = quadgk(@(f) sum(bsxfun(@times, multi_npdf(f,Ef(i,:),(Varf(i,:))),weight') ...
.*bsxfun(@minus,-bsxfun(@rdivide,(repmat((y(i)-f),nsamples,1)).^2,(2.*sigma2(i,:))'), 0.5*log(2*pi*sigma2(i,:))')), fmin, fmax);
end
Elog2 = Elog2.^2;
Vn = (Elog-Elog2);
if strcmp(form, 'mean')
Vn = mean(Vn);
BUt = mean(BUt);
end
waic = BUt - Vn;
else
% non-Gaussian likelihood
for i=1:tn
if ~isempty(z)
z1 = z(i);
else
z1 = [];
end
fmin = sum(weight.*Ef(i,:) - 9*weight.*sqrt(Varf(i,:)));
fmax = sum(weight.*Ef(i,:) + 9*weight.*sqrt(Varf(i,:)));
Elog(i) = quadgk(@(f) sum(bsxfun(@times, multi_npdf(f,Ef(i,:),(Varf(i,:))),weight') ...
.*llvec(gp, y(i), f, z1).^2), fmin, fmax);
Elog2(i) = quadgk(@(f) sum(bsxfun(@times, multi_npdf(f,Ef(i,:),(Varf(i,:))),weight') ...
.*llvec(gp, y(i), f, z1)), fmin, fmax);
end
Elog2 = Elog2.^2;
Vn = (Elog-Elog2);
if strcmp(form, 'mean')
Vn = mean(Vn);
BUt = mean(BUt);
end
waic = BUt - Vn;
end
else
% Evaluate WAIC using the expected value form via Gibbs training loss
if isfield(gp{1}.lik.fh,'trcov')
% Gaussian likelihood
for i=1:tn
fmin = sum(weight.*Ef(i,:) - 9*weight.*sqrt(Varf(i,:)));
fmax = sum(weight.*Ef(i,:) + 9*weight.*sqrt(Varf(i,:)));
GUt(i) = quadgk(@(f) sum(bsxfun(@times, multi_npdf(f,Ef(i,:),(Varf(i,:))),weight') ...
.*bsxfun(@minus,-bsxfun(@rdivide,(repmat((y(i)-f),nsamples,1)).^2,(2.*sigma2(i,:))'), 0.5*log(2*pi*sigma2(i,:))')), fmin, fmax);
end
if strcmp(form, 'mean')
GUt = mean(GUt);
BUt = mean(BUt);
end
waic = BUt-2*(BUt-GUt);
else
% non-Gaussian likelihood
for i=1:tn
if ~isempty(z)
z1 = z(i);
else
z1 = [];
end
fmin = sum(weight.*Ef(i,:) - 9*weight.*sqrt(Varf(i,:)));
fmax = sum(weight.*Ef(i,:) + 9*weight.*sqrt(Varf(i,:)));
GUt(i) = quadgk(@(f) sum(bsxfun(@times, multi_npdf(f,Ef(i,:),(Varf(i,:))),weight') ...
.*llvec(gp, y(i), f, z1)), fmin, fmax);
end
if strcmp(form, 'mean')
GUt = mean(GUt);
BUt = mean(BUt);
end
waic = BUt-2*(BUt-GUt);
end
end
end
end
function lls=llvec(gp, y, fs, z)
% Compute a vector of log-likelihood values for the vector argument fs,
% as used by quadgk. In the IA or MC case, return a matrix with one row
% per GP in the array and one column per element of fs.
if isstruct(gp)
% single gp
lls=zeros(1,size(fs,2));
for i1=1:size(fs,2)
lls(i1)=gp.lik.fh.ll(gp.lik,y,fs(:,i1),z);
end
% else
% % mc
% lls=zeros(length(gp), length(fs));
% for i=1:numel(fs)
% for j=1:numel(gp.edata)
% Gp = take_nth(gp, j);
% lls(j,i) = Gp.lik.fh.ll(Gp.lik, y, fs(i), z);
% end
% end
else
% ia & mc
lls=zeros(length(gp), length(fs));
for i=1:numel(fs)
for j=1:numel(gp)
lls(j,i) = gp{j}.lik.fh.ll(gp{j}.lik, y, fs(i), z);
end
end
end
end
function mpdf = multi_npdf(f, mean, sigma2)
% For every element in f, compute the normal density values
% norm_pdf(f(i), mean, sqrt(sigma2)). If mean and sigma2
% are vectors, returns a length(mean) x length(f) matrix.
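% Example (a sketch): multi_npdf([-1 0 1], [0;2], [1;4]) returns a 2x3
% matrix whose k:th row is norm_pdf([-1 0 1], mean(k), sqrt(sigma2(k))).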
mpdf = zeros(length(mean), length(f));
for i=1:length(f)
mpdf(:,i) = norm_pdf(f(i), mean, sqrt(sigma2));
end
end
function [m_0, m_1, m_2, m_3, m_4] = moments(fun, a, b, rtol, atol, minsubs)
% MOMENTS Calculate the 0th to 4th moments of a given
% (unnormalized) probability distribution
%
% [m_0, m_1, m_2, m_3, m_4] = moments(fun, a, b, rtol, atol, minsubs)
% Inputs:
% fun = Function handle to the unnormalized probability distribution
% a,b = integration limits [a,b]
% rtol = relative tolerance for the integration (optional, default 1e-6)
% atol = absolute tolerance for the integration (optional, default 1e-10)
% minsubs = minimum number of subintervals (optional, default 10)
%
% Returns the normalization m0 and the normalized raw moments:
% m0 = int_a^b fun(x) dx
% m1 = int_a^b x*fun(x) dx / m0
% m2 = int_a^b x^2*fun(x) dx / m0
% m3 = int_a^b x^3*fun(x) dx / m0
% m4 = int_a^b x^4*fun(x) dx / m0
%
% The function uses an adaptive Gauss-Kronrod quadrature. The same set of
% integration points and intervals are used for each moment. This speeds up
% the evaluations by factor 3, since the function evaluations are done only
% once.
%
% The quadrature method is described by:
% L.F. Shampine, "Vectorized Adaptive Quadrature in Matlab",
% Journal of Computational and Applied Mathematics, 211, 2008,
% pp. 131-140.
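%
% Example of the intended behaviour (a sketch; this is a local function,
% so it can only be called from within this file):
% fun = @(x) exp(-0.5*x.^2); % unnormalized standard normal
% [m0, m1, m2] = moments(fun, -10, 10); % m0 ~ sqrt(2*pi), m1 ~ 0, m2 ~ 1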
% Copyright (c) 2010 Jarno Vanhatalo, Jouni Hartikainen
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
maxsubs = 650;
if nargin < 4
rtol = 1.e-6;
end
if nargin < 5
atol = 1.e-10;
end
if nargin < 6
minsubs = 10;
end
rtol = max(rtol,100*eps);
atol = max(atol,0);
minsubs = max(minsubs,2); % At least two subintervals are needed
% points and weights
points15 = [0.2077849550078985; 0.4058451513773972; 0.5860872354676911; ...
0.7415311855993944; 0.8648644233597691; 0.9491079123427585; ...
0.9914553711208126];
points = [-points15(end:-1:1); 0; points15];
w15 = [0.2044329400752989, 0.1903505780647854, 0.1690047266392679, ...
0.1406532597155259, 0.1047900103222502, 0.06309209262997855, ...
0.02293532201052922];
w = [w15(end:-1:1), 0.2094821410847278, w15];
w7 = [0,0.3818300505051189,0,0.2797053914892767,0,0.1294849661688697,0];
ew = w - [w7(end:-1:1), 0.4179591836734694, w7];
samples = numel(w);
% split the interval.
if b-a <= 0
c = a; a = b; b=c;
warning('The end of the integration interval was not greater than the start; the limits were swapped.')
end
apu = a + (1:(minsubs-1))./minsubs*(b-a);
apu = [a,apu,b];
subs = [apu(1:end-1);apu(2:end)];
% Initialize partial sums.
Ifx_ok = 0;
Ifx1_ok = 0;
Ifx2_ok = 0;
Ifx3_ok = 0;
Ifx4_ok = 0;
% The main loop
while true
% subintervals and their midpoints
midpoints = sum(subs)/2;
halfh = diff(subs)/2;
x = bsxfun(@plus,points*halfh,midpoints);
x = reshape(x,1,[]);
fx = fun(x);
fx1 = fx.*x;
fx2 = fx.*x.^2;
fx3 = fx.*x.^3;
fx4 = fx.*x.^4;
fx = reshape(fx,samples,[]);
fx1 = reshape(fx1,samples,[]);
fx2 = reshape(fx2,samples,[]);
fx3 = reshape(fx3,samples,[]);
fx4 = reshape(fx4,samples,[]);
% Subintegrals.
Ifxsubs = (w*fx) .* halfh;
errsubs = (ew*fx) .* halfh;
Ifxsubs1 = (w*fx1) .* halfh;
Ifxsubs2 = (w*fx2) .* halfh;
Ifxsubs3 = (w*fx3) .* halfh;
Ifxsubs4 = (w*fx4) .* halfh;
% Ifx and tol.
Ifx = sum(Ifxsubs) + Ifx_ok;
Ifx1 = sum(Ifxsubs1) + Ifx1_ok;
Ifx2 = sum(Ifxsubs2) + Ifx2_ok;
Ifx3 = sum(Ifxsubs3) + Ifx3_ok;
Ifx4 = sum(Ifxsubs4) + Ifx4_ok;
tol = max(atol,rtol*abs(Ifx));
% determine the indices ndx of Ifxsubs for which the
% errors are acceptable and remove those from subs
ndx = find(abs(errsubs) <= (2/(b-a)*halfh*tol));
subs(:,ndx) = [];
if isempty(subs)
break
end
% Update the integral.
Ifx_ok = Ifx_ok + sum(Ifxsubs(ndx));
Ifx1_ok = Ifx1_ok + sum(Ifxsubs1(ndx));
Ifx2_ok = Ifx2_ok + sum(Ifxsubs2(ndx));
Ifx3_ok = Ifx3_ok + sum(Ifxsubs3(ndx));
Ifx4_ok = Ifx4_ok + sum(Ifxsubs4(ndx));
% Quit if too many subintervals.
nsubs = 2*size(subs,2);
if nsubs > maxsubs
warning('quad_moments: Reached the limit on the maximum number of intervals in use.');
break
end
midpoints(ndx) = [];
subs = reshape([subs(1,:); midpoints; midpoints; subs(2,:)],2,[]); % Divide the remaining subintervals in half
end
% Scale moments
m_0 = Ifx;
m_1 = Ifx1./Ifx;
m_2 = Ifx2./Ifx;
m_3 = Ifx3./Ifx;
m_4 = Ifx4./Ifx;
end
|
github
|
lcnbeapp/beapp-master
|
gpcf_ppcs0.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gpcf_ppcs0.m
| 37,470 |
utf_8
|
8e0026784ac7fd43fed2a933ac703b6c
|
function gpcf = gpcf_ppcs0(varargin)
%GPCF_PPCS0 Create a piecewise polynomial (q=0) covariance function
%
% Description
% GPCF = GPCF_PPCS0('nin',nin,'PARAM1',VALUE1,'PARAM2,VALUE2,...)
% creates piecewise polynomial (q=0) covariance function
% structure in which the named parameters have the specified
% values. Any unspecified parameters are set to default values.
% Obligatory parameter is 'nin', which tells the dimension
% of input space.
%
% GPCF = GPCF_PPCS0(GPCF,'PARAM1',VALUE1,'PARAM2,VALUE2,...)
% modify a covariance function structure with the named
% parameters altered with the specified values.
%
% Parameters for piecewise polynomial (q=0) covariance function [default]
% magnSigma2 - magnitude (squared) [0.1]
% lengthScale - length scale for each input. [1]
% This can be either scalar corresponding
% to an isotropic function or vector
% defining own length-scale for each input
% direction.
% l_nin - order of the polynomial [floor(nin/2) + 1]
% Has to be greater than or equal to default.
% magnSigma2_prior - prior for magnSigma2 [prior_logunif]
% lengthScale_prior - prior for lengthScale [prior_t]
% metric - metric structure used by the covariance function []
% selectedVariables - vector defining which inputs are used [all]
% selectedVariables is shorthand for using
% metric_euclidean with corresponding components
%
% Note! If the prior is 'prior_fixed' then the parameter in
% question is considered fixed and it is not handled in
% optimization, grid integration, MCMC etc.
%
% The piecewise polynomial function is the following:
%
% k_pp0(x_i, x_j) = ma2*cs^(l)
%
% where r = sqrt( sum_d (x_i,d - x_j,d).^2./l^2_d )
% l = floor(l_nin/2) + 1
% cs = max(0,1-r)
% and l_nin must be greater than or equal to gpcf.nin
%
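% Illustrative usage (a sketch; x stands for an N x 2 matrix of training
% inputs and SuiteSparse is assumed to be installed):
%
% cf = gpcf_ppcs0('nin', 2, 'lengthScale', [1 2], 'magnSigma2', 0.5);
% gp = gp_set('cf', {cf});
% K = gp_trcov(gp, x); % sparse, compactly supported covariance matrix
%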
% NOTE! Use of gpcf_ppcs0 requires that you have installed
% GPstuff with SuiteSparse.
%
% See also
% GP_SET, GPCF_*, PRIOR_*, METRIC_*
% Copyright (c) 2009-2010 Jarno Vanhatalo
% Copyright (c) 2010 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
if nargin>0 && ischar(varargin{1}) && ismember(varargin{1},{'init' 'set'})
% remove init and set
varargin(1)=[];
end
ip=inputParser;
ip.FunctionName = 'GPCF_PPCS0';
ip.addOptional('gpcf', [], @isstruct);
ip.addParamValue('nin',[], @(x) isscalar(x) && x>0 && mod(x,1)==0);
ip.addParamValue('magnSigma2',0.1, @(x) isscalar(x) && x>0);
ip.addParamValue('lengthScale',1, @(x) isvector(x) && all(x>0));
ip.addParamValue('l_nin',[], @(x) isscalar(x) && x>0 && mod(x,1)==0);
ip.addParamValue('metric',[], @isstruct);
ip.addParamValue('magnSigma2_prior', prior_logunif(), ...
@(x) isstruct(x) || isempty(x));
ip.addParamValue('lengthScale_prior',prior_t(), ...
@(x) isstruct(x) || isempty(x));
ip.addParamValue('selectedVariables',[], @(x) isempty(x) || ...
(isvector(x) && all(x>0)));
ip.parse(varargin{:});
gpcf=ip.Results.gpcf;
if isempty(gpcf)
% Check that SuiteSparse is available
if ~exist('ldlchol')
error('SuiteSparse is not installed (or it is not in the path). gpcf_ppcs0 cannot be used!')
end
init=true;
gpcf.nin=ip.Results.nin;
if isempty(gpcf.nin)
error('nin has to be given for ppcs: gpcf_ppcs0(''nin'',NIN,...)')
end
gpcf.type = 'gpcf_ppcs0';
% cf is compactly supported
gpcf.cs = 1;
else
if ~isfield(gpcf,'type') || ~isequal(gpcf.type,'gpcf_ppcs0')
error('First argument does not seem to be a valid covariance function structure')
end
init=false;
end
if init
% Set the function handles to the subfunctions
gpcf.fh.pak = @gpcf_ppcs0_pak;
gpcf.fh.unpak = @gpcf_ppcs0_unpak;
gpcf.fh.lp = @gpcf_ppcs0_lp;
gpcf.fh.lpg = @gpcf_ppcs0_lpg;
gpcf.fh.cfg = @gpcf_ppcs0_cfg;
gpcf.fh.ginput = @gpcf_ppcs0_ginput;
gpcf.fh.cov = @gpcf_ppcs0_cov;
gpcf.fh.trcov = @gpcf_ppcs0_trcov;
gpcf.fh.trvar = @gpcf_ppcs0_trvar;
gpcf.fh.recappend = @gpcf_ppcs0_recappend;
end
% Initialize parameters
if init || ~ismember('l_nin',ip.UsingDefaults)
gpcf.l=ip.Results.l_nin;
if isempty(gpcf.l)
gpcf.l = floor(gpcf.nin/2) + 1;
end
if gpcf.l < gpcf.nin
error('The l_nin has to be greater than or equal to the number of inputs!')
end
end
if init || ~ismember('lengthScale',ip.UsingDefaults)
gpcf.lengthScale = ip.Results.lengthScale;
end
if init || ~ismember('magnSigma2',ip.UsingDefaults)
gpcf.magnSigma2 = ip.Results.magnSigma2;
end
% Initialize prior structure
if init
gpcf.p=[];
end
if init || ~ismember('lengthScale_prior',ip.UsingDefaults)
gpcf.p.lengthScale=ip.Results.lengthScale_prior;
end
if init || ~ismember('magnSigma2_prior',ip.UsingDefaults)
gpcf.p.magnSigma2=ip.Results.magnSigma2_prior;
end
%Initialize metric
if ~ismember('metric',ip.UsingDefaults)
if ~isempty(ip.Results.metric)
gpcf.metric = ip.Results.metric;
gpcf = rmfield(gpcf, 'lengthScale');
gpcf.p = rmfield(gpcf.p, 'lengthScale');
elseif isfield(gpcf,'metric')
if ~isfield(gpcf,'lengthScale')
gpcf.lengthScale = gpcf.metric.lengthScale;
end
if ~isfield(gpcf.p,'lengthScale')
gpcf.p.lengthScale = gpcf.metric.p.lengthScale;
end
gpcf = rmfield(gpcf, 'metric');
end
end
% selectedVariables options implemented using metric_euclidean
if ~ismember('selectedVariables',ip.UsingDefaults)
if ~isfield(gpcf,'metric')
if ~isempty(ip.Results.selectedVariables)
gpcf.selectedVariables = ip.Results.selectedVariables;
% gpcf.metric=metric_euclidean('components',...
% num2cell(ip.Results.selectedVariables),...
% 'lengthScale',gpcf.lengthScale,...
% 'lengthScale_prior',gpcf.p.lengthScale);
% gpcf = rmfield(gpcf, 'lengthScale');
% gpcf.p = rmfield(gpcf.p, 'lengthScale');
end
elseif isfield(gpcf,'metric')
if ~isempty(ip.Results.selectedVariables)
gpcf.metric=metric_euclidean(gpcf.metric,...
'components',...
num2cell(ip.Results.selectedVariables));
if ~ismember('lengthScale',ip.UsingDefaults)
gpcf.metric.lengthScale=ip.Results.lengthScale;
gpcf = rmfield(gpcf, 'lengthScale');
end
if ~ismember('lengthScale_prior',ip.UsingDefaults)
gpcf.metric.p.lengthScale=ip.Results.lengthScale_prior;
gpcf.p = rmfield(gpcf.p, 'lengthScale');
end
else
if ~isfield(gpcf,'lengthScale')
gpcf.lengthScale = gpcf.metric.lengthScale;
end
if ~isfield(gpcf.p,'lengthScale')
gpcf.p.lengthScale = gpcf.metric.p.lengthScale;
end
gpcf = rmfield(gpcf, 'metric');
end
end
end
end
function [w,s] = gpcf_ppcs0_pak(gpcf)
%GPCF_PPCS0_PAK Combine GP covariance function parameters into
% one vector
%
% Description
% W = GPCF_PPCS0_PAK(GPCF) takes a covariance function
% structure GPCF and combines the covariance function
% parameters and their hyperparameters into a single row
% vector W. This is a mandatory subfunction used for example
% in energy and gradient computations.
%
% w = [ log(gpcf.magnSigma2)
% (hyperparameters of gpcf.magnSigma2)
% log(gpcf.lengthScale(:))
% (hyperparameters of gpcf.lengthScale)]'
%
% See also
% GPCF_PPCS0_UNPAK
w = []; s = {};
if ~isempty(gpcf.p.magnSigma2)
w = [w log(gpcf.magnSigma2)];
s = [s; 'log(ppcs0.magnSigma2)'];
% Hyperparameters of magnSigma2
[wh sh] = gpcf.p.magnSigma2.fh.pak(gpcf.p.magnSigma2);
w = [w wh];
s = [s; sh];
end
if isfield(gpcf,'metric')
[wh sh]=gpcf.metric.fh.pak(gpcf.metric);
w = [w wh];
s = [s; sh];
else
if ~isempty(gpcf.p.lengthScale)
w = [w log(gpcf.lengthScale)];
if numel(gpcf.lengthScale)>1
s = [s; sprintf('log(ppcs0.lengthScale x %d)',numel(gpcf.lengthScale))];
else
s = [s; 'log(ppcs0.lengthScale)'];
end
% Hyperparameters of lengthScale
[wh sh] = gpcf.p.lengthScale.fh.pak(gpcf.p.lengthScale);
w = [w wh];
s = [s; sh];
end
end
end
function [gpcf, w] = gpcf_ppcs0_unpak(gpcf, w)
%GPCF_PPCS0_UNPAK Sets the covariance function parameters into
% the structure
%
% Description
% [GPCF, W] = GPCF_PPCS0_UNPAK(GPCF, W) takes a covariance
% function structure GPCF and a hyper-parameter vector W,
% and returns a covariance function structure identical
% to the input, except that the covariance hyper-parameters
% have been set to the values in W. Deletes the values set to
% GPCF from W and returns the modified W. This is a mandatory
% subfunction used for example in energy and gradient computations.
%
% Assignment is inverse of
% w = [ log(gpcf.magnSigma2)
% (hyperparameters of gpcf.magnSigma2)
% log(gpcf.lengthScale(:))
% (hyperparameters of gpcf.lengthScale)]'
%
% See also
% GPCF_PPCS0_PAK
gpp=gpcf.p;
if ~isempty(gpp.magnSigma2)
gpcf.magnSigma2 = exp(w(1));
w = w(2:end);
% Hyperparameters of magnSigma2
[p, w] = gpcf.p.magnSigma2.fh.unpak(gpcf.p.magnSigma2, w);
gpcf.p.magnSigma2 = p;
end
if isfield(gpcf,'metric')
[metric, w] = gpcf.metric.fh.unpak(gpcf.metric, w);
gpcf.metric = metric;
else
if ~isempty(gpp.lengthScale)
i1=1;
i2=length(gpcf.lengthScale);
gpcf.lengthScale = exp(w(i1:i2));
w = w(i2+1:end);
% Hyperparameters of lengthScale
[p, w] = gpcf.p.lengthScale.fh.unpak(gpcf.p.lengthScale, w);
gpcf.p.lengthScale = p;
end
end
end
function lp = gpcf_ppcs0_lp(gpcf)
%GPCF_PPCS0_LP Evaluate the log prior of covariance function parameters
%
% Description
% LP = GPCF_PPCS0_LP(GPCF, X, T) takes a covariance function
% structure GPCF and returns log(p(th)), where th collects the
% parameters. This is a mandatory subfunction used for example
% in energy computations.
%
% See also
% GPCF_PPCS0_PAK, GPCF_PPCS0_UNPAK, GPCF_PPCS0_LPG, GP_E
% Evaluate the prior contribution to the error. The parameters that
% are sampled are transformed, e.g., W = log(w) where w is all
% the "real" samples. On the other hand errors are evaluated in
% the W-space so we need take into account also the Jacobian of
% transformation, e.g., W -> w = exp(W). See Gelman et.al., 2004,
% Bayesian data Analysis, second edition, p24.
lp = 0;
gpp=gpcf.p;
if ~isempty(gpcf.p.magnSigma2)
lp = lp +gpp.magnSigma2.fh.lp(gpcf.magnSigma2, ...
gpp.magnSigma2) +log(gpcf.magnSigma2);
end
if isfield(gpcf,'metric')
lp = lp +gpcf.metric.fh.lp(gpcf.metric);
elseif ~isempty(gpp.lengthScale)
lp = lp +gpp.lengthScale.fh.lp(gpcf.lengthScale, ...
gpp.lengthScale) +sum(log(gpcf.lengthScale));
end
end
function lpg = gpcf_ppcs0_lpg(gpcf)
%GPCF_PPCS0_LPG Evaluate gradient of the log prior with respect
% to the parameters.
%
% Description
% LPG = GPCF_PPCS0_LPG(GPCF) takes a covariance function
% structure GPCF and returns LPG = d log (p(th))/dth, where th
% is the vector of parameters. This is a mandatory subfunction
% used for example in energy and gradient computations.
%
% See also
% GPCF_PPCS0_PAK, GPCF_PPCS0_UNPAK, GPCF_PPCS0_LP, GP_G
lpg = [];
gpp=gpcf.p;
if ~isempty(gpcf.p.magnSigma2)
lpgs = gpp.magnSigma2.fh.lpg(gpcf.magnSigma2, gpp.magnSigma2);
lpg = [lpg lpgs(1).*gpcf.magnSigma2+1 lpgs(2:end)];
end
if isfield(gpcf,'metric')
lpg_dist = gpcf.metric.fh.lpg(gpcf.metric);
lpg=[lpg lpg_dist];
else
if ~isempty(gpcf.p.lengthScale)
lll = length(gpcf.lengthScale);
lpgs = gpp.lengthScale.fh.lpg(gpcf.lengthScale, gpp.lengthScale);
lpg = [lpg lpgs(1:lll).*gpcf.lengthScale+1 lpgs(lll+1:end)];
end
end
end
function DKff = gpcf_ppcs0_cfg(gpcf, x, x2, mask,i1)
%GPCF_PPCS0_CFG Evaluate gradient of covariance function
% with respect to the parameters
%
% Description
% DKff = GPCF_PPCS0_CFG(GPCF, X) takes a covariance function
% structure GPCF, a matrix X of input vectors and returns
% DKff, the gradients of covariance matrix Kff = k(X,X) with
% respect to th (cell array with matrix elements). This is a
% mandatory subfunction used in gradient computations.
%
% DKff = GPCF_PPCS0_CFG(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using sparse
% approximations (e.g. FIC).
%
% DKff = GPCF_PPCS0_CFG(GPCF, X, [], MASK) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the diagonal of gradients of covariance matrix
% Kff = k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using sparse
% approximations (e.g. FIC).
%
% DKff = GPCF_PPCS0_CFG(GPCF, X, X2, [], i) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2), or k(X,X) if X2 is empty, with respect to ith
% hyperparameter. This subfunction is needed when using memory
% save option in gp_set.
%
% See also
% GPCF_PPCS0_PAK, GPCF_PPCS0_UNPAK, GPCF_PPCS0_LP, GP_G
gpp=gpcf.p;
i2=1;
DKff = {};
gprior = [];
if nargin==5
% Use memory save option
savememory=1;
if i1==0
% Return number of hyperparameters
i=0;
if ~isempty(gpcf.p.magnSigma2)
i=i+1;
end
if ~isempty(gpcf.p.lengthScale)
i=i+length(gpcf.lengthScale);
end
DKff=i;
return;
end
else
savememory=0;
end
% Evaluate: DKff{1} = d Kff / d magnSigma2
% DKff{2} = d Kff / d lengthScale
% NOTE! Here we have already taken into account that the parameters
% are transformed through log() and thus dK/dlog(p) = p * dK/dp
% evaluate the gradient for training covariance
if nargin == 2 || (isempty(x2) && isempty(mask))
Cdm = gpcf_ppcs0_trcov(gpcf, x);
ii1=0;
if ~isempty(gpcf.p.magnSigma2)
ii1 = ii1 +1;
DKff{ii1} = Cdm;
end
l = gpcf.l;
[I,J] = find(Cdm);
if isfield(gpcf,'metric')
% Compute the sparse distance matrix and its gradient.
[n, m] =size(x);
ntriplets = (nnz(Cdm)-n)./2;
I = zeros(ntriplets,1);
J = zeros(ntriplets,1);
dist = zeros(ntriplets,1);
for jj = 1:length(gpcf.metric.components)
gdist{jj} = zeros(ntriplets,1);
end
ntriplets = 0;
for ii=1:n-1
col_ind = ii + find(Cdm(ii+1:n,ii));
d = gpcf.metric.fh.dist(gpcf.metric, x(col_ind,:), x(ii,:));
gd = gpcf.metric.fh.distg(gpcf.metric, x(col_ind,:), x(ii,:));
ntrip_prev = ntriplets;
ntriplets = ntriplets + length(d);
ind_tr = ntrip_prev+1:ntriplets;
I(ind_tr) = col_ind;
J(ind_tr) = ii;
dist(ind_tr) = d;
for jj = 1:length(gd)
gdist{jj}(ind_tr) = gd{jj};
end
end
ma2 = gpcf.magnSigma2;
cs = 1-dist;
Dd = -l.*cs.^(l-1);
Dd = ma2.*Dd;
for i=1:length(gdist)
ii1 = ii1+1;
D = Dd.*gdist{i};
D = sparse(I,J,D,n,n);
DKff{ii1} = D + D';
end
else
if isfield(gpcf, 'selectedVariables')
x = x(:,gpcf.selectedVariables);
end
[n, m] =size(x);
if ~savememory
i1=1:m;
else
if i1==1
DKff=DKff{1};
return
end
i1=i1-1;
ii1=ii1-1;
end
if ~isempty(gpcf.p.lengthScale)
% loop over all the lengthScales
if length(gpcf.lengthScale) == 1
% In the case of isotropic PPCS0
s2 = 1./gpcf.lengthScale.^2;
ma2 = gpcf.magnSigma2;
% Calculate the sparse distance (lower triangle) matrix
d2 = 0;
for i = 1:m
d2 = d2 + s2.*(x(I,i) - x(J,i)).^2;
end
d = sqrt(d2);
% Create the 'compact support' matrix, that is, (1-R)_+,
% where ()_+ truncates all non-positive inputs to zero.
cs = 1-d;
% Calculate the gradient matrix
D = -l.*cs.^(l-1);
D = -d.*ma2.*D;
D = sparse(I,J,D,n,n);
ii1 = ii1+1;
DKff{ii1} = D;
else
% In the case ARD is used
s2 = 1./gpcf.lengthScale.^2;
ma2 = gpcf.magnSigma2;
% Calculate the sparse distance (lower triangle) matrix
% and the distance matrix for each component
d2 = 0;
d_l2 = [];
for i = 1:m
d_l2(:,i) = s2(i).*(x(I,i) - x(J,i)).^2;
d2 = d2 + d_l2(:,i);
end
d = sqrt(d2);
d_l = d_l2;
% Create the 'compact support' matrix, that is, (1-R)_+,
% where ()_+ truncates all non-positive inputs to zero.
cs = 1-d;
Dd = -l.*cs.^(l-1);
Dd = -ma2.*Dd;
int = d ~= 0;
for i = i1
% Calculate the gradient matrix
D = d_l(:,i).*Dd;
% Divide by r in cases where r is non-zero
D(int) = D(int)./d(int);
D = sparse(I,J,D,n,n);
ii1 = ii1+1;
DKff{ii1} = D;
end
end
end
end
% Evaluate the gradient of non-symmetric covariance (e.g. K_fu)
elseif nargin == 3 || isempty(mask)
if size(x,2) ~= size(x2,2)
error('gpcf_ppcs0 -> _cfg: The number of columns in x and x2 has to be the same.')
end
ii1=0;
K = gpcf.fh.cov(gpcf, x, x2);
if ~isempty(gpcf.p.magnSigma2)
ii1 = ii1 +1;
DKff{ii1} = K;
end
l = gpcf.l;
if isfield(gpcf,'metric')
% If other than scaled euclidean metric
[n1,m1]=size(x);
[n2,m2]=size(x2);
ma = gpcf.magnSigma2;
% Compute the sparse distance matrix.
ntriplets = nnz(K);
I = zeros(ntriplets,1);
J = zeros(ntriplets,1);
R = zeros(ntriplets,1);
dist = zeros(ntriplets,1);
for jj = 1:length(gpcf.metric.components)
gdist{jj} = zeros(ntriplets,1);
end
ntriplets = 0;
for ii=1:n2
d = zeros(n1,1);
d = gpcf.metric.fh.dist(gpcf.metric, x, x2(ii,:));
gd = gpcf.metric.fh.distg(gpcf.metric, x, x2(ii,:));
gprior_dist = gpcf.metric.fh.lpg(gpcf.metric, x, x2(ii,:));
I0t = find(d==0);
d(d >= 1) = 0;
[I2,J2,R2] = find(d);
len = length(R);
ntrip_prev = ntriplets;
ntriplets = ntriplets + length(R2);
ind_tr = ntrip_prev+1:ntriplets;
I(ind_tr) = I2;
J(ind_tr) = ii;
dist(ind_tr) = R2;
for jj = 1:length(gd)
gdist{jj}(ind_tr) = gd{jj}(I2);
end
end
ma2 = gpcf.magnSigma2;
cs = 1-dist;
Dd = -l.*cs.^(l-1);
Dd = ma2.*Dd;
for i=1:length(gdist)
ii1 = ii1+1;
D = Dd.*gdist{i};
D = sparse(I,J,D,n1,n2);
DKff{ii1} = D;
end
else
if isfield(gpcf, 'selectedVariables')
x = x(:,gpcf.selectedVariables);
x2 = x2(:,gpcf.selectedVariables);
end
[n, m] =size(x);
if ~savememory
i1=1:m;
else
if i1==1
DKff=DKff{1};
return
end
i1=i1-1;
ii1=ii1-1;
end
if ~isempty(gpcf.p.lengthScale)
% loop over all the lengthScales
if length(gpcf.lengthScale) == 1
% In the case of isotropic PPCS0
s2 = 1./gpcf.lengthScale.^2;
ma2 = gpcf.magnSigma2;
% Calculate the sparse distance (lower triangle) matrix
dist1 = 0;
for i=1:m
dist1 = dist1 + s2.*(bsxfun(@minus,x(:,i),x2(:,i)')).^2;
end
d1 = sqrt(dist1);
cs1 = max(1-d1,0);
DK_l = -l.*cs1.^(l-1);
DK_l = -d1.*ma2.*DK_l;
ii1=ii1+1;
DKff{ii1} = DK_l;
else
% In the case ARD is used
s2 = 1./gpcf.lengthScale.^2;
ma2 = gpcf.magnSigma2;
% Calculate the sparse distance (lower triangle) matrix
% and the distance matrix for each component
dist1 = 0;
d_l1 = [];
for i = 1:m
dist1 = dist1 + s2(i).*bsxfun(@minus,x(:,i),x2(:,i)').^2;
d_l1{i} = s2(i).*(bsxfun(@minus,x(:,i),x2(:,i)')).^2;
end
d1 = sqrt(dist1);
cs1 = max(1-d1,0);
for i = i1
% Calculate the gradient matrix
DK_l = -l.*cs1.^(l-1);
DK_l = -ma2.*DK_l.*d_l1{i};
% Divide by r in cases where r is non-zero
DK_l(d1 ~= 0) = DK_l(d1 ~= 0)./d1(d1 ~= 0);
ii1=ii1+1;
DKff{ii1} = DK_l;
end
end
end
end
% Evaluate: DKff{1} = d mask(Kff,I) / d magnSigma2
% DKff{2...} = d mask(Kff,I) / d lengthScale
elseif nargin == 4 || nargin == 5
ii1=0;
[n, m] =size(x);
if ~isempty(gpcf.p.magnSigma2) && (~savememory || all(i1==1))
ii1 = ii1+1;
DKff{ii1} = gpcf.fh.trvar(gpcf, x); % d mask(Kff,I) / d magnSigma2
end
if isfield(gpcf,'metric')
dist = 0;
distg = gpcf.metric.fh.distg(gpcf.metric, x, [], 1);
gprior_dist = gpcf.metric.fh.lpg(gpcf.metric);
for i=1:length(distg)
ii1 = ii1+1;
DKff{ii1} = 0;
end
else
if ~isempty(gpcf.p.lengthScale)
for i2=1:length(gpcf.lengthScale)
ii1 = ii1+1;
DKff{ii1} = 0; % d mask(Kff,I) / d lengthScale
end
end
end
end
if savememory
DKff=DKff{1};
end
end
function DKff = gpcf_ppcs0_ginput(gpcf, x, x2, i1)
%GPCF_PPCS0_GINPUT Evaluate gradient of covariance function with
% respect to x
%
% Description
% DKff = GPCF_PPCS0_GINPUT(GPCF, X) takes a covariance
% function structure GPCF, a matrix X of input vectors
% and returns DKff, the gradients of covariance matrix Kff =
% k(X,X) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% DKff = GPCF_PPCS0_GINPUT(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors
% and returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% DKff = GPCF_PPCS0_GINPUT(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors
% and returns DKff, the gradients of covariance matrix Kff =
% k(X,X2), or k(X,X) if X2 is empty, with respect to ith
% covariate in X. This subfunction is needed when using memory
% option in gp_set.
%
% See also
% GPCF_PPCS0_PAK, GPCF_PPCS0_UNPAK, GPCF_PPCS0_LP, GP_G
[n, m] =size(x);
ii1 = 0;
if nargin==4
% Use memory save option
if i1==0
% Return number of covariates
if isfield(gpcf,'selectedVariables')
DKff=length(gpcf.selectedVariables);
else
DKff=m;
end
return
end
else
i1=1:m;
end
if nargin == 2 || isempty(x2)
l = gpcf.l;
K = gpcf.fh.trcov(gpcf, x);
[I,J] = find(K);
if isfield(gpcf,'metric')
% Compute the sparse distance matrix and its gradient.
ntriplets = (nnz(K)-n)./2;
I = zeros(ntriplets,1);
J = zeros(ntriplets,1);
dist = zeros(ntriplets,1);
for jj = 1:length(gpcf.metric.components)
gdist{jj} = zeros(ntriplets,1);
end
ntriplets = 0;
for ii=1:n-1
col_ind = ii + find(K(ii+1:n,ii));
d = zeros(length(col_ind),1);
d = gpcf.metric.fh.dist(gpcf.metric, x(col_ind,:), x(ii,:));
[gd, gprior_dist] = gpcf.metric.fh.ginput(gpcf.metric, x(col_ind,:), x(ii,:));
ntrip_prev = ntriplets;
ntriplets = ntriplets + length(d);
ind_tr = ntrip_prev+1:ntriplets;
I(ind_tr) = col_ind;
J(ind_tr) = ii;
dist(ind_tr) = d;
for jj = 1:length(gd)
gdist{jj}(ind_tr) = gd{jj};
end
end
ma2 = gpcf.magnSigma2;
cs = 1-dist;
Dd = -l.*cs.^(l-1);
Dd = ma2.*Dd;
for i=1:length(gdist)
ii1 = ii1+1;
D = Dd.*gdist{i};
D = sparse(I,J,D,n,n);
DKff{ii1} = D + D';
end
else
if length(gpcf.lengthScale) == 1
% In the case of an isotropic PPCS0
s2 = repmat(1./gpcf.lengthScale.^2, 1, m);
else
s2 = 1./gpcf.lengthScale.^2;
end
ma2 = gpcf.magnSigma2;
% Calculate the sparse distance (lower triangle) matrix
% and the distance matrix for each component
d2 = 0;
for i = 1:m
d2 = d2 + s2(i).*(x(I,i) - x(J,i)).^2;
end
d = sqrt(d2);
% Create the 'compact support' matrix, that is, (1-R)_+,
% where ()_+ truncates all non-positive inputs to zero.
cs = 1-d;
Dd = -ma2.*l.*cs.^(l-1);
Dd = sparse(I,J,Dd,n,n);
d = sparse(I,J,d,n,n);
row = ones(n,1);
cols = 1:n;
for i = i1
for j = 1:n
% Calculate the gradient matrix
ind = find(d(:,j));
apu = full(Dd(:,j)).*s2(i).*(x(j,i)-x(:,i));
apu(ind) = apu(ind)./d(ind,j);
D = sparse(row*j, cols, apu, n, n);
D = D+D';
ii1 = ii1+1;
DKff{ii1} = D;
end
end
end
% Evaluate the gradient of non-symmetric covariance (e.g. K_fu)
elseif nargin == 3
if size(x,2) ~= size(x2,2)
error('gpcf_ppcs0 -> _ginput: The number of columns in x and x2 has to be the same.')
end
K = gpcf.fh.cov(gpcf, x, x2);
n2 = size(x2,1);
ii1=0;
l = gpcf.l;
if isfield(gpcf,'metric')
% If other than scaled euclidean metric
[n1,m1]=size(x);
[n2,m2]=size(x2);
ma = gpcf.magnSigma2;
% Compute the sparse distance matrix.
ntriplets = nnz(K);
I = zeros(ntriplets,1);
J = zeros(ntriplets,1);
R = zeros(ntriplets,1);
dist = zeros(ntriplets,1);
for jj = 1:length(gpcf.metric.components)
gdist{jj} = zeros(ntriplets,1);
end
ntriplets = 0;
for ii=1:n2
d = zeros(n1,1);
d = gpcf.metric.fh.dist(gpcf.metric, x, x2(ii,:));
[gd, gprior_dist] = gpcf.metric.fh.ginput(gpcf.metric, x, x2(ii,:));
I0t = find(d==0);
d(d >= 1) = 0;
[I2,J2,R2] = find(d);
len = length(R);
ntrip_prev = ntriplets;
ntriplets = ntriplets + length(R2);
ind_tr = ntrip_prev+1:ntriplets;
I(ind_tr) = I2;
J(ind_tr) = ii;
dist(ind_tr) = R2;
for jj = 1:length(gd)
gdist{jj}(ind_tr) = gd{jj}(I2);
end
end
ma2 = gpcf.magnSigma2;
cs = 1-dist;
Dd = -l.*ma2.*cs.^(l-1);
for i=1:length(gdist)
ii1 = ii1+1;
D = Dd.*gdist{i};
D = sparse(I,J,D,n1,n2);
DKff{ii1} = D;
end
else
if length(gpcf.lengthScale) == 1
% In the case of an isotropic PPCS0
s2 = repmat(1./gpcf.lengthScale.^2, 1, m);
else
s2 = 1./gpcf.lengthScale.^2;
end
ma2 = gpcf.magnSigma2;
% Calculate the sparse distance (lower triangle) matrix
% and the distance matrix for each component
dist1 = 0;
for i = 1:m
dist1 = dist1 + s2(i).*bsxfun(@minus,x(:,i),x2(:,i)').^2;
end
d = sqrt(dist1);
cs = max(1-d,0);
Dd = -ma2.*l.*cs.^(l-1);
row = ones(n2,1);
cols = 1:n2;
for i = i1
for j = 1:n
% Calculate the gradient matrix
ind = find(d(j,:));
apu = Dd(j,:).*s2(i).*(x(j,i)-x2(:,i))';
apu(ind) = apu(ind)./d(j,ind);
D = sparse(row*j, cols, apu, n, n2);
ii1 = ii1+1;
DKff{ii1} = D;
end
end
end
end
end
function C = gpcf_ppcs0_cov(gpcf, x1, x2, varargin)
%GP_PPCS0_COV Evaluate covariance matrix between two input vectors
%
% Description
% C = GP_PPCS0_COV(GP, TX, X) takes in covariance function of
% a Gaussian process GP and two matrixes TX and X that contain
% input vectors to GP. Returns covariance matrix C. Every
% element ij of C contains covariance between inputs i in TX
% and j in X. This is a mandatory subfunction used for example in
% prediction and energy computations.
%
% See also
% GPCF_PPCS0_TRCOV, GPCF_PPCS0_TRVAR, GP_COV, GP_TRCOV
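%
% Example (a sketch, not part of the original file; assumes input
% matrices x1 and x2 with the same number of columns and a PPCS0
% structure gpcf created earlier with gpcf_ppcs0):
%
% K = gpcf.fh.cov(gpcf, x1, x2); % sparse n1-by-n2 covariance matrix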
if isfield(gpcf,'metric')
% If other than scaled euclidean metric
[n1,m1]=size(x1);
[n2,m2]=size(x2);
else
% If scaled euclidean metric
if isfield(gpcf, 'selectedVariables')
x1 = x1(:,gpcf.selectedVariables);
x2 = x2(:,gpcf.selectedVariables);
end
[n1,m1]=size(x1);
[n2,m2]=size(x2);
s = 1./(gpcf.lengthScale);
s2 = s.^2;
if size(s)==1
s2 = repmat(s2,1,m1);
end
end
ma2 = gpcf.magnSigma2;
l = gpcf.l;
% Compute the sparse distance matrix.
ntriplets = max(1,floor(0.03*n1*n2));
I = zeros(ntriplets,1);
J = zeros(ntriplets,1);
R = zeros(ntriplets,1);
ntriplets = 0;
I0=zeros(ntriplets,1);
J0=zeros(ntriplets,1);
nn0=0;
for ii1=1:n2
d = zeros(n1,1);
if isfield(gpcf, 'metric')
d = gpcf.metric.fh.dist(gpcf.metric, x1, x2(ii1,:));
else
for j=1:m1
d = d + s2(j).*(x1(:,j)-x2(ii1,j)).^2;
end
end
%d = sqrt(d);
I0t = find(d==0);
d(d >= 1) = 0;
[I2,J2,R2] = find(d);
R2=sqrt(R2);
%len = length(R);
ntrip_prev = ntriplets;
ntriplets = ntriplets + length(R2);
I(ntrip_prev+1:ntriplets) = I2;
J(ntrip_prev+1:ntriplets) = ii1;
R(ntrip_prev+1:ntriplets) = R2;
I0(nn0+1:nn0+length(I0t)) = I0t;
J0(nn0+1:nn0+length(I0t)) = ii1;
nn0 = nn0+length(I0t);
end
r = sparse(I(1:ntriplets),J(1:ntriplets),R(1:ntriplets));
[I,J,r] = find(r);
cs = full(sparse(max(0, 1-r)));
C = ma2.*cs.^l;
C = sparse(I,J,C,n1,n2) + sparse(I0,J0,ma2,n1,n2);
end
function C = gpcf_ppcs0_trcov(gpcf, x)
%GP_PPCS0_TRCOV Evaluate training covariance matrix of inputs
%
% Description
% C = GP_PPCS0_TRCOV(GP, TX) takes in covariance function of a
% Gaussian process GP and matrix TX that contains training
% input vectors. Returns covariance matrix C. Every element ij
% of C contains covariance between inputs i and j in TX.
% This is a mandatory subfunction used for example in prediction
% and energy computations.
%
% See also
% GPCF_PPCS0_COV, GPCF_PPCS0_TRVAR, GP_COV, GP_TRCOV
if isfield(gpcf,'metric')
% If other than scaled euclidean metric
[n, m] =size(x);
else
% If a scaled euclidean metric, try the mex implementation first
% and if it is not available ...
C = trcov(gpcf,x);
% ... evaluate the covariance here.
if isnan(C)
if isfield(gpcf, 'selectedVariables')
x = x(:,gpcf.selectedVariables);
end
[n, m] =size(x);
s = 1./(gpcf.lengthScale);
s2 = s.^2;
if size(s)==1
s2 = repmat(s2,1,m);
end
else
return
end
end
ma2 = gpcf.magnSigma2;
l = gpcf.l;
% Compute the sparse distance matrix.
ntriplets = max(1,floor(0.03*n*n));
I = zeros(ntriplets,1);
J = zeros(ntriplets,1);
R = zeros(ntriplets,1);
ntriplets = 0;
ntripletsz = max(1,floor(0.03.^2*n*n));
Iz = zeros(ntripletsz,1);
Jz = zeros(ntripletsz,1);
ntripletsz = 0;
for ii1=1:n-1
d = zeros(n-ii1,1);
col_ind = ii1+1:n;
if isfield(gpcf, 'metric')
d = gpcf.metric.fh.dist(gpcf.metric, x(col_ind,:), x(ii1,:));
else
for ii2=1:m
d = d+s2(ii2).*(x(col_ind,ii2)-x(ii1,ii2)).^2;
end
end
%d = sqrt(d);
% store zero distance index
[I2z,J2z] = find(d==0);
% create triplets for distances 0<d<1
d(d >= 1) = 0;
[I2,J2,R2] = find(d);
len = length(R);
ntrip_prev = ntriplets;
ntriplets = ntriplets + length(R2);
if (ntriplets > len)
I(2*len) = 0;
J(2*len) = 0;
R(2*len) = 0;
end
ind_tr = ntrip_prev+1:ntriplets;
I(ind_tr) = ii1+I2;
J(ind_tr) = ii1;
R(ind_tr) = sqrt(R2);
% create triplets for distances d==0 (i~=j)
lenz = length(Iz);
ntrip_prevz = ntripletsz;
ntripletsz = ntripletsz + length(I2z);
if (ntripletsz > lenz)
Iz(2*lenz) = 0;
Jz(2*lenz) = 0;
end
ind_trz = ntrip_prevz+1:ntripletsz;
Iz(ind_trz) = ii1+I2z;
Jz(ind_trz) = ii1;
end
% create a lower triangular sparse distance matrix from the triplets for distances 0<d<1
R = sparse(I(1:ntriplets),J(1:ntriplets),R(1:ntriplets),n,n);
% create a lower triangular sparse covariance matrix from the
% triplets for distances d==0 (i~=j)
Rz = sparse(Iz(1:ntripletsz),Jz(1:ntripletsz),repmat(ma2,1,ntripletsz),n,n);
% Find the non-zero elements of R.
[I,J,rn] = find(R);
% Compute covariances for distances 0<d<1
cs = max(0,1-rn);
C = ma2.*cs.^l;
% create a lower triangular sparse covariance matrix from the triplets for distances 0<d<1
C = sparse(I,J,C,n,n);
% add the lower triangular covariance matrix for distances d==0 (i~=j)
C = C + Rz;
% form a square covariance matrix and add the covariance matrix for i==j (d==0)
C = C + C' + sparse(1:n,1:n,ma2,n,n);
end
function C = gpcf_ppcs0_trvar(gpcf, x)
%GP_PPCS0_TRVAR Evaluate training variance vector
%
% Description
% C = GP_PPCS0_TRVAR(GPCF, TX) takes in covariance function of
% a Gaussian process GPCF and matrix TX that contains training
% inputs. Returns variance vector C. Every element i of C
% contains variance of input i in TX. This is a mandatory subfunction
% used for example in prediction and energy computations.
%
% See also
% GPCF_PPCS0_COV, GP_COV, GP_TRCOV
[n, m] =size(x);
C = ones(n,1).*gpcf.magnSigma2;
C(C<eps)=0;
end
function reccf = gpcf_ppcs0_recappend(reccf, ri, gpcf)
%RECAPPEND Record append
%
% Description
% RECCF = GPCF_PPCS0_RECAPPEND(RECCF, RI, GPCF) takes a
% covariance function record structure RECCF, record index RI
% and covariance function structure GPCF with the current MCMC
% samples of the parameters. Returns RECCF which contains all
% the old samples and the current samples from GPCF. This
% subfunction is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC and GP_MC -> RECAPPEND
if nargin == 2
% Initialize the record
reccf.type = 'gpcf_ppcs0';
reccf.nin = ri.nin;
reccf.l = floor(reccf.nin/2)+4;
% cf is compactly supported
reccf.cs = 1;
% Initialize parameters
reccf.lengthScale= [];
reccf.magnSigma2 = [];
% Set the function handles
reccf.fh.pak = @gpcf_ppcs0_pak;
reccf.fh.unpak = @gpcf_ppcs0_unpak;
reccf.fh.e = @gpcf_ppcs0_lp;
reccf.fh.lpg = @gpcf_ppcs0_lpg;
reccf.fh.cfg = @gpcf_ppcs0_cfg;
reccf.fh.cov = @gpcf_ppcs0_cov;
reccf.fh.trcov = @gpcf_ppcs0_trcov;
reccf.fh.trvar = @gpcf_ppcs0_trvar;
reccf.fh.recappend = @gpcf_ppcs0_recappend;
reccf.p=[];
reccf.p.lengthScale=[];
reccf.p.magnSigma2=[];
if isfield(ri.p,'lengthScale') && ~isempty(ri.p.lengthScale)
reccf.p.lengthScale = ri.p.lengthScale;
end
if ~isempty(ri.p.magnSigma2)
reccf.p.magnSigma2 = ri.p.magnSigma2;
end
if isfield(ri, 'selectedVariables')
reccf.selectedVariables = ri.selectedVariables;
end
else
% Append to the record
gpp = gpcf.p;
if ~isfield(gpcf,'metric')
% record lengthScale
reccf.lengthScale(ri,:)=gpcf.lengthScale;
if isfield(gpp,'lengthScale') && ~isempty(gpp.lengthScale)
reccf.p.lengthScale = gpp.lengthScale.fh.recappend(reccf.p.lengthScale, ri, gpcf.p.lengthScale);
end
end
% record magnSigma2
reccf.magnSigma2(ri,:)=gpcf.magnSigma2;
if isfield(gpp,'magnSigma2') && ~isempty(gpp.magnSigma2)
reccf.p.magnSigma2 = gpp.magnSigma2.fh.recappend(reccf.p.magnSigma2, ri, gpcf.p.magnSigma2);
end
end
end
|
github
|
lcnbeapp/beapp-master
|
lik_probit.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/lik_probit.m
| 11,048 |
UNKNOWN
|
5ab0288a472ad19a923d50739c297a64
|
function lik = lik_probit(varargin)
%LIK_PROBIT Create a Probit likelihood structure
%
% Description
% LIK = LIK_PROBIT creates Probit likelihood for classification
% problem with class labels {-1,1}.
%
% The likelihood is defined as follows:
% __ n
% p(y|f, z) = || i=1 normcdf(y_i * f_i)
%
% where f is the latent value vector.
%
% See also
% GP_SET, LIK_*
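%
% Example (a minimal sketch, not part of the original file; x are
% training inputs, y are class labels in {-1,1}, and gpcf is a
% covariance function structure, e.g. from gpcf_sexp):
%
% lik = lik_probit();
% gp = gp_set('lik', lik, 'cf', gpcf, 'latent_method', 'EP');
% gp = gp_optim(gp, x, y);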
%
% Copyright (c) 2007 Jaakko Riihimäki
% Copyright (c) 2007-2010 Jarno Vanhatalo
% Copyright (c) 2010 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'LIK_PROBIT';
ip.addOptional('lik', [], @isstruct);
ip.parse(varargin{:});
lik=ip.Results.lik;
if isempty(lik)
init=true;
lik.type = 'Probit';
else
if ~isfield(lik,'type') || ~isequal(lik.type,'Probit')
error('First argument does not seem to be a valid likelihood function structure')
end
init=false;
end
if init
% Set the function handles to the subfunctions
lik.fh.pak = @lik_probit_pak;
lik.fh.unpak = @lik_probit_unpak;
lik.fh.ll = @lik_probit_ll;
lik.fh.llg = @lik_probit_llg;
lik.fh.llg2 = @lik_probit_llg2;
lik.fh.llg3 = @lik_probit_llg3;
lik.fh.tiltedMoments = @lik_probit_tiltedMoments;
lik.fh.predy = @lik_probit_predy;
lik.fh.invlink = @lik_probit_invlink;
lik.fh.recappend = @lik_probit_recappend;
end
end
function [w,s] = lik_probit_pak(lik)
%LIK_PROBIT_PAK Combine likelihood parameters into one vector.
%
% Description
% W = LIK_PROBIT_PAK(LIK) takes a likelihood structure LIK and
% returns an empty vector W. If the Probit likelihood had
% parameters this would combine them into a single row vector
% W (see e.g. lik_negbin). This is a mandatory subfunction used
% for example in energy and gradient computations.
%
% See also
% LIK_NEGBIN_UNPAK, GP_PAK
w = []; s = {};
end
function [lik, w] = lik_probit_unpak(lik, w)
%LIK_PROBIT_UNPAK Extract likelihood parameters from the vector.
%
% Description
% [LIK, W] = LIK_PROBIT_UNPAK(LIK, W) does not do anything.
%
% If the Probit likelihood had parameters, this would extract them
% from the vector W to the LIK structure. This is a
% mandatory subfunction used for example in energy and gradient
% computations.
%
% See also
% LIK_PROBIT_PAK, GP_UNPAK
lik=lik;
w=w;
end
function ll = lik_probit_ll(lik, y, f, z)
%LIK_PROBIT_LL Log likelihood
%
% Description
% E = LIK_PROBIT_LL(LIK, Y, F) takes a likelihood structure
% LIK, class labels Y, and latent values F. Returns the log
% likelihood, log p(y|f,z). This subfunction is needed when
% using Laplace approximation or MCMC for inference with
% non-Gaussian likelihoods. This subfunction is also used
% in information criteria (DIC, WAIC) computations.
%
% See also
% LIK_PROBIT_LLG, LIK_PROBIT_LLG3, LIK_PROBIT_LLG2, GPLA_E
if ~isempty(find(abs(y)~=1))
error('lik_probit: The class labels have to be {-1,1}')
end
p = y.*f;
ll = log(norm_cdf(p));
if any(p<-10)
% Asymptotic expansion of norm_cdf
i = find(p<-10);
c = 1 - 1./p(i).^2.*(1-3./p(i).^2.*(1-5./p(i).^2.*(1-7./p(i).^2)));
ll(i) = -0.5*log(2*pi)-p(i).^2./2-log(-p(i))+log(c);
end
ll = sum(ll);
end
function llg = lik_probit_llg(lik, y, f, param, z)
%LIK_PROBIT_LLG Gradient of the log likelihood
%
% Description
% LLG = LIK_PROBIT_LLG(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, class labels Y, and latent values F.
% Returns the gradient of the log likelihood with respect to
% PARAM. At the moment PARAM can be 'param' or 'latent'.
% This subfunction is needed when using Laplace approximation
% or MCMC for inference with non-Gaussian likelihoods.
%
% See also
% LIK_PROBIT_LL, LIK_PROBIT_LLG2, LIK_PROBIT_LLG3, GPLA_E
if ~isempty(find(abs(y)~=1))
error('lik_probit: The class labels have to be {-1,1}')
end
switch param
case 'latent'
p=y.*f;
ncdf=norm_cdf(p);
if any(p<-10)
% Asymptotic expansion of norm_cdf
i = find(p<-10);
c = 1 - 1./p(i).^2.*(1-3./p(i).^2.*(1-5./p(i).^2.*(1-7./p(i).^2)));
ncdf(i) = -0.5*log(2*pi)-p(i).^2./2-log(-p(i))+log(c);
end
llg = y.*norm_pdf(f)./ncdf;
end
end
function llg2 = lik_probit_llg2(lik, y, f, param, z)
%LIK_PROBIT_LLG2 Second gradients of the log likelihood
%
% Description
% LLG2 = LIK_PROBIT_LLG2(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, class labels Y, and latent values F.
% Returns the Hessian of the log likelihood with respect to
% PARAM. At the moment PARAM can be only 'latent'. LLG2 is a
% vector with diagonal elements of the Hessian matrix (off
% diagonals are zero). This subfunction is needed when using
% Laplace approximation or EP for inference with non-Gaussian
% likelihoods.
%
% See also
% LIK_PROBIT_LL, LIK_PROBIT_LLG, LIK_PROBIT_LLG3, GPLA_E
if ~isempty(find(abs(y)~=1))
error('lik_probit: The class labels have to be {-1,1}')
end
switch param
case 'latent'
z = y.*f;
ncdf=norm_cdf(z);
if any(z<-10)
% Asymptotic expansion of norm_cdf
i = find(z<-10);
c = 1 - 1./z(i).^2.*(1-3./z(i).^2.*(1-5./z(i).^2.*(1-7./z(i).^2)));
ncdf(i) = -0.5*log(2*pi)-z(i).^2./2-log(-z(i))+log(c);
end
z2 = norm_pdf(f)./ncdf;
llg2 = -z2.^2 - z.*z2;
end
end
function llg3 = lik_probit_llg3(lik, y, f, param, z)
%LIK_PROBIT_LLG3 Third gradients of the log likelihood
%
% Description
% LLG3 = LIK_PROBIT_LLG3(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, class labels Y, and latent values F and
% returns the third gradients of the log likelihood with
% respect to PARAM. At the moment PARAM can be only 'latent'.
% LLG3 is a vector with third gradients. This subfunction is
% needed when using Laplace approximation for inference with
% non-Gaussian likelihoods.
%
% See also
% LIK_PROBIT_LL, LIK_PROBIT_LLG, LIK_PROBIT_LLG2, GPLA_E, GPLA_G
if ~isempty(find(abs(y)~=1))
error('lik_probit: The class labels have to be {-1,1}')
end
switch param
case 'latent'
z=y.*f;
ncdf=norm_cdf(z);
if any(z<-10)
% Asymptotic expansion of norm_cdf
i = find(z<-10);
c = 1 - 1./z(i).^2.*(1-3./z(i).^2.*(1-5./z(i).^2.*(1-7./z(i).^2)));
ncdf(i) = -0.5*log(2*pi)-z(i).^2./2-log(-z(i))+log(c);
end
z2 = norm_pdf(f)./ncdf;
llg3 = 2.*y.*z2.^3 + 3.*f.*z2.^2 - z2.*(y-y.*f.^2);
end
end
function [logM_0, m_1, sigm2hati1] = lik_probit_tiltedMoments(lik, y, i1, sigm2_i, myy_i, z)
%LIK_PROBIT_TILTEDMOMENTS Returns the marginal moments for EP algorithm
%
% Description
% [M_0, M_1, M2] = LIK_PROBIT_TILTEDMOMENTS(LIK, Y, I, S2,
% MYY) takes a likelihood structure LIK, class labels Y, index
% I and cavity variance S2 and mean MYY. Returns the zeroth
% moment M_0, mean M_1 and variance M_2 of the posterior
% marginal (see Rasmussen and Williams (2006): Gaussian
% processes for Machine Learning, page 55). This subfunction
% is needed when using EP for inference with non-Gaussian
% likelihoods.
%
% See also
% GPEP_E
% don't check this, because this function is called so often by EP
% if ~isempty(find(abs(y)~=1))
% error('lik_probit: The class labels have to be {-1,1}')
% end
a=realsqrt(1+sigm2_i);
zi=y(i1).*myy_i./a;
%normc_zi = 0.5.*erfc(-zi./sqrt(2)); % norm_cdf(zi)
normc_zi = 0.5.*erfc(-zi./1.414213562373095); % norm_cdf(zi)
%normp_zi = exp(-0.5.*zi.^2-log(2.*pi)./2); %norm_pdf(zi)
normp_zi = exp(-0.5.*realpow(zi,2)-0.918938533204673); %norm_pdf(zi)
m_1=myy_i+(y(i1).*sigm2_i.*normp_zi)./(normc_zi.*a); % muhati1
sigm2hati1=sigm2_i-(sigm2_i.^2.*normp_zi)./((1+sigm2_i).*normc_zi).*(zi+normp_zi./normc_zi); % sigm2hati1
logM_0 = reallog(normc_zi);
end
function [lpy, Ey, Vary] = lik_probit_predy(lik, Ef, Varf, yt, zt)
%LIK_PROBIT_PREDY Returns the predictive mean, variance and density of y
%
% Description
% LPY = LIK_PROBIT_PREDY(LIK, EF, VARF, YT)
% Returns logarithm of the predictive density PY of YT, that is
% p(yt | y) = \int p(yt | f) p(f|y) df.
% This requires also the class labels YT. This subfunction is
% needed when computing posterior predictive distributions for
% future observations.
%
% [LPY, EY, VARY] = LIK_PROBIT_PREDY(LIK, EF, VARF) takes a
% likelihood structure LIK, posterior mean EF and posterior
% Variance VARF of the latent variable and returns the
% posterior predictive mean EY and variance VARY of the
% observations related to the latent variables. This subfunction
% is needed when computing posterior predictive distributions for
% future observations.
%
%
% See also
% GPEP_PRED, GPLA_PRED, GPMC_PRED
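%
% Example (a sketch, not part of the original file; Ef and Varf are
% the posterior mean and variance of the latent values and yt are
% test labels in {-1,1}):
%
% lpyt = lik.fh.predy(lik, Ef, Varf, yt);
% pyt = exp(lpyt); % predictive probabilities of the given labels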
if nargout > 1
py1 = norm_cdf(Ef./sqrt(1+Varf));
Ey = 2*py1 - 1;
Vary = 1-Ey.^2;
end
if ~isempty(find(abs(yt)~=1))
error('lik_probit: The class labels have to be {-1,1}')
end
lpy=[];
if ~isempty(yt)
p = Ef.*yt./sqrt(1+Varf);
lpy = log(norm_cdf(p)); % Probability p(y_new)
if any(p<-10)
% Asymptotic expansion of norm_cdf
i = find(p<-10);
c = 1 - 1./p(i).^2.*(1-3./p(i).^2.*(1-5./p(i).^2.*(1-7./p(i).^2)));
lpy(i) = -0.5*log(2*pi)-p(i).^2./2-log(-p(i))+log(c);
end
end
end
function p = lik_probit_invlink(lik, f, z)
%LIK_PROBIT_INVLINK Returns values of inverse link function
%
% Description
% P = LIK_PROBIT_INVLINK(LIK, F) takes a likelihood structure LIK and
% latent values F and returns the values of inverse link function P.
% This subfunction is needed when using function gp_predprctmu.
%
% See also
% LIK_PROBIT_LL, LIK_PROBIT_PREDY
p = norm_cdf(f);
end
function reclik = lik_probit_recappend(reclik, ri, lik)
%RECAPPEND Append the parameters to the record
%
% Description
% RECLIK = GPCF_PROBIT_RECAPPEND(RECLIK, RI, LIK) takes a
% likelihood record structure RECLIK, record index RI and
% likelihood structure LIK with the current MCMC samples of
% the parameters. Returns RECLIK which contains all the old
% samples and the current samples from LIK. This subfunction
% is needed when using MCMC sampling (gp_mc).
%
% See also:
% gp_mc
if nargin == 2
reclik.type = 'Probit';
% Set the function handles
reclik.fh.pak = @lik_probit_pak;
reclik.fh.unpak = @lik_probit_unpak;
reclik.fh.ll = @lik_probit_ll;
reclik.fh.llg = @lik_probit_llg;
reclik.fh.llg2 = @lik_probit_llg2;
reclik.fh.llg3 = @lik_probit_llg3;
reclik.fh.tiltedMoments = @lik_probit_tiltedMoments;
reclik.fh.predy = @lik_probit_predy;
reclik.fh.invlink = @lik_probit_invlink;
reclik.fh.recappend = @lik_probit_recappend;
end
end
|
github
|
lcnbeapp/beapp-master
|
lik_negbin.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/lik_negbin.m
| 25,046 |
utf_8
|
a4815ddb29de3291580a7b2ecc130906
|
function lik = lik_negbin(varargin)
%LIK_NEGBIN Create a Negative-binomial likelihood structure
%
% Description
% LIK = LIK_NEGBIN('PARAM1',VALUE1,'PARAM2,VALUE2,...)
% creates Negative-binomial likelihood structure in which the
% named parameters have the specified values. Any unspecified
% parameters are set to default values.
%
% LIK = LIK_NEGBIN(LIK,'PARAM1',VALUE1,'PARAM2,VALUE2,...)
% modify a likelihood structure with the named parameters
% altered with the specified values.
%
% Parameters for Negative-binomial likelihood [default]
% disper - dispersion parameter r [10]
% disper_prior - prior for disper [prior_logunif]
%
% Note! If the prior is 'prior_fixed' then the parameter in
% question is considered fixed and it is not handled in
% optimization, grid integration, MCMC etc.
%
% The likelihood is defined as follows:
% __ n
% p(y|f, z) = || i=1 [ (r/(r+mu_i))^r * gamma(r+y_i)
% / ( gamma(r)*gamma(y_i+1) )
% * (mu/(r+mu_i))^y_i ]
%
% where mu_i = z_i*exp(f_i) and r is the dispersion parameter.
% z is a vector of expected mean and f the latent value vector
% whose components are transformed to relative risk
% exp(f_i).
%
% When using the Negbin likelihood you need to give the vector z
% as an extra parameter to each function that requires also y.
% For example, you should call gpla_e as follows: gpla_e(w, gp,
% x, y, 'z', z)
%
% See also
% GP_SET, LIK_*, PRIOR_*
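%
% Example (a minimal sketch, not part of the original file; y are
% observed counts, z the expected counts, x the inputs and gpcf a
% covariance function structure):
%
% lik = lik_negbin('disper', 10);
% gp = gp_set('lik', lik, 'cf', gpcf, 'latent_method', 'Laplace');
% gp = gp_optim(gp, x, y, 'z', z);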
%
% Copyright (c) 2007-2010 Jarno Vanhatalo & Jouni Hartikainen
% Copyright (c) 2010 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
% inputParser checks the arguments and assigns some default values
ip=inputParser;
ip.FunctionName = 'LIK_NEGBIN';
ip.addOptional('lik', [], @isstruct);
ip.addParamValue('disper',10, @(x) isscalar(x) && x>0);
ip.addParamValue('disper_prior',prior_logunif(), @(x) isstruct(x) || isempty(x));
ip.parse(varargin{:});
lik=ip.Results.lik;
if isempty(lik)
init=true;
lik.type = 'Negbin';
else
if ~isfield(lik,'type') || ~isequal(lik.type,'Negbin')
error('First argument does not seem to be a valid likelihood function structure')
end
init=false;
end
% Initialize parameters
if init || ~ismember('disper',ip.UsingDefaults)
lik.disper = ip.Results.disper;
end
% Initialize prior structure
if init
lik.p=[];
end
if init || ~ismember('disper_prior',ip.UsingDefaults)
lik.p.disper=ip.Results.disper_prior;
end
if init
% Set the function handles to the subfunctions
lik.fh.pak = @lik_negbin_pak;
lik.fh.unpak = @lik_negbin_unpak;
lik.fh.lp = @lik_negbin_lp;
lik.fh.lpg = @lik_negbin_lpg;
lik.fh.ll = @lik_negbin_ll;
lik.fh.llg = @lik_negbin_llg;
lik.fh.llg2 = @lik_negbin_llg2;
lik.fh.llg3 = @lik_negbin_llg3;
lik.fh.tiltedMoments = @lik_negbin_tiltedMoments;
lik.fh.siteDeriv = @lik_negbin_siteDeriv;
lik.fh.predy = @lik_negbin_predy;
lik.fh.predprcty = @lik_negbin_predprcty;
lik.fh.invlink = @lik_negbin_invlink;
lik.fh.recappend = @lik_negbin_recappend;
end
end
function [w,s] = lik_negbin_pak(lik)
%LIK_NEGBIN_PAK Combine likelihood parameters into one vector.
%
% Description
% W = LIK_NEGBIN_PAK(LIK) takes a likelihood structure LIK and
% combines the parameters into a single row vector W. This is a
% mandatory subfunction used for example in energy and gradient
% computations.
%
% w = log(lik.disper)
%
% See also
% LIK_NEGBIN_UNPAK, GP_PAK
w=[];s={};
if ~isempty(lik.p.disper)
w = log(lik.disper);
s = [s; 'log(negbin.disper)'];
[wh sh] = lik.p.disper.fh.pak(lik.p.disper);
w = [w wh];
s = [s; sh];
end
end
function [lik, w] = lik_negbin_unpak(lik, w)
%LIK_NEGBIN_UNPAK Extract likelihood parameters from the vector.
%
% Description
% [LIK, W] = LIK_NEGBIN_UNPAK(LIK, W) takes a likelihood
% structure LIK and extracts the parameters from the vector W
% to the LIK structure. This is a mandatory subfunction used
% for example in energy and gradient computations.
%
% Assignment is inverse of
% w = log(lik.disper)
%
% See also
% LIK_NEGBIN_PAK, GP_UNPAK
if ~isempty(lik.p.disper)
lik.disper = exp(w(1));
w = w(2:end);
[p, w] = lik.p.disper.fh.unpak(lik.p.disper, w);
lik.p.disper = p;
end
end
function lp = lik_negbin_lp(lik, varargin)
%LIK_NEGBIN_LP log(prior) of the likelihood parameters
%
% Description
% LP = LIK_NEGBIN_LP(LIK) takes a likelihood structure LIK and
% returns log(p(th)), where th collects the parameters. This
% subfunction is needed if there are likelihood parameters.
%
% See also
% LIK_NEGBIN_LLG, LIK_NEGBIN_LLG3, LIK_NEGBIN_LLG2, GPLA_E
% If prior for dispersion parameter, add its contribution
lp=0;
if ~isempty(lik.p.disper)
lp = lik.p.disper.fh.lp(lik.disper, lik.p.disper) +log(lik.disper);
end
end
function lpg = lik_negbin_lpg(lik)
%LIK_NEGBIN_LPG d log(prior)/dth of the likelihood
% parameters th
%
% Description
% E = LIK_NEGBIN_LPG(LIK) takes a likelihood structure LIK and
% returns d log(p(th))/dth, where th collects the parameters.
% This subfunction is needed if there are likelihood parameters.
%
% See also
% LIK_NEGBIN_LLG, LIK_NEGBIN_LLG3, LIK_NEGBIN_LLG2, GPLA_G
lpg=[];
if ~isempty(lik.p.disper)
% Evaluate the gprior with respect to disper
ggs = lik.p.disper.fh.lpg(lik.disper, lik.p.disper);
lpg = ggs(1).*lik.disper + 1;
if length(ggs) > 1
lpg = [lpg ggs(2:end)];
end
end
end
function ll = lik_negbin_ll(lik, y, f, z)
%LIK_NEGBIN_LL Log likelihood
%
% Description
% LL = LIK_NEGBIN_LL(LIK, Y, F, Z) takes a likelihood
% structure LIK, incidence counts Y, expected counts Z, and
% latent values F. Returns the log likelihood, log p(y|f,z).
% This subfunction is needed when using Laplace approximation
% or MCMC for inference with non-Gaussian likelihoods. This
% subfunction is also used in information criteria (DIC, WAIC)
% computations.
%
% See also
% LIK_NEGBIN_LLG, LIK_NEGBIN_LLG3, LIK_NEGBIN_LLG2, GPLA_E
if isempty(z)
error(['lik_negbin -> lik_negbin_ll: missing z! '...
'Negbin likelihood needs the expected number of '...
'occurrences as an extra input z. See, for '...
'example, lik_negbin and gpla_e. ']);
end
r = lik.disper;
mu = exp(f).*z;
ll = sum(r.*(log(r) - log(r+mu)) + gammaln(r+y) - gammaln(r) - gammaln(y+1) + y.*(log(mu) - log(r+mu)));
end
function llg = lik_negbin_llg(lik, y, f, param, z)
%LIK_NEGBIN_LLG Gradient of the log likelihood
%
% Description
% LLG = LIK_NEGBIN_LLG(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, incidence counts Y, expected counts Z and
% latent values F. Returns the gradient of the log likelihood
% with respect to PARAM. At the moment PARAM can be 'param' or
% 'latent'. This subfunction is needed when using Laplace
% approximation or MCMC for inference with non-Gaussian likelihoods.
%
% See also
% LIK_NEGBIN_LL, LIK_NEGBIN_LLG2, LIK_NEGBIN_LLG3, GPLA_E
if isempty(z)
error(['lik_negbin -> lik_negbin_llg: missing z! '...
'Negbin likelihood needs the expected number of '...
'occurrences as an extra input z. See, for '...
'example, lik_negbin and gpla_e. ']);
end
mu = exp(f).*z;
r = lik.disper;
switch param
case 'param'
% Derivative using the psi function
llg = sum(1 + log(r./(r+mu)) - (r+y)./(r+mu) + psi(r + y) - psi(r));
% correction for the log transformation
llg = llg.*lik.disper;
% $$$ % Derivative using sum formulation
% $$$ llg = 0;
% $$$ for i1 = 1:length(y)
% $$$ llg = llg + log(r/(r+mu(i1))) + 1 - (r+y(i1))/(r+mu(i1));
% $$$ for i2 = 0:y(i1)-1
% $$$ llg = llg + 1 / (i2 + r);
% $$$ end
% $$$ end
% $$$ % correction for the log transformation
% $$$ llg = llg.*lik.disper;
case 'latent'
llg = y - (r+y).*mu./(r+mu);
end
end
function llg2 = lik_negbin_llg2(lik, y, f, param, z)
%LIK_NEGBIN_LLG2 Second gradients of the log likelihood
%
% Description
% LLG2 = LIK_NEGBIN_LLG2(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, incidence counts Y, expected counts Z, and
% latent values F. Returns the Hessian of the log likelihood
% with respect to PARAM. At the moment PARAM can be only
% 'latent'. LLG2 is a vector with diagonal elements of the
% Hessian matrix (off diagonals are zero). This subfunction
% is needed when using Laplace approximation or EP for
% inference with non-Gaussian likelihoods.
%
% See also
% LIK_NEGBIN_LL, LIK_NEGBIN_LLG, LIK_NEGBIN_LLG3, GPLA_E
if isempty(z)
error(['lik_negbin -> lik_negbin_llg2: missing z! '...
'Negbin likelihood needs the expected number of '...
'occurrences as an extra input z. See, for '...
'example, lik_negbin and gpla_e. ']);
end
mu = exp(f).*z;
r = lik.disper;
switch param
case 'param'
case 'latent'
llg2 = - mu.*(r.^2 + y.*r)./(r+mu).^2;
case 'latent+param'
llg2 = (y.*mu - mu.^2)./(r+mu).^2;
% correction due to the log transformation
llg2 = llg2.*lik.disper;
end
end
function llg3 = lik_negbin_llg3(lik, y, f, param, z)
%LIK_NEGBIN_LLG3 Third gradients of the log likelihood
%
% Description
% LLG3 = LIK_NEGBIN_LLG3(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, incidence counts Y, expected counts Z and
% latent values F and returns the third gradients of the log
% likelihood with respect to PARAM. At the moment PARAM can be
% only 'latent'. LLG3 is a vector with third gradients. This
% subfunction is needed when using Laplace approximation for
% inference with non-Gaussian likelihoods.
%
% See also
% LIK_NEGBIN_LL, LIK_NEGBIN_LLG, LIK_NEGBIN_LLG2, GPLA_E, GPLA_G
if isempty(z)
error(['lik_negbin -> lik_negbin_llg3: missing z! '...
'Negbin likelihood needs the expected number of '...
'occurrences as an extra input z. See, for '...
'example, lik_negbin and gpla_e. ']);
end
mu = exp(f).*z;
r = lik.disper;
switch param
case 'param'
case 'latent'
llg3 = - mu.*(r.^2 + y.*r)./(r + mu).^2 + 2.*mu.^2.*(r.^2 + y.*r)./(r + mu).^3;
case 'latent2+param'
llg3 = mu.*(y.*r - 2.*r.*mu - mu.*y)./(r+mu).^3;
% correction due to the log transformation
llg3 = llg3.*lik.disper;
end
end
function [logM_0, m_1, sigm2hati1] = lik_negbin_tiltedMoments(lik, y, i1, sigm2_i, myy_i, z)
%LIK_NEGBIN_TILTEDMOMENTS Returns the marginal moments for EP
%
% Description
% [M_0, M_1, M2] = LIK_NEGBIN_TILTEDMOMENTS(LIK, Y, I, S2,
% MYY, Z) takes a likelihood structure LIK, incidence counts
% Y, expected counts Z, index I and cavity variance S2 and
% mean MYY. Returns the zeroth moment M_0, mean M_1 and
% variance M_2 of the posterior marginal (see Rasmussen and
% Williams (2006): Gaussian processes for Machine Learning,
% page 55). This subfunction is needed when using EP for
% inference with non-Gaussian likelihoods.
%
% See also
% GPEP_E
% if isempty(z)
% error(['lik_negbin -> lik_negbin_tiltedMoments: missing z!'...
% 'Negbin likelihood needs the expected number of '...
% 'occurrences as an extra input z. See, for '...
% 'example, lik_negbin and gpep_e. ']);
% end
yy = y(i1);
avgE = z(i1);
r = lik.disper;
logM_0=zeros(size(yy));
m_1=zeros(size(yy));
sigm2hati1=zeros(size(yy));
for i=1:length(i1)
% get a function handle of an unnormalized tilted distribution
% (likelihood * cavity = Negative-binomial * Gaussian)
% and useful integration limits
[tf,minf,maxf]=init_negbin_norm(yy(i),myy_i(i),sigm2_i(i),avgE(i),r);
% Integrate with quadrature
RTOL = 1.e-6;
ATOL = 1.e-10;
[m_0, m_1(i), m_2] = quad_moments(tf, minf, maxf, RTOL, ATOL);
sigm2hati1(i) = m_2 - m_1(i).^2;
% If the second central moment is not less than the cavity
% variance, integrate more precisely. Theoretically, for a
% log-concave likelihood we should have sigm2hati1 < sigm2_i.
if sigm2hati1(i) >= sigm2_i(i)
ATOL = ATOL.^2;
RTOL = RTOL.^2;
[m_0, m_1(i), m_2] = quad_moments(tf, minf, maxf, RTOL, ATOL);
sigm2hati1(i) = m_2 - m_1(i).^2;
if sigm2hati1(i) >= sigm2_i(i)
error('lik_negbin_tilted_moments: sigm2hati1 >= sigm2_i');
end
end
logM_0(i) = log(m_0);
end
end
function [g_i] = lik_negbin_siteDeriv(lik, y, i1, sigm2_i, myy_i, z)
%LIK_NEGBIN_SITEDERIV Evaluate the expectation of the gradient
% of the log likelihood term with respect
% to the likelihood parameters for EP
%
% Description
% G_I = LIK_NEGBIN_SITEDERIV(LIK, Y, I, S2, MYY, Z) takes a
% likelihood structure LIK, incidence counts Y, expected
% counts Z, index I and cavity variance S2 and mean MYY.
% Returns E_f [d log p(y_i|f_i) /d a], where a is the
% likelihood parameter and the expectation is over the
% marginal posterior. This term is needed when evaluating the
% gradients of the marginal likelihood estimate Z_EP with
% respect to the likelihood parameters (see Seeger (2008):
% Expectation propagation for exponential families). This
% subfunction is needed when using EP for inference with
% non-Gaussian likelihoods and there are likelihood parameters.
%
% See also
% GPEP_G
if isempty(z)
error(['lik_negbin -> lik_negbin_siteDeriv: missing z!'...
'Negbin likelihood needs the expected number of '...
'occurrences as an extra input z. See, for '...
'example, lik_negbin and gpla_e. ']);
end
yy = y(i1);
avgE = z(i1);
r = lik.disper;
% get a function handle of an unnormalized tilted distribution
% (likelihood * cavity = Negative-binomial * Gaussian)
% and useful integration limits
[tf,minf,maxf]=init_negbin_norm(yy,myy_i,sigm2_i,avgE,r);
% additionally get function handle for the derivative
td = @deriv;
% Integrate with quadgk
[m_0, fhncnt] = quadgk(tf, minf, maxf);
[g_i, fhncnt] = quadgk(@(f) td(f).*tf(f)./m_0, minf, maxf);
g_i = g_i.*r;
function g = deriv(f)
mu = avgE.*exp(f);
% Derivative using the psi function
g = 1 + log(r./(r+mu)) - (r+yy)./(r+mu) + psi(r + yy) - psi(r);
% % Derivative using the sum formulation
% g = 0;
% g = g + log(r./(r+mu)) + 1 - (r+yy)./(r+mu);
% for i2 = 0:yy-1
% g = g + 1 ./ (i2 + r);
% end
end
end
function [lpy, Ey, Vary] = lik_negbin_predy(lik, Ef, Varf, yt, zt)
%LIK_NEGBIN_PREDY Returns the predictive mean, variance and density of y
%
% Description
% LPY = LIK_NEGBIN_PREDY(LIK, EF, VARF, YT, ZT)
% Returns logarithm of the predictive density PY of YT, that is
% p(yt | zt) = \int p(yt | f, zt) p(f|y) df.
% This requires also the incidence counts YT and expected counts ZT.
% This subfunction is needed when computing posterior predictive
% distributions for future observations.
%
% [LPY, EY, VARY] = LIK_NEGBIN_PREDY(LIK, EF, VARF) takes a
% likelihood structure LIK, posterior mean EF and posterior
% Variance VARF of the latent variable and returns the
% posterior predictive mean EY and variance VARY of the
% observations related to the latent variables. This subfunction
% is needed when computing posterior predictive distributions for
% future observations.
%
%
% See also
% GPLA_PRED, GPEP_PRED, GPMC_PRED
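%
% Example (a sketch, not part of the original file; Ef and Varf are
% the posterior mean and variance of the latent values, yt test
% counts and zt expected counts):
%
% [lpyt, Eyt, Varyt] = lik.fh.predy(lik, Ef, Varf, yt, zt);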
if isempty(zt)
error(['lik_negbin -> lik_negbin_predy: missing zt!'...
'Negbin likelihood needs the expected number of '...
'occurrences as an extra input zt. See, for '...
'example, lik_negbin and gpla_e. ']);
end
avgE = zt;
r = lik.disper;
lpy = zeros(size(Ef));
Ey = zeros(size(Ef));
EVary = zeros(size(Ef));
VarEy = zeros(size(Ef));
if nargout > 1
% Evaluate Ey and Vary
for i1=1:length(Ef)
%%% With quadrature
myy_i = Ef(i1);
sigm_i = sqrt(Varf(i1));
minf=myy_i-6*sigm_i;
maxf=myy_i+6*sigm_i;
F = @(f) exp(log(avgE(i1))+f+norm_lpdf(f,myy_i,sigm_i));
Ey(i1) = quadgk(F,minf,maxf);
F2 = @(f) exp(log(avgE(i1).*exp(f)+((avgE(i1).*exp(f)).^2/r))+norm_lpdf(f,myy_i,sigm_i));
EVary(i1) = quadgk(F2,minf,maxf);
F3 = @(f) exp(2*log(avgE(i1))+2*f+norm_lpdf(f,myy_i,sigm_i));
VarEy(i1) = quadgk(F3,minf,maxf) - Ey(i1).^2;
end
Vary = EVary + VarEy;
end
% Evaluate the posterior predictive densities of the given observations
lpy = zeros(length(yt),1);
for i1=1:length(yt)
% get a function handle of the likelihood times posterior
% (likelihood * posterior = Negative-binomial * Gaussian)
% and useful integration limits
[pdf,minf,maxf]=init_negbin_norm(...
yt(i1),Ef(i1),Varf(i1),avgE(i1),r);
% integrate over the f to get posterior predictive distribution
lpy(i1) = log(quadgk(pdf, minf, maxf));
end
end
function prctys = lik_negbin_predprcty(lik, Ef, Varf, zt, prcty)
%LIK_NEGBIN_PREDPRCTY Returns the percentiles of predictive density of y
%
% Description
% PRCTYS = LIK_NEGBIN_PREDPRCTY(LIK, EF, VARF, ZT, PRCTY)
% Returns the percentiles PRCTYS of the predictive density of y
% at the requested levels PRCTY. This requires also the expected
% counts ZT.
% This subfunction is needed when using function gp_predprcty.
%
% See also
% GP_PREDPRCTY
if isempty(zt)
error(['lik_negbin -> lik_negbin_predprcty: missing zt!'...
'Negbin likelihood needs the expected number of '...
'occurrences as an extra input zt. See, for '...
'example, lik_negbin and gpla_e. ']);
end
opt=optimset('TolX',.5,'Display','off');
nt=size(Ef,1);
prctys = zeros(nt,numel(prcty));
prcty=prcty/100;
r = lik.disper;
for i1=1:nt
ci = sqrt(Varf(i1));
for i2=1:numel(prcty)
minf = nbininv(prcty(i2),r,r./(r+zt(i1).*exp(Ef(i1)-1.96*ci)));
maxf = nbininv(prcty(i2),r,r./(r+zt(i1).*exp(Ef(i1)+1.96*ci)));
a=floor(fminbnd(@(a) (quadgk(@(f) nbincdf(a,r,r./(r+zt(i1).*exp(f))).*norm_pdf(f,Ef(i1),ci),Ef(i1)-6*ci,Ef(i1)+6*ci,'AbsTol',1e-4)-prcty(i2)).^2,minf,maxf,opt));
if quadgk(@(f) nbincdf(a,r,r./(r+zt(i1).*exp(f))).*norm_pdf(f,Ef(i1),ci),Ef(i1)-6*ci,Ef(i1)+6*ci,'AbsTol',1e-4)<prcty(i2)
a=a+1;
end
prctys(i1,i2)=a;
end
end
end
function [df,minf,maxf] = init_negbin_norm(yy,myy_i,sigm2_i,avgE,r)
%INIT_NEGBIN_NORM
%
% Description
% Return function handle to a function evaluating
% Negative-Binomial * Gaussian which is used for evaluating
% (likelihood * cavity) or (likelihood * posterior) Return
% also useful limits for integration. This is private function
% for lik_negbin. This subfunction is needed by subfunctions
% tiltedMoments, siteDeriv and predy.
%
% See also
% LIK_NEGBIN_TILTEDMOMENTS, LIK_NEGBIN_SITEDERIV,
% LIK_NEGBIN_PREDY
% avoid repetitive evaluation of constant part
ldconst = -gammaln(r)-gammaln(yy+1)+gammaln(r+yy)...
- log(sigm2_i)/2 - log(2*pi)/2;
% Create function handle for the function to be integrated
df = @negbin_norm;
% use log to avoid underflow, and derivates for faster search
ld = @log_negbin_norm;
ldg = @log_negbin_norm_g;
ldg2 = @log_negbin_norm_g2;
% Set the limits for integration
% Negative-binomial likelihood is log-concave so the negbin_norm
% function is unimodal, which makes things easier
if yy==0
% with yy==0, the mode of the likelihood is not defined
% use the mode of the Gaussian (cavity or posterior) as a first guess
modef = myy_i;
else
% use precision weighted mean of the Gaussian approximation
% of the Negative-Binomial likelihood and Gaussian
mu=log(yy/avgE);
s2=(yy+r)./(yy.*r);
modef = (myy_i/sigm2_i + mu/s2)/(1/sigm2_i + 1/s2);
end
% find the mode of the integrand using Newton iterations
% a few iterations are enough, since the first guess is in the right direction
niter=4; % number of Newton iterations
mindelta=1e-6; % tolerance in stopping Newton iterations
for ni=1:niter
g=ldg(modef);
h=ldg2(modef);
delta=-g/h;
modef=modef+delta;
if abs(delta)<mindelta
break
end
end
% integrand limits based on Gaussian approximation at mode
modes=sqrt(-1/h);
minf=modef-8*modes;
maxf=modef+8*modes;
modeld=ld(modef);
iter=0;
% check that density at end points is low enough
lddiff=20; % min difference in log-density between mode and end-points
minld=ld(minf);
step=1;
while minld>(modeld-lddiff)
minf=minf-step*modes;
minld=ld(minf);
iter=iter+1;
step=step*2;
if iter>100
error(['lik_negbin -> init_negbin_norm: ' ...
'integration interval minimum not found ' ...
'even after looking hard!'])
end
end
maxld=ld(maxf);
step=1;
while maxld>(modeld-lddiff)
maxf=maxf+step*modes;
maxld=ld(maxf);
iter=iter+1;
step=step*2;
if iter>100
error(['lik_negbin -> init_negbin_norm: ' ...
'integration interval maximum not found ' ...
'even after looking hard!'])
end
end
function integrand = negbin_norm(f)
% Negative-binomial * Gaussian
mu = avgE.*exp(f);
integrand = exp(ldconst ...
+yy.*(log(mu)-log(r+mu))+r.*(log(r)-log(r+mu)) ...
-0.5*(f-myy_i).^2./sigm2_i);
end
function log_int = log_negbin_norm(f)
% log(Negative-binomial * Gaussian)
% log_negbin_norm is used to avoid underflow when searching
% integration interval
mu = avgE.*exp(f);
log_int = ldconst...
+yy.*(log(mu)-log(r+mu))+r.*(log(r)-log(r+mu))...
-0.5*(f-myy_i).^2./sigm2_i;
end
function g = log_negbin_norm_g(f)
% d/df log(Negative-binomial * Gaussian)
% derivative of log_negbin_norm
mu = avgE.*exp(f);
g = -(r.*(mu - yy))./(mu.*(mu + r)).*mu ...
+ (myy_i - f)./sigm2_i;
end
function g2 = log_negbin_norm_g2(f)
% d^2/df^2 log(Negative-binomial * Gaussian)
% second derivate of log_negbin_norm
mu = avgE.*exp(f);
g2 = -(r*(r + yy))/(mu + r)^2.*mu ...
-1/sigm2_i;
end
end
function mu = lik_negbin_invlink(lik, f, z)
%LIK_NEGBIN_INVLINK Returns values of inverse link function
%
% Description
% MU = LIK_NEGBIN_INVLINK(LIK, F) takes a likelihood structure LIK and
% latent values F and returns the values MU of inverse link function.
% This subfunction is needed when using function gp_predprctmu.
%
% See also
% LIK_NEGBIN_LL, LIK_NEGBIN_PREDY
mu = z.*exp(f);
end
function reclik = lik_negbin_recappend(reclik, ri, lik)
%RECAPPEND Append the parameters to the record
%
% Description
% RECLIK = GPCF_NEGBIN_RECAPPEND(RECLIK, RI, LIK) takes a
% likelihood record structure RECLIK, record index RI and
% likelihood structure LIK with the current MCMC samples of
% the parameters. Returns RECLIK which contains all the old
% samples and the current samples from LIK. This subfunction
% is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC
if nargin == 2
% Initialize the record
reclik.type = 'Negbin';
% Initialize parameter
reclik.disper = [];
% Set the function handles
reclik.fh.pak = @lik_negbin_pak;
reclik.fh.unpak = @lik_negbin_unpak;
reclik.fh.lp = @lik_negbin_lp;
reclik.fh.lpg = @lik_negbin_lpg;
reclik.fh.ll = @lik_negbin_ll;
reclik.fh.llg = @lik_negbin_llg;
reclik.fh.llg2 = @lik_negbin_llg2;
reclik.fh.llg3 = @lik_negbin_llg3;
reclik.fh.tiltedMoments = @lik_negbin_tiltedMoments;
reclik.fh.predy = @lik_negbin_predy;
reclik.fh.predprcty = @lik_negbin_predprcty;
reclik.fh.invlink = @lik_negbin_invlink;
reclik.fh.recappend = @lik_negbin_recappend;
reclik.p=[];
reclik.p.disper=[];
if ~isempty(ri.p.disper)
reclik.p.disper = ri.p.disper;
end
else
% Append to the record
reclik.disper(ri,:)=lik.disper;
if ~isempty(lik.p)
reclik.p.disper = lik.p.disper.fh.recappend(reclik.p.disper, ri, lik.p.disper);
end
end
end
|
github
|
lcnbeapp/beapp-master
|
gpcf_cat.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gpcf_cat.m
| 11,705 |
UNKNOWN
|
78197ff0860b39f108847aa8abae1847
|
function gpcf = gpcf_cat(varargin)
%GPCF_CAT Create a categorical covariance function
%
% Description
% GPCF = GPCF_CAT('PARAM1',VALUE1,'PARAM2,VALUE2,...)
% creates a categorical covariance function structure in
% which the named parameters have the specified values. Any
% unspecified parameters are set to default values.
%
% GPCF = GPCF_CAT(GPCF,'PARAM1',VALUE1,'PARAM2,VALUE2,...)
% modify a covariance function structure with the named
% parameters altered with the specified values.
%
% Categorical covariance function returns correlation 1 if input
% values X_i and X_j are equal and 0 otherwise.
%
% Parameters for categorical covariance function
% selectedVariables - vector defining which inputs are used
%
% See also
% GP_SET, GPCF_*, PRIOR_*, MEAN_*
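%
% Example (a sketch, not part of the original file; assumes the third
% input column holds a categorical group label):
%
% gpcf = gpcf_cat('selectedVariables', 3);
% K = gpcf.fh.trcov(gpcf, x); % K(i,j) = 1 iff x(i,3) == x(j,3)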
% Copyright (c) 2007-2010 Jarno Vanhatalo
% Copyright (c) 2008-2010 Jaakko Riihimäki
% Copyright (c) 2010 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'GPCF_CAT';
ip.addOptional('gpcf', [], @isstruct);
ip.addParamValue('selectedVariables',[], ...
@(x) isempty(x) || (isvector(x) && all(x>0)));
ip.parse(varargin{:});
gpcf=ip.Results.gpcf;
if isempty(gpcf)
% Initialize a covariance function structure
init=true;
gpcf.type = 'gpcf_cat';
else
% Modify a covariance function structure
if ~isfield(gpcf,'type') || ~isequal(gpcf.type,'gpcf_cat')
error('First argument does not seem to be a valid covariance function structure')
end
init=false;
end
if ~ismember('selectedVariables',ip.UsingDefaults)
if ~isempty(ip.Results.selectedVariables)
gpcf.selectedVariables = ip.Results.selectedVariables;
elseif isfield(gpcf,'selectedVariables')
gpcf=rmfield(gpcf,'selectedVariables');
end
end
if init
gpcf.fh.pak = @gpcf_cat_pak;
gpcf.fh.unpak = @gpcf_cat_unpak;
gpcf.fh.lp = @gpcf_cat_lp;
gpcf.fh.lpg = @gpcf_cat_lpg;
gpcf.fh.cfg = @gpcf_cat_cfg;
gpcf.fh.ginput = @gpcf_cat_ginput;
gpcf.fh.cov = @gpcf_cat_cov;
gpcf.fh.trcov = @gpcf_cat_trcov;
gpcf.fh.trvar = @gpcf_cat_trvar;
gpcf.fh.recappend = @gpcf_cat_recappend;
end
end
function [w,s] = gpcf_cat_pak(gpcf, w)
%GPCF_CAT_PAK Combine GP covariance function parameters into
% one vector.
%
% Description
% W = GPCF_CAT_PAK(GPCF) takes a covariance function
% structure GPCF and combines the covariance function
% parameters and their hyperparameters into a single row
% vector W. This is a mandatory subfunction used for
% example in energy and gradient computations.
%
% w = []
%
% See also
% GPCF_CAT_UNPAK
w = []; s = {};
end
function [gpcf, w] = gpcf_cat_unpak(gpcf, w)
%GPCF_CAT_UNPAK Sets the covariance function parameters into
% the structure
%
% Description
% [GPCF, W] = GPCF_CAT_UNPAK(GPCF, W) takes a covariance
% function structure GPCF and a parameter vector W, and
% returns a covariance function structure identical to the
% input, except that the covariance parameters have been set
% to the values in W. Deletes the values set to GPCF from W
% and returns the modified W. This is a mandatory subfunction
% used for example in energy and gradient computations.
%
% Assignment is inverse of
% w = []
%
% See also
% GPCF_CAT_PAK
end
function lp = gpcf_cat_lp(gpcf)
%GPCF_CAT_LP Evaluate the energy of prior of covariance function parameters
%
% Description
% LP = GPCF_CAT_LP(GPCF) takes a covariance function
% structure GPCF and returns log(p(th)), where th collects the
% parameters. This is a mandatory subfunction used for example
% in energy computations.
%
% See also
% GPCF_CAT_PAK, GPCF_CAT_UNPAK, GPCF_CAT_LPG, GP_E
lp = 0;
end
function lpg = gpcf_cat_lpg(gpcf)
%GPCF_CAT_LPG Evaluate gradient of the log prior with respect
% to the parameters.
%
% Description
% LPG = GPCF_CAT_LPG(GPCF) takes a covariance function
% structure GPCF and returns LPG = d log (p(th))/dth, where th
% is the vector of parameters. This is a mandatory subfunction
% used for example in gradient computations.
%
% See also
% GPCF_CAT_PAK, GPCF_CAT_UNPAK, GPCF_CAT_LP, GP_G
lpg = [];
end
function DKff = gpcf_cat_cfg(gpcf, x, x2, mask, i1)
%GPCF_CAT_CFG Evaluate gradient of covariance function
% with respect to the parameters.
%
% Description
% DKff = GPCF_CAT_CFG(GPCF, X) takes a covariance function
% structure GPCF, a matrix X of input vectors and returns
% DKff, the gradients of covariance matrix Kff = k(X,X) with
% respect to th (cell array with matrix elements). This is a
% mandatory subfunction used in gradient computations.
%
% DKff = GPCF_CAT_CFG(GPCF, X, X2) takes a covariance function
% structure GPCF, a matrix X of input vectors and returns
% DKff, the gradients of covariance matrix Kff = k(X,X2) with
% respect to th (cell array with matrix elements). This subfunction
% is needed when using sparse approximations (e.g. FIC).
%
% DKff = GPCF_CAT_CFG(GPCF, X, [], MASK) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the diagonal of gradients of covariance matrix
% Kff = k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using sparse
% approximations (e.g. FIC).
%
% DKff = GPCF_CAT_CFG(GPCF, X, X2, MASK, i) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2), or k(X,X) if X2 is empty, with respect to ith
% hyperparameter. This subfunction is needed when using
% memory save option in gp_set.
%
% See also
% GPCF_CAT_PAK, GPCF_CAT_UNPAK, GPCF_CAT_LP, GP_G
if nargin==5
% Use memory save option
if i1==0
% Return number of hyperparameters
DKff=0;
return
end
end
DKff = {};
end
function [DKff, lpg] = gpcf_cat_ginput(gpcf, x, x2, i1)
%GPCF_CAT_GINPUT Evaluate gradient of covariance function with
% respect to x.
%
% Description
% DKff = GPCF_CAT_GINPUT(GPCF, X) takes a covariance function
% structure GPCF, a matrix X of input vectors and returns
% DKff, the gradients of covariance matrix Kff = k(X,X) with
% respect to X (cell array with matrix elements). This subfunction
% is needed when computing gradients with respect to inducing
% inputs in sparse approximations.
%
% DKff = GPCF_CAT_GINPUT(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors
% and returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% DKff = GPCF_CAT_GINPUT(GPCF, X, X2, i) takes a covariance
% function structure GPCF, a matrix X of input vectors
% and returns DKff, the gradients of covariance matrix Kff =
% k(X,X2), or k(X,X) if X2 is empty, with respect to ith
% covariate in X. This subfunction is needed when using
% memory save option in gp_set.
%
% See also
% GPCF_CAT_PAK, GPCF_CAT_UNPAK, GPCF_CAT_LP, GP_G
[n, m] =size(x);
if nargin==4
% Use memory save option
if i1==0
% Return number of covariates
if isfield(gpcf,'selectedVariables')
DKff=length(gpcf.selectedVariables);
else
DKff=m;
end
return
end
end
if nargin == 2 || isempty(x2)
ii1 = 0;
for i=1:m
for j = 1:n
ii1 = ii1 + 1;
DKff{ii1} = zeros(n);
lpg(ii1) = 0;
end
end
elseif nargin == 3 || nargin == 4
ii1 = 0;
for i=1:m
for j = 1:n
ii1 = ii1 + 1;
DKff{ii1} = zeros(n, size(x2,1));
lpg(ii1) = 0;
end
end
end
end
function C = gpcf_cat_cov(gpcf, x1, x2, varargin)
%GP_CAT_COV Evaluate covariance matrix between two input vectors
%
% Description
% C = GP_CAT_COV(GP, TX, X) takes in covariance function of a
% Gaussian process GP and two matrixes TX and X that contain
% input vectors to GP. Returns covariance matrix C. Every
% element ij of C contains covariance between inputs i in TX
% and j in X. This is a mandatory subfunction used for example
% in prediction and energy computations.
%
% See also
% GPCF_CAT_TRCOV, GPCF_CAT_TRVAR, GP_COV, GP_TRCOV
if isempty(x2)
x2=x1;
end
[n1,m1]=size(x1);
[n2,m2]=size(x2);
if m1~=m2
error('the number of columns of X1 and X2 has to be same')
end
C=repmat(true,n1,n2);
if isfield(gpcf, 'selectedVariables')
for j = 1:length(gpcf.selectedVariables)
jj=gpcf.selectedVariables(j);
C = C & bsxfun(@eq,x1(:,jj),x2(:,jj)');
end
else
for j = 1:m1
C = C & bsxfun(@eq,x1(:,j),x2(:,j)');
end
end
C=double(C);
end
function C = gpcf_cat_trcov(gpcf, x)
%GP_CAT_TRCOV Evaluate training covariance matrix of inputs
%
% Description
% C = GP_CAT_TRCOV(GP, TX) takes in covariance function of a
% Gaussian process GP and matrix TX that contains training
% input vectors. Returns covariance matrix C. Every element ij
% of C contains covariance between inputs i and j in TX. This
% is a mandatory subfunction used for example in prediction and
% energy computations.
%
% See also
% GPCF_CAT_COV, GPCF_CAT_TRVAR, GP_COV, GP_TRCOV
[n,m]=size(x);
C=repmat(true,n,n);
if isfield(gpcf, 'selectedVariables')
for j = 1:length(gpcf.selectedVariables)
jj=gpcf.selectedVariables(j);
C = C & bsxfun(@eq,x(:,jj),x(:,jj)');
end
else
for j = 1:m
C = C & bsxfun(@eq,x(:,j),x(:,j)');
end
end
C=double(C);
end
function C = gpcf_cat_trvar(gpcf, x)
%GP_CAT_TRVAR Evaluate training variance vector
%
% Description
% C = GP_CAT_TRVAR(GPCF, TX) takes in covariance function of a
% Gaussian process GPCF and matrix TX that contains training
% inputs. Returns variance vector C. Every element i of C
% contains variance of input i in TX. This is a mandatory
% subfunction used for example in prediction and energy
% computations.
%
% See also
% GPCF_CAT_COV, GP_COV, GP_TRCOV
[n,m]=size(x);
C=ones(n,1);
end
function reccf = gpcf_cat_recappend(reccf, ri, gpcf)
%RECAPPEND Record append
%
% Description
% RECCF = GPCF_CAT_RECAPPEND(RECCF, RI, GPCF) takes a
% covariance function record structure RECCF, record index RI
% and covariance function structure GPCF with the current MCMC
% samples of the parameters. Returns RECCF which contains
% all the old samples and the current samples from GPCF.
% This subfunction is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC and GP_MC -> RECAPPEND
if nargin == 2
% Initialize the record
reccf.type = 'gpcf_cat';
% Initialize parameters
reccf.coeffSigma2= [];
% Set the function handles
reccf.fh.pak = @gpcf_cat_pak;
reccf.fh.unpak = @gpcf_cat_unpak;
reccf.fh.lp = @gpcf_cat_lp;
reccf.fh.lpg = @gpcf_cat_lpg;
reccf.fh.cfg = @gpcf_cat_cfg;
reccf.fh.cov = @gpcf_cat_cov;
reccf.fh.trcov = @gpcf_cat_trcov;
reccf.fh.trvar = @gpcf_cat_trvar;
reccf.fh.recappend = @gpcf_cat_recappend;
else
% Append to the record
if isfield(gpcf, 'selectedVariables')
reccf.selectedVariables = gpcf.selectedVariables;
end
end
end
|
github
|
lcnbeapp/beapp-master
|
gp_install.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gp_install.m
| 14,808 |
utf_8
|
f3429c304b3ddc097046417323d93093
|
function gp_install(suiteSparse)
% Matlab function to compile all the c-files to mex in the GPstuff/gp
% folder. The function is called from GPstuff/matlab_install.m but
% can be run separately also.
%
% If you want to use GPstuff without compactly supported (CS)
% covariance functions run as gp_install([]). If you want to use CS
% covariance functions read further.
%
% Some of the sparse GP functionalities in the toolbox require
% SuiteSparse toolbox by Tim Davis. First install SuiteSparse from:
% http://www.cise.ufl.edu/research/sparse/SuiteSparse/current/SuiteSparse/
%
% Note! Install also Metis 4.0.1 as mentioned under header "Other
% packages required:".
%
% After this, compile the c-files in GPstuff/gp as follows:
%
% Run gp_install( suitesparse_path ) in the present directory.
% Here suitesparse_path is a string telling the path to SuiteSparse
% package (for example, '/matlab/toolbox/SuiteSparse/'). Note! It is
% important that suitesparse_path is in the right format. Include also
% the trailing '/' in it.
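%
% Example calls (the SuiteSparse path below is only the illustrative
% path mentioned above, not a required location):
%
% gp_install([]) % compile without CS covariance functions
% gp_install('/matlab/toolbox/SuiteSparse/') % compile with SuiteSparse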
% Parts of the installation code are modified from the
% CHOLMOD/MATLAB/cholmod_install.m file in the SuiteSparse version 3.2.0.
% Copyright (c) 2006-2007, Timothy A. Davis
% Copyright (c) 2008-2010 Jarno Vanhatalo
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
% Compile the 'dist_euclidean' mex-function
if (~isempty (strfind (computer, '64')))
% 64-bit MATLAB
mex -O -g -largeArrayDims -output private/dist_euclidean linuxCsource/dist_euclidean.c
else
mex -O -output private/dist_euclidean linuxCsource/dist_euclidean.c
end
if nargin<1 || isempty(suiteSparse)
% Compile without SuiteSparse.
% This means that compactly supported covariance functions can not be used.
% These are: gpcf_ppcs0, gpcf_ppcs1, gpcf_ppcs2, gpcf_ppcs3
% Compile the 'trcov' mex-function
if (~isempty (strfind (computer, '64')))
% 64-bit MATLAB
mex -O -g -largeArrayDims -output private/trcov linuxCsource/trcov.c
mex -O -g -largeArrayDims -output private/dist_euclidean linuxCsource/dist_euclidean.c
else
mex -O -output private/trcov linuxCsource/trcov.c
mex -O -output private/dist_euclidean linuxCsource/dist_euclidean.c
end
fprintf ('\n GP package successfully compiled ') ;
fprintf ('\n without compactly supported covariance functions\n') ;
else
v = getversion ;
details = 0 ; % 1 if details of each command are to be printed
try
% ispc does not appear in MATLAB 5.3
pc = ispc ;
catch
% if ispc fails, assume we are on a Windows PC if it's not unix
pc = ~isunix ;
end
d = '' ;
if (~isempty (strfind (computer, '64')))
% 64-bit MATLAB
d = '-g -largeArrayDims' ;
% Compile the 'trcov' mex-function
mex -O -g -largeArrayDims -output trcov linuxCsource/trcov.c
mex -O -g -largeArrayDims -output ldlrowmodify linuxCsource/ldlrowmodify.c
if v >= 7.8
d = [d ' -DLONG -D''LONGBLAS=UF_long'''];
end
else
mex -O -output trcov linuxCsource/trcov.c
mex -O -output ldlrowmodify linuxCsource/ldlrowmodify.c
end
% Compile the 'spinv', 'ldlrowupdate' and 'ldlrowmodify' mex-functions
% This is awfully long since the functions need all the functionalities of SuiteSparse
include = '-I../../CHOLMOD/MATLAB -I../../AMD/Include -I../../COLAMD/Include -I../../CCOLAMD/Include -I../../CAMD/Include -I../Include -I../../UFconfig' ;
if (v < 7.0)
% do not attempt to compile CHOLMOD with large file support
include = [include ' -DNLARGEFILE'] ;
elseif (~pc)
% Linux/Unix require these flags for large file support
include = [include ' -D_FILE_OFFSET_BITS=64 -D_LARGEFILE64_SOURCE'] ;
end
if (v < 6.5)
% logical class does not exist in MATLAB 6.1 or earlier
include = [include ' -DMATLAB6p1_OR_EARLIER'] ;
end
% Determine the METIS path, and whether or not METIS is available
% $$$ if (nargin == 0)
metis_path = '../../metis-4.0' ;
% $$$ end
% $$$ if (strcmp (metis_path, 'no metis'))
% $$$ metis_path = '' ;
% $$$ end
have_metis = (~isempty (metis_path)) ;
% fix the METIS 4.0.1 rename.h file
if (have_metis)
fprintf ('Compiling CHOLMOD with METIS on MATLAB Version %g\n', v) ;
f = fopen ('rename.h', 'w') ;
if (f == -1)
error ('unable to create rename.h in current directory') ;
end
fprintf (f, '/* do not edit this file; generated by cholmod_make */\n') ;
fprintf (f, '#undef log2\n') ;
fprintf (f, '#include "%s/Lib/rename.h"\n', metis_path) ;
fprintf (f, '#undef log2\n') ;
fprintf (f, '#define log2 METIS__log2\n') ;
fprintf (f, '#include "mex.h"\n') ;
fprintf (f, '#define malloc mxMalloc\n') ;
fprintf (f, '#define free mxFree\n') ;
fprintf (f, '#define calloc mxCalloc\n') ;
fprintf (f, '#define realloc mxRealloc\n') ;
fclose (f) ;
include = [include ' -I' metis_path '/Lib'] ;
else
fprintf ('Compiling CHOLMOD without METIS on MATLAB Version %g\n', v) ;
include = ['-DNPARTITION ' include] ;
end
%-------------------------------------------------------------------------------
% BLAS option
%-------------------------------------------------------------------------------
% This is exceedingly ugly. The MATLAB mex command needs to be told where to
% find the LAPACK and BLAS libraries, which is a real portability nightmare.
if (pc)
if (v < 6.5)
% MATLAB 6.1 and earlier: use the version supplied here
lapack = 'lcc_lib/libmwlapack.lib' ;
elseif (v < 7.5)
lapack = 'libmwlapack.lib' ;
else
lapack = 'libmwlapack.lib libmwblas.lib' ;
% There seems to be something weird in how MATLAB forms the paths
% to LAPACK on Windows. If the above does not work, try the line
% below after changing the path to your own MATLAB directory.
%lapack = 'C:\Program'' Files''\MATLAB\R2010a\extern\lib\win64\microsoft\libmwlapack.lib C:\Program'' Files''\MATLAB\R2010a\extern\lib\win64\microsoft\libmwblas.lib';
end
else
if (v < 7.5)
lapack = '-lmwlapack' ;
else
lapack = '-lmwlapack -lmwblas' ;
end
end
%-------------------------------------------------------------------------------
cholmod_path = [suiteSparse 'CHOLMOD/'];
include = strrep(include, '../../', suiteSparse);
include = strrep(include, '../', cholmod_path);
include = strrep (include, '/', filesep) ;
amd_src = { ...
'../../AMD/Source/amd_1', ...
'../../AMD/Source/amd_2', ...
'../../AMD/Source/amd_aat', ...
'../../AMD/Source/amd_control', ...
'../../AMD/Source/amd_defaults', ...
'../../AMD/Source/amd_dump', ...
'../../AMD/Source/amd_global', ...
'../../AMD/Source/amd_info', ...
'../../AMD/Source/amd_order', ...
'../../AMD/Source/amd_postorder', ...
'../../AMD/Source/amd_post_tree', ...
'../../AMD/Source/amd_preprocess', ...
'../../AMD/Source/amd_valid' } ;
camd_src = { ...
'../../CAMD/Source/camd_1', ...
'../../CAMD/Source/camd_2', ...
'../../CAMD/Source/camd_aat', ...
'../../CAMD/Source/camd_control', ...
'../../CAMD/Source/camd_defaults', ...
'../../CAMD/Source/camd_dump', ...
'../../CAMD/Source/camd_global', ...
'../../CAMD/Source/camd_info', ...
'../../CAMD/Source/camd_order', ...
'../../CAMD/Source/camd_postorder', ...
'../../CAMD/Source/camd_preprocess', ...
'../../CAMD/Source/camd_valid' } ;
colamd_src = {
'../../COLAMD/Source/colamd', ...
'../../COLAMD/Source/colamd_global' } ;
ccolamd_src = {
'../../CCOLAMD/Source/ccolamd', ...
'../../CCOLAMD/Source/ccolamd_global' } ;
metis_src = {
'Lib/balance', ...
'Lib/bucketsort', ...
'Lib/ccgraph', ...
'Lib/coarsen', ...
'Lib/compress', ...
'Lib/debug', ...
'Lib/estmem', ...
'Lib/fm', ...
'Lib/fortran', ...
'Lib/frename', ...
'Lib/graph', ...
'Lib/initpart', ...
'Lib/kmetis', ...
'Lib/kvmetis', ...
'Lib/kwayfm', ...
'Lib/kwayrefine', ...
'Lib/kwayvolfm', ...
'Lib/kwayvolrefine', ...
'Lib/match', ...
'Lib/mbalance2', ...
'Lib/mbalance', ...
'Lib/mcoarsen', ...
'Lib/memory', ...
'Lib/mesh', ...
'Lib/meshpart', ...
'Lib/mfm2', ...
'Lib/mfm', ...
'Lib/mincover', ...
'Lib/minitpart2', ...
'Lib/minitpart', ...
'Lib/mkmetis', ...
'Lib/mkwayfmh', ...
'Lib/mkwayrefine', ...
'Lib/mmatch', ...
'Lib/mmd', ...
'Lib/mpmetis', ...
'Lib/mrefine2', ...
'Lib/mrefine', ...
'Lib/mutil', ...
'Lib/myqsort', ...
'Lib/ometis', ...
'Lib/parmetis', ...
'Lib/pmetis', ...
'Lib/pqueue', ...
'Lib/refine', ...
'Lib/separator', ...
'Lib/sfm', ...
'Lib/srefine', ...
'Lib/stat', ...
'Lib/subdomains', ...
'Lib/timing', ...
'Lib/util' } ;
for i = 1:length (metis_src)
metis_src {i} = [metis_path '/' metis_src{i}] ;
end
cholmod_matlab = { '../MATLAB/cholmod_matlab' } ;
cholmod_src = {
'../Core/cholmod_aat', ...
'../Core/cholmod_add', ...
'../Core/cholmod_band', ...
'../Core/cholmod_change_factor', ...
'../Core/cholmod_common', ...
'../Core/cholmod_complex', ...
'../Core/cholmod_copy', ...
'../Core/cholmod_dense', ...
'../Core/cholmod_error', ...
'../Core/cholmod_factor', ...
'../Core/cholmod_memory', ...
'../Core/cholmod_sparse', ...
'../Core/cholmod_transpose', ...
'../Core/cholmod_triplet', ...
'../Check/cholmod_check', ...
'../Check/cholmod_read', ...
'../Check/cholmod_write', ...
'../Cholesky/cholmod_amd', ...
'../Cholesky/cholmod_analyze', ...
'../Cholesky/cholmod_colamd', ...
'../Cholesky/cholmod_etree', ...
'../Cholesky/cholmod_factorize', ...
'../Cholesky/cholmod_postorder', ...
'../Cholesky/cholmod_rcond', ...
'../Cholesky/cholmod_resymbol', ...
'../Cholesky/cholmod_rowcolcounts', ...
'../Cholesky/cholmod_rowfac', ...
'../Cholesky/cholmod_solve', ...
'../Cholesky/cholmod_spsolve', ...
'../MatrixOps/cholmod_drop', ...
'../MatrixOps/cholmod_horzcat', ...
'../MatrixOps/cholmod_norm', ...
'../MatrixOps/cholmod_scale', ...
'../MatrixOps/cholmod_sdmult', ...
'../MatrixOps/cholmod_ssmult', ...
'../MatrixOps/cholmod_submatrix', ...
'../MatrixOps/cholmod_vertcat', ...
'../MatrixOps/cholmod_symmetry', ...
'../Modify/cholmod_rowadd', ...
'../Modify/cholmod_rowdel', ...
'../Modify/cholmod_updown', ...
'../Supernodal/cholmod_super_numeric', ...
'../Supernodal/cholmod_super_solve', ...
'../Supernodal/cholmod_super_symbolic', ...
'../Partition/cholmod_ccolamd', ...
'../Partition/cholmod_csymamd', ...
'../Partition/cholmod_camd', ...
'../Partition/cholmod_metis', ...
'../Partition/cholmod_nesdis' } ;
if (pc)
% Windows does not have drand48 and srand48, required by METIS. Use
% drand48 and srand48 in CHOLMOD/MATLAB/Windows/rand48.c instead.
obj_extension = '.obj' ;
cholmod_matlab = [cholmod_matlab {[cholmod_path 'MATLAB\Windows\rand48']}] ;
include = [include ' -I' cholmod_path '\MATLAB\Windows'] ;
else
obj_extension = '.o' ;
end
% compile each library source file
obj = '' ;
source = [amd_src colamd_src ccolamd_src camd_src cholmod_src cholmod_matlab] ;
if (have_metis)
source = [metis_src source] ;
end
source = strrep(source, '../../', suiteSparse);
source = strrep(source, '../', cholmod_path);
kk = 0 ;
for f = source
ff = strrep (f {1}, '/', filesep) ;
slash = strfind (ff, filesep) ;
if (isempty (slash))
slash = 1 ;
else
slash = slash (end) + 1 ;
end
o = ff (slash:end) ;
obj = [obj ' ' o obj_extension] ; %#ok
s = sprintf ('mex %s -DDLONG -O %s -c %s.c', d, include, ff) ;
kk = do_cmd (s, kk, details) ;
end
if pc
% compile mexFunctions
mex_src = 'winCsource\spinv';
s = sprintf ('mex %s -DDLONG -O %s %s.c', d, include, mex_src) ;
s = [s obj];
s = [s ' '];
s = [s lapack];
kk = do_cmd (s, kk, details) ;
%mex_src = 'linuxCsource/ldlrowupdate';
mex_src = 'winCsource\ldlrowupdate';
s = sprintf ('mex %s -DDLONG -O %s %s.c', d, include, mex_src) ;
s = [s obj];
s = [s ' '];
s = [s lapack];
kk = do_cmd (s, kk, details) ;
else
% compile mexFunctions
mex_src = 'linuxCsource/spinv';
s = sprintf ('mex %s -DDLONG -O %s %s.c', d, include, mex_src) ;
s = [s obj];
s = [s ' '];
s = [s lapack];
kk = do_cmd (s, kk, details) ;
%mex_src = 'linuxCsource/ldlrowupdate';
mex_src = 'linuxCsource/ldlrowupdate';
s = sprintf ('mex %s -DDLONG -O %s %s.c', d, include, mex_src) ;
s = [s obj];
s = [s ' '];
s = [s lapack];
kk = do_cmd (s, kk, details) ;
end
% clean up
s = ['delete ' obj] ;
do_cmd (s, kk, details) ;
fprintf ('\nGP package successfully compiled \n') ;
end
%-------------------------------------------------------------------------------
function kk = do_cmd (s, kk, details)
%DO_CMD: evaluate a command, and either print it or print a "."
if (details)
fprintf ('%s\n', s) ;
else
if (mod (kk, 60) == 0)
fprintf ('\n') ;
end
kk = kk + 1 ;
fprintf ('.') ;
end
eval (s) ;
%-------------------------------------------------------------------------------
function v = getversion
% determine the MATLAB version, and return it as a double.
v = sscanf (version, '%d.%d.%d') ;
v = 10.^(0:-1:-(length(v)-1)) * v ;
|
github
|
lcnbeapp/beapp-master
|
lik_poisson.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/lik_poisson.m
| 17,766 |
utf_8
|
9883c930a03527bfc0126d7e930f2a82
|
function lik = lik_poisson(varargin)
%LIK_POISSON Create a Poisson likelihood structure
%
% Description
% LIK = LIK_POISSON creates Poisson likelihood structure
%
% The likelihood is defined as follows:
% __ n
% p(y|f, z) = || i=1 Poisson(y_i|z_i*exp(f_i))
%
% where z is a vector of expected mean and f the latent value
% vector whose components are transformed to relative risk
% exp(f_i).
%
% When using the Poisson likelihood you need to give the vector
% z as an extra parameter to each function that requires y also.
% For example, you should call gpla_e as follows
% gpla_e(w, gp, x, y, 'z', z)
%
% See also
% GP_SET, LIK_*
%
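% A minimal usage sketch (the inputs x, y and the expected counts z are
% hypothetical, not defined in this file):
%   lik = lik_poisson();
%   gp = gp_set('lik', lik, 'cf', {gpcf_sexp()}, 'latent_method', 'Laplace');
%   gp = gp_optim(gp, x, y, 'z', z);
%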
% Copyright (c) 2006-2010 Jarno Vanhatalo
% Copyright (c) 2010 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'LIK_POISSON';
ip.addOptional('lik', [], @isstruct);
ip.parse(varargin{:});
lik=ip.Results.lik;
if isempty(lik)
init=true;
lik.type = 'Poisson';
else
if ~isfield(lik,'type') || ~isequal(lik.type,'Poisson')
error('First argument does not seem to be a valid likelihood function structure')
end
init=false;
end
if init
% Set the function handles to the subfunctions
lik.fh.pak = @lik_poisson_pak;
lik.fh.unpak = @lik_poisson_unpak;
lik.fh.ll = @lik_poisson_ll;
lik.fh.llg = @lik_poisson_llg;
lik.fh.llg2 = @lik_poisson_llg2;
lik.fh.llg3 = @lik_poisson_llg3;
lik.fh.tiltedMoments = @lik_poisson_tiltedMoments;
lik.fh.predy = @lik_poisson_predy;
lik.fh.invlink = @lik_poisson_invlink;
lik.fh.predprcty = @lik_poisson_predprcty;
lik.fh.recappend = @lik_poisson_recappend;
end
end
function [w,s] = lik_poisson_pak(lik)
%LIK_POISSON_PAK Combine likelihood parameters into one vector.
%
% Description
% W = LIK_POISSON_PAK(LIK) takes a likelihood structure LIK
% and returns an empty vector W. If Poisson likelihood had
% parameters this would combine them into a single row vector
% W (see e.g. lik_negbin). This is a mandatory subfunction used
% for example in energy and gradient computations.
%
% See also
% LIK_NEGBIN_UNPAK, GP_PAK
w = []; s = {};
end
function [lik, w] = lik_poisson_unpak(lik, w)
%LIK_POISSON_UNPAK Extract likelihood parameters from the vector.
%
% Description
% W = LIK_POISSON_UNPAK(W, LIK) Doesn't do anything.
%
% If Poisson likelihood had parameters this would extract the
% parameters from the vector W into the LIK structure. This is a
% mandatory subfunction used for example in energy and gradient
% computations.
%
%
% See also
% LIK_POISSON_PAK, GP_UNPAK
lik=lik;
w=w;
end
function logLik = lik_poisson_ll(lik, y, f, z)
%LIK_POISSON_LL Log likelihood
%
% Description
% E = LIK_POISSON_LL(LIK, Y, F, Z) takes a likelihood data
% structure LIK, incidence counts Y, expected counts Z, and
% latent values F. Returns the log likelihood, log p(y|f,z).
% This subfunction is needed when using Laplace approximation
% or MCMC for inference with non-Gaussian likelihoods. This
% subfunction is also used in information criteria (DIC, WAIC)
% computations.
%
% See also
% LIK_POISSON_LLG, LIK_POISSON_LLG3, LIK_POISSON_LLG2, GPLA_E
if isempty(z)
error(['lik_poisson -> lik_poisson_ll: missing z!'...
'Poisson likelihood needs the expected number of '...
'occurrences as an extra input z. See, for '...
'example, lik_poisson and gpla_e. ']);
end
lambda = z.*exp(f);
gamlny = gammaln(y+1);
logLik = sum(-lambda + y.*log(lambda) - gamlny);
end
function deriv = lik_poisson_llg(lik, y, f, param, z)
%LIK_POISSON_LLG Gradient of the log likelihood
%
% Description
% G = LIK_POISSON_LLG(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, incidence counts Y, expected counts Z
% and latent values F. Returns the gradient of the log
% likelihood with respect to PARAM. At the moment PARAM can be
% 'param' or 'latent'. This subfunction is needed when using
% Laplace approximation or MCMC for inference with non-Gaussian
% likelihoods.
%
% See also
% LIK_POISSON_LL, LIK_POISSON_LLG2, LIK_POISSON_LLG3, GPLA_E
if isempty(z)
error(['lik_poisson -> lik_poisson_llg: missing z!'...
'Poisson likelihood needs the expected number of '...
'occurrences as an extra input z. See, for '...
'example, lik_poisson and gpla_e. ']);
end
switch param
case 'latent'
deriv = y - z.*exp(f);
end
end
function g2 = lik_poisson_llg2(lik, y, f, param, z)
%LIK_POISSON_LLG2 Second gradients of the log likelihood
%
% Description
% G2 = LIK_POISSON_LLG2(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, incidence counts Y, expected counts Z,
% and latent values F. Returns the Hessian of the log
% likelihood with respect to PARAM. At the moment PARAM can be
% only 'latent'. G2 is a vector with diagonal elements of the
% Hessian matrix (off diagonals are zero). This subfunction
% is needed when using Laplace approximation or EP for inference
% with non-Gaussian likelihoods.
%
% See also
% LIK_POISSON_LL, LIK_POISSON_LLG, LIK_POISSON_LLG3, GPLA_E
if isempty(z)
error(['lik_poisson -> lik_poisson_llg2: missing z!'...
'Poisson likelihood needs the expected number of '...
'occurrences as an extra input z. See, for '...
'example, lik_poisson and gpla_e. ']);
end
switch param
case 'latent'
g2 = -z.*exp(f);
end
end
function third_grad = lik_poisson_llg3(lik, y, f, param, z)
%LIK_POISSON_LLG3 Third gradients of the log likelihood
%
% Description
% G3 = LIK_POISSON_LLG3(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, incidence counts Y, expected counts Z
% and latent values F and returns the third gradients of the
% log likelihood with respect to PARAM. At the moment PARAM
% can be only 'latent'. G3 is a vector with third gradients.
% This subfunction is needed when using Laplace approximation
% for inference with non-Gaussian likelihoods.
%
% See also
% LIK_POISSON_LL, LIK_POISSON_LLG, LIK_POISSON_LLG2, GPLA_E, GPLA_G
if isempty(z)
error(['lik_poisson -> lik_poisson_llg3: missing z!'...
'Poisson likelihood needs the expected number of '...
'occurrences as an extra input z. See, for '...
'example, lik_poisson and gpla_e. ']);
end
switch param
case 'latent'
third_grad = - z.*exp(f);
end
end
function [logM_0, m_1, sigm2hati1] = lik_poisson_tiltedMoments(lik, y, i1, sigm2_i, myy_i, z)
%LIK_POISSON_TILTEDMOMENTS Returns the marginal moments for EP algorithm
%
% Description
% [M_0, M_1, M2] = LIK_POISSON_TILTEDMOMENTS(LIK, Y, I, S2,
% MYY, Z) takes a likelihood structure LIK, incidence counts
% Y, expected counts Z, index I and cavity variance S2 and
% mean MYY. Returns the zeroth moment M_0, mean M_1 and
% variance M_2 of the posterior marginal (see Rasmussen and
% Williams (2006): Gaussian processes for Machine Learning,
% page 55). This subfunction is needed when using EP for
% inference with non-Gaussian likelihoods.
%
% See also
% GPEP_E
% if isempty(z)
% error(['lik_poisson -> lik_poisson_tiltedMoments: missing z!'...
% 'Poisson likelihood needs the expected number of '...
% 'occurrences as an extra input z. See, for '...
% 'example, lik_poisson and gpla_e. ']);
% end
yy = y(i1);
avgE = z(i1);
logM_0=zeros(size(yy));
m_1=zeros(size(yy));
sigm2hati1=zeros(size(yy));
for i=1:length(i1)
% get a function handle of an unnormalized tilted distribution
% (likelihood * cavity = Poisson * Gaussian)
% and useful integration limits
[tf,minf,maxf]=init_poisson_norm(yy(i),myy_i(i),sigm2_i(i),avgE(i));
% Integrate with quadrature
RTOL = 1.e-6;
ATOL = 1.e-10;
[m_0, m_1(i), m_2] = quad_moments(tf, minf, maxf, RTOL, ATOL);
sigm2hati1(i) = m_2 - m_1(i).^2;
% If the second central moment is less than the cavity variance,
% integrate more precisely. Theoretically, for a log-concave
% likelihood it should hold that sigm2hati1 < sigm2_i.
if sigm2hati1(i) >= sigm2_i(i)
ATOL = ATOL.^2;
RTOL = RTOL.^2;
[m_0, m_1(i), m_2] = quad_moments(tf, minf, maxf, RTOL, ATOL);
sigm2hati1(i) = m_2 - m_1(i).^2;
if sigm2hati1(i) >= sigm2_i(i)
error('lik_poisson_tilted_moments: sigm2hati1 >= sigm2_i');
end
end
logM_0(i) = log(m_0);
end
end
function [lpy, Ey, Vary] = lik_poisson_predy(lik, Ef, Varf, yt, zt)
%LIK_POISSON_PREDY Returns the predictive mean, variance and density of y
%
% Description
% LPY = LIK_POISSON_PREDY(LIK, EF, VARF, YT, ZT)
% Returns also the predictive density of YT, that is
% p(yt | y,zt) = \int p(yt | f, zt) p(f|y) df.
% This requires also the incidence counts YT and expected counts ZT.
% This subfunction is needed when computing posterior predictive
% distributions for future observations.
%
% [LPY, EY, VARY] = LIK_POISSON_PREDY(LIK, EF, VARF, YT, ZT)
% takes a likelihood structure LIK, posterior mean EF and
% posterior variance VARF of the latent variable and returns the
% posterior predictive mean EY and variance VARY of the
% observations related to the latent variables. This subfunction
% is needed when computing posterior predictive distributions for
% future observations.
%
%
% See also
% GPLA_PRED, GPEP_PRED, GPMC_PRED
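% A sketch of typical use via gp_pred (xt, yt and zt are hypothetical
% test inputs, test counts and test expected counts):
%   [Ef, Varf] = gp_pred(gp, x, y, xt, 'z', z, 'zt', zt);
%   lpy = lik.fh.predy(lik, Ef, Varf, yt, zt);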
if isempty(zt)
error(['lik_poisson -> lik_poisson_predy: missing zt!'...
'Poisson likelihood needs the expected number of '...
'occurrences as an extra input zt. See, for '...
'example, lik_poisson and gpla_e. ']);
end
avgE = zt;
lpy = zeros(size(Ef));
Ey = zeros(size(Ef));
EVary = zeros(size(Ef));
VarEy = zeros(size(Ef));
if nargout > 1
% Evaluate Ey and Vary
for i1=1:length(Ef)
%%% With quadrature
myy_i = Ef(i1);
sigm_i = sqrt(Varf(i1));
minf=myy_i-6*sigm_i;
maxf=myy_i+6*sigm_i;
F = @(f) exp(log(avgE(i1))+f+norm_lpdf(f,myy_i,sigm_i));
Ey(i1) = quadgk(F,minf,maxf);
EVary(i1) = Ey(i1);
F3 = @(f) exp(2*log(avgE(i1))+2*f+norm_lpdf(f,myy_i,sigm_i));
VarEy(i1) = quadgk(F3,minf,maxf) - Ey(i1).^2;
end
Vary = EVary + VarEy;
end
% Evaluate the posterior predictive densities of the given observations
lpy = zeros(length(yt),1);
for i1=1:length(yt)
% get a function handle of the likelihood times posterior
% (likelihood * posterior = Poisson * Gaussian)
% and useful integration limits
[pdf,minf,maxf]=init_poisson_norm(...
yt(i1),Ef(i1),Varf(i1),avgE(i1));
% integrate over the f to get posterior predictive distribution
lpy(i1) = log(quadgk(pdf, minf, maxf));
end
end
function prctys = lik_poisson_predprcty(lik, Ef, Varf, zt, prcty)
%LIK_POISSON_PREDPRCTY Returns the percentiles of predictive density of y
%
% Description
% PRCTY = LIK_POISSON_PREDPRCTY(LIK, EF, VARF, ZT, PRCTY)
% Returns percentiles of the predictive density of y.
% This requires also the expected counts ZT. This
% subfunction is needed when using the function gp_predprcty.
%
% See also
% GP_PREDPRCTY
if isempty(zt)
error(['lik_poisson -> lik_poisson_predprcty: missing z!'...
'Poisson likelihood needs the expected number of '...
'occurrences as an extra input z. See, for '...
'example, lik_poisson and gpla_e. ']);
end
opt=optimset('TolX',.5,'Display','off');
nt=size(Ef,1);
prctys = zeros(nt,numel(prcty));
prcty=prcty/100;
for i1=1:nt
ci = sqrt(Varf(i1));
for i2=1:numel(prcty)
a=floor(fminbnd(@(a) (quadgk(@(f) poisscdf(a,zt(i1).*exp(f).*norm_pdf(f,Ef(i1),ci)),Ef(i1)-6*ci,Ef(i1)+6*ci,'AbsTol',1e-4)-prcty(i2)).^2,poissinv(prcty(i2),zt(i1).*exp(Ef(i1)-1.96*ci)),poissinv(prcty(i2),zt(i1).*exp(Ef(i1)+1.96*ci)),opt));
if quadgk(@(f) poisscdf(a,zt(i1).*exp(f)).*norm_pdf(f,Ef(i1),ci),Ef(i1)-6*ci,Ef(i1)+6*ci,'AbsTol',1e-4)<prcty(i2)
a=a+1;
end
prctys(i1,i2)=a;
end
end
end
function [df,minf,maxf] = init_poisson_norm(yy,myy_i,sigm2_i,avgE)
%INIT_POISSON_NORM
%
% Description
% Return function handle to a function evaluating Poisson *
% Gaussian which is used for evaluating (likelihood * cavity)
% or (likelihood * posterior) Return also useful limits for
% integration. This is private function for lik_poisson. This
% subfunction is needed by subfunctions tiltedMoments and predy.
%
% See also
% LIK_POISSON_TILTEDMOMENTS, LIK_POISSON_PREDY
% avoid repetitive evaluation of constant part
ldconst = -gammaln(yy+1) - log(sigm2_i)/2 - log(2*pi)/2;
% Create function handle for the function to be integrated
df = @poisson_norm;
% use log to avoid underflow, and derivatives for faster search
ld = @log_poisson_norm;
ldg = @log_poisson_norm_g;
ldg2 = @log_poisson_norm_g2;
% Set the limits for integration
% Poisson likelihood is log-concave so the poisson_norm
% function is unimodal, which makes things easier
if yy==0
% with yy==0, the mode of the likelihood is not defined
% use the mode of the Gaussian (cavity or posterior) as a first guess
modef = myy_i;
else
% use precision weighted mean of the Gaussian approximation
% of the Poisson likelihood and Gaussian
mu=log(yy/avgE);
s2=1./(yy+1./sigm2_i);
modef = (myy_i/sigm2_i + mu/s2)/(1/sigm2_i + 1/s2);
end
% find the mode of the integrand using Newton iterations
% a few iterations are enough, since the first guess is in the right direction
niter=3; % number of Newton iterations
mindelta=1e-6; % tolerance in stopping Newton iterations
for ni=1:niter
g=ldg(modef);
h=ldg2(modef);
delta=-g/h;
modef=modef+delta;
if abs(delta)<mindelta
break
end
end
% integrand limits based on Gaussian approximation at mode
modes=sqrt(-1/h);
minf=modef-8*modes;
maxf=modef+8*modes;
modeld=ld(modef);
iter=0;
% check that density at end points is low enough
lddiff=20; % min difference in log-density between mode and end-points
minld=ld(minf);
step=1;
while minld>(modeld-lddiff)
minf=minf-step*modes;
minld=ld(minf);
iter=iter+1;
step=step*2;
if iter>100
error(['lik_poisson -> init_poisson_norm: ' ...
'integration interval minimum not found ' ...
'even after looking hard!'])
end
end
maxld=ld(maxf);
step=1;
while maxld>(modeld-lddiff)
maxf=maxf+step*modes;
maxld=ld(maxf);
iter=iter+1;
step=step*2;
if iter>100
error(['lik_poisson -> init_poisson_norm: ' ...
'integration interval maximum not found ' ...
'even after looking hard!'])
end
end
function integrand = poisson_norm(f)
% Poisson * Gaussian
mu = avgE.*exp(f);
integrand = exp(ldconst ...
-mu+yy.*log(mu) ...
-0.5*(f-myy_i).^2./sigm2_i);
end
function log_int = log_poisson_norm(f)
% log(Poisson * Gaussian)
% log_poisson_norm is used to avoid underflow when searching
% integration interval
mu = avgE.*exp(f);
log_int = ldconst ...
-mu+yy.*log(mu) ...
-0.5*(f-myy_i).^2./sigm2_i;
end
function g = log_poisson_norm_g(f)
% d/df log(Poisson * Gaussian)
% derivative of log_poisson_norm
mu = avgE.*exp(f);
g = -mu+yy...
+ (myy_i - f)./sigm2_i;
end
function g2 = log_poisson_norm_g2(f)
% d^2/df^2 log(Poisson * Gaussian)
% second derivative of log_poisson_norm
mu = avgE.*exp(f);
g2 = -mu...
-1/sigm2_i;
end
end
function mu = lik_poisson_invlink(lik, f, z)
%LIK_POISSON_INVLINK Returns values of inverse link function
%
% Description
% P = LIK_POISSON_INVLINK(LIK, F) takes a likelihood structure LIK and
% latent values F and returns the values MU of inverse link function.
% This subfunction is needed when using gp_predprctmu.
%
% See also
% LIK_POISSON_LL, LIK_POISSON_PREDY
mu = z.*exp(f);
end
function reclik = lik_poisson_recappend(reclik, ri, lik)
%RECAPPEND Append the parameters to the record
%
% Description
% RECLIK = LIK_POISSON_RECAPPEND(RECLIK, RI, LIK) takes a
% likelihood record structure RECLIK, record index RI and
% likelihood structure LIK with the current MCMC samples of
% the parameters. Returns RECLIK which contains all the old
% samples and the current samples from LIK. This subfunction
% is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC
if nargin == 2
reclik.type = 'Poisson';
% Set the function handles
reclik.fh.pak = @lik_poisson_pak;
reclik.fh.unpak = @lik_poisson_unpak;
reclik.fh.ll = @lik_poisson_ll;
reclik.fh.llg = @lik_poisson_llg;
reclik.fh.llg2 = @lik_poisson_llg2;
reclik.fh.llg3 = @lik_poisson_llg3;
reclik.fh.tiltedMoments = @lik_poisson_tiltedMoments;
reclik.fh.predy = @lik_poisson_predy;
reclik.fh.invlink = @lik_poisson_invlink;
reclik.fh.predprcty = @lik_poisson_predprcty;
reclik.fh.recappend = @lik_poisson_recappend;
end
end
|
github
|
lcnbeapp/beapp-master
|
gpcf_mask.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gpcf_mask.m
| 11,666 |
UNKNOWN
|
aac6a91c7e0fd4518d918b34507b986c
|
function gpcf = gpcf_mask(varargin)
%GPCF_MASK Create a mask covariance function
%
% Description
% GPCF = GPCF_MASK('PARAM1',VALUE1,'PARAM2,VALUE2,...)
% creates a mask covariance function structure in
% which the named parameters have the specified values. Any
% unspecified parameters are set to default values.
%
% GPCF = GPCF_MASK(GPCF,'PARAM1',VALUE1,'PARAM2,VALUE2,...)
% modify a covariance function structure with the named
% parameters altered with the specified values.
%
% Mask covariance function returns correlation 1 if input
% values X_i and X_j are both nonzero and 0 otherwise.
%
% Parameters for mask covariance function
% selectedVariables - vector defining which inputs are used
%
% See also
% GP_SET, GPCF_*, PRIOR_*, MEAN_*
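%
% A minimal sketch of the resulting covariance (inputs are hypothetical):
%   cf = gpcf_mask('selectedVariables', [1 3]);
%   K = cf.fh.trcov(cf, x);   % K(i,j) = 1 iff columns 1 and 3 of rows i and j are all nonzero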
% Copyright (c) 2007-2010 Jarno Vanhatalo
% Copyright (c) 2008-2010 Jaakko Riihimäki
% Copyright (c) 2010 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'GPCF_MASK';
ip.addOptional('gpcf', [], @isstruct);
ip.addParamValue('selectedVariables',[], ...
@(x) isempty(x) || (isvector(x) && all(x>0)));
ip.parse(varargin{:});
gpcf=ip.Results.gpcf;
if isempty(gpcf)
% Initialize a covariance function structure
init=true;
gpcf.type = 'gpcf_mask';
else
% Modify a covariance function structure
if ~isfield(gpcf,'type') || ~isequal(gpcf.type,'gpcf_mask')
error('First argument does not seem to be a valid covariance function structure')
end
init=false;
end
if ~ismember('selectedVariables',ip.UsingDefaults)
if ~isempty(ip.Results.selectedVariables)
gpcf.selectedVariables = ip.Results.selectedVariables;
elseif isfield(gpcf,'selectedVariables')
gpcf=rmfield(gpcf,'selectedVariables');
end
end
if init
gpcf.fh.pak = @gpcf_mask_pak;
gpcf.fh.unpak = @gpcf_mask_unpak;
gpcf.fh.lp = @gpcf_mask_lp;
gpcf.fh.lpg = @gpcf_mask_lpg;
gpcf.fh.cfg = @gpcf_mask_cfg;
gpcf.fh.ginput = @gpcf_mask_ginput;
gpcf.fh.cov = @gpcf_mask_cov;
gpcf.fh.trcov = @gpcf_mask_trcov;
gpcf.fh.trvar = @gpcf_mask_trvar;
gpcf.fh.recappend = @gpcf_mask_recappend;
end
end
function [w,s] = gpcf_mask_pak(gpcf, w)
%GPCF_MASK_PAK Combine GP covariance function parameters into
% one vector.
%
% Description
% W = GPCF_MASK_PAK(GPCF) takes a covariance function
% structure GPCF and combines the covariance function
% parameters and their hyperparameters into a single row
% vector W. This is a mandatory subfunction used for
% example in energy and gradient computations.
%
% w = []
%
% See also
% GPCF_MASK_UNPAK
w = []; s = {};
end
function [gpcf, w] = gpcf_mask_unpak(gpcf, w)
%GPCF_MASK_UNPAK Sets the covariance function parameters into
% the structure
%
% Description
% [GPCF, W] = GPCF_MASK_UNPAK(GPCF, W) takes a covariance
% function structure GPCF and a parameter vector W, and
% returns a covariance function structure identical to the
% input, except that the covariance parameters have been set
% to the values in W. Deletes the values set to GPCF from W
% and returns the modified W. This is a mandatory subfunction
% used for example in energy and gradient computations.
%
% Assignment is inverse of
% w = []
%
% See also
% GPCF_MASK_PAK
end
function lp = gpcf_mask_lp(gpcf)
%GPCF_MASK_LP Evaluate the energy of prior of covariance function parameters
%
% Description
% LP = GPCF_MASK_LP(GPCF) takes a covariance function
% structure GPCF and returns log(p(th)), where th collects the
% parameters. This is a mandatory subfunction used for example
% in energy computations.
%
% See also
% GPCF_MASK_PAK, GPCF_MASK_UNPAK, GPCF_MASK_LPG, GP_E
lp = 0;
end
function lpg = gpcf_mask_lpg(gpcf)
%GPCF_MASK_LPG Evaluate gradient of the log prior with respect
% to the parameters.
%
% Description
% LPG = GPCF_MASK_LPG(GPCF) takes a covariance function
% structure GPCF and returns LPG = d log (p(th))/dth, where th
% is the vector of parameters. This is a mandatory subfunction
% used for example in gradient computations.
%
% See also
% GPCF_MASK_PAK, GPCF_MASK_UNPAK, GPCF_MASK_LP, GP_G
lpg = [];
end
function DKff = gpcf_mask_cfg(gpcf, x, x2, mask,i1)
%GPCF_MASK_CFG Evaluate gradient of covariance function
% with respect to the parameters.
%
% Description
% DKff = GPCF_MASK_CFG(GPCF, X) takes a covariance function
% structure GPCF, a matrix X of input vectors and returns
% DKff, the gradients of covariance matrix Kff = k(X,X) with
% respect to th (cell array with matrix elements). This is a
% mandatory subfunction used in gradient computations.
%
% DKff = GPCF_MASK_CFG(GPCF, X, X2) takes a covariance function
% structure GPCF, a matrix X of input vectors and returns
% DKff, the gradients of covariance matrix Kff = k(X,X2) with
% respect to th (cell array with matrix elements). This subfunction
% is needed when using sparse approximations (e.g. FIC).
%
% DKff = GPCF_MASK_CFG(GPCF, X, [], MASK) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the diagonal of gradients of covariance matrix
% Kff = k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using sparse
% approximations (e.g. FIC).
%
% DKff = GPCF_MASK_CFG(GPCF, X, X2, [], i) takes a covariance function
% structure GPCF, a matrix X of input vectors and returns
% DKff, the gradients of covariance matrix Kff = k(X,X2), or k(X,X)
% if X2 is empty, with respect to ith hyperparameter. This subfunction
% is needed when using memory save option in gp_set.
%
% See also
% GPCF_MASK_PAK, GPCF_MASK_UNPAK, GPCF_MASK_LP, GP_G
DKff = {};
end
function [DKff, lpg] = gpcf_mask_ginput(gpcf, x, x2, i1)
%GPCF_MASK_GINPUT Evaluate gradient of covariance function with
% respect to x.
%
% Description
% DKff = GPCF_MASK_GINPUT(GPCF, X) takes a covariance function
% structure GPCF, a matrix X of input vectors and returns
% DKff, the gradients of covariance matrix Kff = k(X,X) with
% respect to X (cell array with matrix elements). This subfunction
% is needed when computing gradients with respect to inducing
% inputs in sparse approximations.
%
% DKff = GPCF_MASK_GINPUT(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors
% and returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% DKff = GPCF_MASK_GINPUT(GPCF, X, X2, i) takes a covariance
% function structure GPCF, a matrix X of input vectors
% and returns DKff, the gradients of covariance matrix Kff =
% k(X,X2), or k(X,X) if X2 is empty, with respect to ith covariate
% in X. This subfunction is needed when using memory saving option
% in gp_set.
%
% See also
% GPCF_MASK_PAK, GPCF_MASK_UNPAK, GPCF_MASK_LP, GP_G
[n, m] =size(x);
if nargin == 2 || isempty(x2)
ii1 = 0;
for i=1:m
for j = 1:n
ii1 = ii1 + 1;
DKff{ii1} = zeros(n);
lpg(ii1) = 0;
end
end
elseif nargin == 3 || nargin == 4
ii1 = 0;
for i=1:m
for j = 1:n
ii1 = ii1 + 1;
DKff{ii1} = zeros(n, size(x2,1));
lpg(ii1) = 0;
end
end
end
if nargin==4
DKff=DKff{1};
end
end
function C = gpcf_mask_cov(gpcf, x1, x2, varargin)
%GP_MASK_COV Evaluate covariance matrix between two input vectors
%
% Description
% C = GP_MASK_COV(GP, TX, X) takes in covariance function of a
% Gaussian process GP and two matrixes TX and X that contain
% input vectors to GP. Returns covariance matrix C. Every
% element ij of C contains covariance between inputs i in TX
% and j in X. This is a mandatory subfunction used for example in
% prediction and energy computations.
%
% See also
% GPCF_MASK_TRCOV, GPCF_MASK_TRVAR, GP_COV, GP_TRCOV
if isempty(x2)
x2=x1;
end
[n1,m1]=size(x1);
[n2,m2]=size(x2);
if m1~=m2
error('the number of columns of X1 and X2 has to be the same')
end
C=repmat(true,n1,n2);
if isfield(gpcf, 'selectedVariables')
for j = 1:length(gpcf.selectedVariables)
jj=gpcf.selectedVariables(j);
C = C & bsxfun(@and,x1(:,jj)~=0,x2(:,jj)'~=0);
end
else
for j = 1:m1
C = C & bsxfun(@and,x1(:,j)~=0,x2(:,j)'~=0);
end
end
C=double(C);
end
function C = gpcf_mask_trcov(gpcf, x)
%GP_MASK_TRCOV Evaluate training covariance matrix of inputs
%
% Description
% C = GP_MASK_TRCOV(GP, TX) takes in covariance function of a
% Gaussian process GP and matrix TX that contains training
% input vectors. Returns covariance matrix C. Every element ij
% of C contains covariance between inputs i and j in TX. This
% is a mandatory subfunction used for example in prediction
% and energy computations.
%
% See also
% GPCF_MASK_COV, GPCF_MASK_TRVAR, GP_COV, GP_TRCOV
[n,m]=size(x);
C=repmat(true,n,n);
if isfield(gpcf, 'selectedVariables')
for j = 1:length(gpcf.selectedVariables)
jj=gpcf.selectedVariables(j);
C = C & bsxfun(@and,x(:,jj)~=0,x(:,jj)'~=0);
end
else
for j = 1:m
C = C & bsxfun(@and,x(:,j)~=0,x(:,j)'~=0);
end
end
C=double(C);
end
function C = gpcf_mask_trvar(gpcf, x)
%GP_MASK_TRVAR Evaluate training variance vector
%
% Description
% C = GP_MASK_TRVAR(GPCF, TX) takes in covariance function of a
% Gaussian process GPCF and matrix TX that contains training
% inputs. Returns variance vector C. Every element i of C
% contains variance of input i in TX. This is a mandatory
% subfunction used for example in prediction and energy
% computations.
%
% See also
% GPCF_MASK_COV, GP_COV, GP_TRCOV
[n,m]=size(x);
C=true(n,1);
if isfield(gpcf, 'selectedVariables')
for j = 1:length(gpcf.selectedVariables)
jj=gpcf.selectedVariables(j);
C = C & x(:,jj)~=0;
end
else
for j = 1:m
C = C & x(:,j)~=0;
end
end
C=double(C);
end
function reccf = gpcf_mask_recappend(reccf, ri, gpcf)
%RECAPPEND Record append
%
% Description
% RECCF = GPCF_MASK_RECAPPEND(RECCF, RI, GPCF) takes a
% covariance function record structure RECCF, record index RI
% and covariance function structure GPCF with the current MCMC
% samples of the parameters. Returns RECCF which contains
% all the old samples and the current samples from GPCF.
% This subfunction is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC and GP_MC -> RECAPPEND
if nargin == 2
% Initialize the record
reccf.type = 'gpcf_mask';
% Initialize parameters
reccf.coeffSigma2= [];
% Set the function handles
reccf.fh.pak = @gpcf_mask_pak;
reccf.fh.unpak = @gpcf_mask_unpak;
reccf.fh.lp = @gpcf_mask_lp;
reccf.fh.lpg = @gpcf_mask_lpg;
reccf.fh.cfg = @gpcf_mask_cfg;
reccf.fh.cov = @gpcf_mask_cov;
reccf.fh.trcov = @gpcf_mask_trcov;
reccf.fh.trvar = @gpcf_mask_trvar;
reccf.fh.recappend = @gpcf_mask_recappend;
else
% Append to the record
if isfield(gpcf, 'selectedVariables')
reccf.selectedVariables = gpcf.selectedVariables;
end
end
end
|
github
|
lcnbeapp/beapp-master
|
gp_e.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gp_e.m
| 19,648 |
utf_8
|
3ec15c96030544b6ae806bdd136d7b29
|
function [e, edata, eprior] = gp_e(w, gp, x, y, varargin)
%GP_E Evaluate the energy function (un-normalized negative
% log marginal posterior)
%
% Description
% E = GP_E(W, GP, X, Y, OPTIONS) takes a Gaussian process
% structure GP together with a matrix X of input vectors and a
% matrix Y of targets, and evaluates the energy function E. Each
% row of X corresponds to one input vector and each row of Y
% corresponds to one target vector.
%
% [E, EDATA, EPRIOR] = GP_E(W, GP, X, Y, OPTIONS) also returns
% the data and prior components of the total energy. EDATA is
% the negative marginal likelihood of the model.
%
% The energy is minus log posterior cost function:
% E = EDATA + EPRIOR
% = - log p(Y|X, th) - log p(th),
% where th represents the parameters (lengthScale,
% magnSigma2...), X is inputs and Y is observations (regression)
% or latent values (non-Gaussian likelihood).
%
% OPTIONS is optional parameter-value pair
% z - optional observed quantity in triplet (x_i,y_i,z_i)
% Some likelihoods may use this. For example, in case of
% Poisson likelihood we have z_i=E_i, that is, expected
% value for ith case.
%
% See also
% GP_G, GPCF_*, GP_SET, GP_PAK, GP_UNPAK
%
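% A minimal evaluation sketch (gp, x and y are hypothetical; gp_pak packs
% the current parameters into the vector w expected as the first argument):
%   w = gp_pak(gp);
%   [e, edata, eprior] = gp_e(w, gp, x, y);
%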
% Copyright (c) 2006-2010 Jarno Vanhatalo
% Copyright (c) 2010-2011 Aki Vehtari
% Copyright (c) 2010 Heikki Peura
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
if ~all(isfinite(w(:)));
% instead of stopping to error, return NaN
e=NaN;
edata = NaN;
eprior = NaN;
return;
end
if isfield(gp,'latent_method') && ~strcmp(gp.latent_method,'MCMC')
% use an inference specific method
fh_e = gp.fh.e;
switch nargout
case {0 1}
[e] = fh_e(w, gp, x, y, varargin{:});
case 2
[e, edata] = fh_e(w, gp, x, y, varargin{:});
case 3
[e, edata, eprior] = fh_e(w, gp, x, y, varargin{:});
end
if ~isreal(e)
warning('Energy is not real')
e=NaN;
end
return
end
ip=inputParser;
ip.FunctionName = 'GP_E';
ip.addRequired('w', @(x) isempty(x) || isvector(x) && isreal(x));
ip.addRequired('gp',@isstruct);
ip.addRequired('x', @(x) ~isempty(x) && isreal(x) && all(isfinite(x(:))))
ip.addRequired('y', @(x) ~isempty(x) && isreal(x) && all(isfinite(x(:))))
ip.addParamValue('z', [], @(x) isreal(x) && all(isfinite(x(:))))
ip.parse(w, gp, x, y, varargin{:});
z=ip.Results.z;
gp=gp_unpak(gp, w);
ncf = length(gp.cf);
n=size(x,1);
multicf=false;
if isfield(gp.lik, 'nondiagW')
% Help parameters for likelihoods with non-diagonal Hessian
switch gp.lik.type
case {'LGP', 'LGPC'}
% Do nothing
case {'Softmax', 'Multinom'}
if size(y,1)~=size(x,1)
y=reshape(y,size(x,1),size(y,1)/size(x,1));
end
[n,nout]=size(y);
nl=[0 repmat(n,1,nout)];
nl=cumsum(nl);
otherwise
if ~isfield(gp.lik,'xtime') && size(y,1)~=size(x,1)
y=reshape(y,size(x,1),size(y,1)/size(x,1));
end
n=size(y,1);
nout=length(gp.comp_cf);
% Indices for looping over latent processes
if ~isfield(gp.lik, 'xtime')
nl=[0 repmat(n,1,nout)];
nl=cumsum(nl);
else
xtime=gp.lik.xtime;
ntime = size(xtime,1);
n=n-ntime;
nl=[0 ntime n];
nl=cumsum(nl);
end
end
if isfield(gp, 'comp_cf') % own covariance for each output component
multicf = true;
if length(gp.comp_cf) ~= nout
error('GP_E: the number of component vectors in gp.comp_cf must be the same as the number of outputs.')
end
end
end
% First Evaluate the data contribution to the error
switch gp.type
% ============================================================
% FULL GP (and compact support GP)
% ============================================================
case 'FULL' % A full GP
[K, C] = gp_trcov(gp, x);
% Are there specified mean functions
if ~isfield(gp,'meanf') % a zero mean function
if issparse(C) % compact support covariances are in use
[LD,notpositivedefinite] = ldlchol(C);
if notpositivedefinite
[edata, eprior, e] = set_output_for_notpositivedefinite;
return
end
edata = 0.5*(n.*log(2*pi) + sum(log(diag(LD))) + y'*ldlsolve(LD,y));
else
if ~isfield(gp.lik, 'nondiagW') || ismember(gp.lik.type, {'LGP' 'LGPC'})
[L,notpositivedefinite] = chol(C,'lower');
if notpositivedefinite
[edata, eprior, e] = set_output_for_notpositivedefinite;
return
end
ws=warning('off','MATLAB:singularMatrix');
b=L\y;
warning(ws);
zc=sum(log(diag(L)));
else
b=zeros(nl(end),1);
zc=0;
y=y(:);
if multicf
for i1=1:nout
if i1==1 && isfield(gp.lik, 'xtime')
[tmp, C] = gp_trcov(gp, xtime, gp.comp_cf{i1});
else
[tmp, C] = gp_trcov(gp, x, gp.comp_cf{i1});
end
[L,notpositivedefinite]=chol(C,'lower');
if notpositivedefinite
[e, edata, eprior] = set_output_for_notpositivedefinite();
return
end
% b(:,i1) = L\y(:,i1);
b(nl(i1)+1:nl(i1+1)) = L\y(nl(i1)+1:nl(i1+1));
zc = zc + sum(log(diag(L)));
end
else
[tmp, C] = gp_trcov(gp, x);
[L,notpositivedefinite]=chol(C,'lower');
if notpositivedefinite
[e, edata, eprior] = set_output_for_notpositivedefinite();
return
end
for i1=1:nout
b(nl(i1)+1:nl(i1+1)) = L\y(nl(i1)+1:nl(i1+1));
zc = zc + sum(log(diag(L)));
end
end
end
edata = 0.5*n.*log(2*pi) + zc + 0.5*b'*b;
end
else
[H,b,B]=mean_prep(gp,x,[]);
if isempty(C)
L=1;
C=0;
logK=0;
KH=H';
elseif issparse(C)
[L,notpositivedefinite] = ldlchol(C);
if notpositivedefinite
[edata, eprior, e] = set_output_for_notpositivedefinite;
return
end
logK = 0.5*sum(log(diag(L)));
KH = ldlsolve(L,H');
else
[L,notpositivedefinite] = chol(C,'lower');
if notpositivedefinite
[edata, eprior, e] = set_output_for_notpositivedefinite;
return
end
logK = sum(log(diag(L)));
KH = L'\(L\H');
end
A = B\eye(size(B)) + H*KH;
[LA, notpositivedefinite] = chol(A,'lower');
if notpositivedefinite
[edata, eprior, e] = set_output_for_notpositivedefinite;
return
end
M = H'*b-y;
% When using CS-covariance function use matrix inversion lemma to
% calculate the inverse -> faster computation
if issparse(C)
MNM = LA\(KH'*M);
MNM = M'*ldlsolve(L,M) - MNM'*MNM;
else
[LN, notpositivedefinite] = chol(C + H'*B*H,'lower');
if notpositivedefinite
[edata, eprior, e] = set_output_for_notpositivedefinite;
return
end
MNM = LN\M;
MNM = MNM'*MNM;
end
edata = 0.5*MNM + logK + sum(log(diag(chol(B)))) + sum(log(diag(LA))) + 0.5*n*log(2*pi);
% A = B\eye(size(B)) + H*KH;
% M = H'*b-y;
% [LN, notpositivedefinite] = chol(C + H'*B*H,'lower');
% if notpositivedefinite
% [edata, eprior, e] = set_output_for_notpositivedefinite;
% return
% end
% MNM = LN\M;
% MNM = MNM'*MNM;
%
% edata = 0.5*MNM + logK + 0.5*log(det(B)) + 0.5*log(det(A)) + 0.5*n*log(2*pi);
end
% ============================================================
% FIC
% ============================================================
case 'FIC'
% The equations in FIC are implemented as by Lawrence (2007)
% See also Snelson and Ghahramani (2006) and Vanhatalo and Vehtari (2007)
% First evaluate needed covariance matrices
% v defines that parameter is a vector
u = gp.X_u;
[Kv_ff, Cv_ff] = gp_trvar(gp, x); % n x 1 vector
K_fu = gp_cov(gp, x, u); % n x m
K_uu = gp_trcov(gp, u); % m x m, noiseles covariance K_uu
K_uu = (K_uu+K_uu')./2; % ensure the symmetry of K_uu
[Luu, notpositivedefinite] = chol(K_uu,'lower');
if notpositivedefinite
[edata, eprior, e] = set_output_for_notpositivedefinite;
return
end
% Evaluate the Lambda (La)
% Q_ff = K_fu*inv(K_uu)*K_fu'
% Here we need only the diag(Q_ff), which is evaluated below
B=Luu\(K_fu'); % m x n
Qv_ff=sum(B.^2)';
Lav = Cv_ff-Qv_ff; % n x 1, Vector of diagonal elements
% iLaKfu = diag(iLav)*K_fu = inv(La)*K_fu
iLaKfu = zeros(size(K_fu)); % f x u,
for i=1:n
iLaKfu(i,:) = K_fu(i,:)./Lav(i); % f x u
end
% The data contribution to the error is
% E = n/2*log(2*pi) + 0.5*log(det(Q_ff+La)) + 0.5*y'inv(Q_ff+La)*y
% = + 0.5*log(det(La)) + 0.5*trace(iLa*y*y') - 0.5*log(det(K_uu))
% + 0.5*log(det(A)) - 0.5*trace(inv(A)*iLaKfu'*y*y'*iLaKfu)
% First some help matrices...
% A = chol(K_uu+K_uf*inv(La)*K_fu))
A = K_uu+K_fu'*iLaKfu;
A = (A+A')./2; % Ensure symmetry
[A, notpositivedefinite] = chol(A,'upper');
if notpositivedefinite
[edata, eprior, e] = set_output_for_notpositivedefinite;
return
end
% The actual error evaluation
% 0.5*log(det(K)) = sum(log(diag(L))), where L = chol(K). NOTE! chol(K) is upper triangular
b = (y'*iLaKfu)/A;
edata = sum(log(Lav)) + y'./Lav'*y - 2*sum(log(diag(Luu))) + 2*sum(log(diag(A))) - b*b';
edata = .5*(edata + n*log(2*pi));
% ============================================================
% PIC
% ============================================================
case {'PIC' 'PIC_BLOCK'}
% First evaluate needed covariance matrices
% v defines that parameter is a vector
u = gp.X_u;
ind = gp.tr_index;
K_fu = gp_cov(gp, x, u); % n x m
K_uu = gp_trcov(gp, u); % m x m, noiseless covariance K_uu
K_uu = (K_uu+K_uu')./2; % ensure the symmetry of K_uu
[Luu, notpositivedefinite] = chol(K_uu,'lower');
if notpositivedefinite
[edata, eprior, e] = set_output_for_notpositivedefinite;
return
end
% Evaluate the Lambda (La)
% Q_ff = K_fu*inv(K_uu)*K_fu'
% Here we need only the blockdiag(Q_ff), which is evaluated below
B=Luu\(K_fu'); % u x f and B'*B = K_fu*K_uu*K_uf
iLaKfu = zeros(size(K_fu)); % f x u
edata = 0;
for i=1:length(ind)
Qbl_ff = B(:,ind{i})'*B(:,ind{i});
[Kbl_ff, Cbl_ff] = gp_trcov(gp, x(ind{i},:));
Labl{i} = Cbl_ff - Qbl_ff;
iLaKfu(ind{i},:) = Labl{i}\K_fu(ind{i},:);
[Ltmp, notpositivedefinite]=chol(Labl{i},'upper');
if notpositivedefinite
[edata, eprior, e] = set_output_for_notpositivedefinite;
return
end
edata = edata + 2*sum(log(diag(Ltmp))) + y(ind{i},:)'*(Labl{i}\y(ind{i},:));
end
% The data contribution to the error is
% E = n/2*log(2*pi) + 0.5*log(det(Q_ff+La)) + 0.5*y'inv(Q_ff+La)y
% First some help matrices...
% A = chol(K_uu+K_uf*inv(La)*K_fu))
A = K_uu+K_fu'*iLaKfu;
A = (A+A')./2; % Ensure symmetry
[A, notpositivedefinite] = chol(A,'lower');
if notpositivedefinite
[edata, eprior, e] = set_output_for_notpositivedefinite;
return
end
% The actual error evaluation
% 0.5*log(det(K)) = sum(log(diag(L))), where L = chol(K). NOTE! chol(K) is upper triangular
b = (y'*iLaKfu)*inv(A)';
edata = edata - 2*sum(log(diag(Luu))) + 2*sum(log(diag(A))) - b*b';
edata = .5*(edata + n*log(2*pi));
% ============================================================
% CS+FIC
% ============================================================
case 'CS+FIC'
u = gp.X_u;
% Separate the FIC and CS covariance functions
cf_orig = gp.cf;
cf1 = {};
cf2 = {};
j = 1;
k = 1;
for i = 1:ncf
if ~isfield(gp.cf{i},'cs')
cf1{j} = gp.cf{i};
j = j + 1;
else
cf2{k} = gp.cf{i};
k = k + 1;
end
end
gp.cf = cf1;
% Evaluate the covariance matrices needed for FIC part
[Kv_ff, Cv_ff] = gp_trvar(gp, x); % n x 1 vector
K_fu = gp_cov(gp, x, u); % n x m
K_uu = gp_trcov(gp, u); % m x m, noiseless covariance K_uu
K_uu = (K_uu+K_uu')./2; % ensure the symmetry of K_uu
[Luu, notpositivedefinite] = chol(K_uu,'lower');
if notpositivedefinite
[edata, eprior, e] = set_output_for_notpositivedefinite;
return
end
% Evaluate the Lambda (La)
% Q_ff = K_fu*inv(K_uu)*K_fu'
B=Luu\(K_fu'); % m x n
Qv_ff=sum(B.^2)';
Lav = Cv_ff-Qv_ff; % n x 1, Vector of diagonal elements
% Evaluate the CS covariance matrix
gp.cf = cf2;
K_cs = gp_trcov(gp,x);
La = sparse(1:n,1:n,Lav,n,n) + K_cs;
gp.cf = cf_orig; % Set the original covariance functions in the GP structure
[LD, notpositivedefinite] = ldlchol(La);
if notpositivedefinite
[edata, eprior, e] = set_output_for_notpositivedefinite;
return
end
% iLaKfu = La\K_fu;
iLaKfu = ldlsolve(LD,K_fu);
edata = sum(log(diag(LD))) + y'*ldlsolve(LD,y);
% The data contribution to the error is
% E = n/2*log(2*pi) + 0.5*log(det(Q_ff+La)) + 0.5*y'inv(Q_ff+La)y
% First some help matrices...
% A = chol(K_uu+K_uf*inv(La)*K_fu))
A = K_uu+K_fu'*iLaKfu;
A = (A+A')./2; % Ensure symmetry
[A, notpositivedefinite] = chol(A,'upper');
if notpositivedefinite
[edata, eprior, e] = set_output_for_notpositivedefinite;
return
end
% The actual error evaluation
% 0.5*log(det(K)) = sum(log(diag(L))), where L = chol(K). NOTE! chol(K) is upper triangular
%b = (y'*iLaKfu)*inv(A)';
b = (y'*iLaKfu)/A;
edata = edata - 2*sum(log(diag(Luu))) + 2*sum(log(diag(A))) - b*b';
edata = .5*(edata + n*log(2*pi));
% ============================================================
% DTC/VAR
% ============================================================
case {'DTC' 'VAR' 'SOR'}
% Implementation of DTC varies only slightly from FIC: essentially, only
% Lav is defined differently. For equations, see e.g. Quinonero-Candela
% and Rasmussen. For VAR, a trace term is added to the DTC model, see
% Titsias (2009).
% First evaluate needed covariance matrices
% v defines that parameter is a vector
u = gp.X_u;
[Kv_ff, Cv_ff] = gp_trvar(gp, x); % n x 1 vector
K_fu = gp_cov(gp, x, u); % n x m
K_uu = gp_trcov(gp, u); % m x m, noiseless covariance K_uu
K_uu = (K_uu+K_uu')./2; % ensure the symmetry of K_uu
[Luu, notpositivedefinite] = chol(K_uu, 'lower');
if notpositivedefinite
[edata, eprior, e] = set_output_for_notpositivedefinite;
return
end
% Evaluate the Lambda (La)
% Q_ff = K_fu*inv(K_uu)*K_fu';
% Here we need only the diag(Q_ff), which is evaluated below
B=Luu\(K_fu'); % m x n
Qv_ff=sum(B.^2)';
Lav = Cv_ff-Kv_ff; % n x 1, Vector of diagonal elements
% iLaKfu = diag(iLav)*K_fu = inv(La)*K_fu
iLaKfu = zeros(size(K_fu)); % f x u,
for i=1:n
iLaKfu(i,:) = K_fu(i,:)./Lav(i); % f x u
end
% The data contribution to the error is
% E = n/2*log(2*pi) + 0.5*log(det(Q_ff+La)) + 0.5*t'inv(Q_ff+La)*t
% = + 0.5*log(det(La)) + 0.5*trace(iLa*t*t') - 0.5*log(det(K_uu))
% + 0.5*log(det(A)) - 0.5*trace(inv(A)*iLaKfu'*t*t'*iLaKfu)
% First some help matrices...
% A = chol(K_uu+K_uf*inv(La)*K_fu))
A = K_uu+K_fu'*iLaKfu;
A = (A+A')./2; % Ensure symmetry
[A, notpositivedefinite] = chol(A);
if notpositivedefinite
[edata, eprior, e] = set_output_for_notpositivedefinite;
return
end
% The actual error evaluation
% 0.5*log(det(K)) = sum(log(diag(L))), where L = chol(K). NOTE! chol(K) is upper triangular
b = (y'*iLaKfu)/A;
edata = sum(log(Lav)) + y'./Lav'*y - 2*sum(log(diag(Luu))) + 2*sum(log(diag(A))) - b*b';
edata = 0.5*(edata + n*log(2*pi));
if strcmp(gp.type, 'VAR')
edata = edata + 0.5*sum((Kv_ff-Qv_ff)./Lav);
end
%edata = edata - 0.5*sum((Kv_ff-Qv_ff)./Lav);% - sum(diag(B'*B),1)); %sum(B.^2,1)'
%sum(Qv_ff)
%K_ff=gp_trcov(gp,x);
%0.5*trace(K_ff-K_fu*inv(K_uu)*K_fu')
%0.5*trace(K_ff-B'*B)
% ============================================================
% SSGP
% ============================================================
case 'SSGP' % Predictions with sparse spectral sampling approximation for GP
% The approximation is proposed by M. Lazaro-Gredilla,
% J. Quinonero-Candela and A. Figueiras-Vidal
% in Microsoft Research technical report MSR-TR-2007-152 (November 2007)
% NOTE! This does not work at the moment.
[Phi, S] = gp_trcov(gp, x);
m = size(Phi,2);
A = eye(m,m) + Phi'*(S\Phi);
[A, notpositivedefinite] = chol(A,'lower');
if notpositivedefinite
[edata, eprior, e] = set_output_for_notpositivedefinite;
return
end
b = (y'/S*Phi)/A';
edata = 0.5*n.*log(2*pi) + 0.5*sum(log(diag(S))) + sum(log(diag(A))) + 0.5*y'*(S\y) - 0.5*b*b';
otherwise
error('Unknown type of Gaussian process!')
end
% ============================================================
% Evaluate the prior contribution to the error from covariance functions
% ============================================================
eprior = 0;
if ~isempty(strfind(gp.infer_params, 'covariance'))
for i=1:ncf
gpcf = gp.cf{i};
eprior = eprior - gpcf.fh.lp(gpcf);
end
end
% ============================================================
% Evaluate the prior contribution to the error from Gaussian likelihood
% ============================================================
if ~isempty(strfind(gp.infer_params, 'likelihood')) && isfield(gp.lik.fh,'trcov') && isfield(gp.lik, 'p')
% a Gaussian likelihood
lik = gp.lik;
eprior = eprior - lik.fh.lp(lik);
end
% ============================================================
% Evaluate the prior contribution to the error from the inducing inputs
% ============================================================
if ~isempty(strfind(gp.infer_params, 'inducing'))
if isfield(gp, 'p') && isfield(gp.p, 'X_u') && ~isempty(gp.p.X_u)
for i = 1:size(gp.X_u,1)
if iscell(gp.p.X_u) % Own prior for each inducing input
pr = gp.p.X_u{i};
eprior = eprior - pr.fh.lp(gp.X_u(i,:), pr);
else
eprior = eprior - gp.p.X_u.fh.lp(gp.X_u(i,:), gp.p.X_u);
end
end
end
end
% ============================================================
% Evaluate the prior contribution to the error from mean functions
% ============================================================
if ~isempty(strfind(gp.infer_params, 'mean'))
for i=1:length(gp.meanf)
gpmf = gp.meanf{i};
eprior = eprior - gpmf.fh.lp(gpmf);
end
end
e = edata + eprior;
function [edata, eprior, e] = set_output_for_notpositivedefinite()
%instead of stopping to chol error, return NaN
edata = NaN;
eprior= NaN;
e = NaN;
end
end
|
github
|
lcnbeapp/beapp-master
|
lik_multinom.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/lik_multinom.m
| 10,705 |
UNKNOWN
|
e987daaabebb5e9ffcb11120d36bb4f4
|
function lik = lik_multinom(varargin)
%LIK_MULTINOM Create a multinom likelihood structure
%
% Description
% LIK = LIK_MULTINOM creates multinom likelihood for multi-class
% count data. The observed numbers in each class with C classes is given
% as 1xC vector.
%
% The likelihood is defined as follows:
% __ n __ C
% p(y|f^1, ..., f^C, z) = || i=1 [ gamma(N+1) || c=1 p_i^c^(y_i^c)/gamma(y_i^c+1)]
%
% where p_i^c = exp(f_i^c)/ (sum_c=1^C exp(f_i^c)) is the success
% probability for class c, which is a function of the latent variable
% f_i^c for the corresponding class and N=sum(y) is the number of trials.
%
% See also
% GP_SET, LIK_*
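%
% A minimal sketch of the expected data layout (the counts are hypothetical):
%   y = [3 1 0; 0 2 2];   % N x C class counts; row i sums to the trial count N_i
%   lik = lik_multinom();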
% Copyright (c) 2010 Jaakko Riihimäki, Pasi Jylänki
% Copyright (c) 2010 Aki Vehtari
% Copyright (c) 2010 Jarno Vanhatalo
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'LIK_MULTINOM';
ip.addOptional('lik', [], @isstruct);
ip.parse(varargin{:});
lik=ip.Results.lik;
if isempty(lik)
init=true;
lik.type = 'Multinom';
lik.nondiagW = true;
else
if ~isfield(lik,'type') || ~isequal(lik.type,'Multinom')
error('First argument does not seem to be a valid likelihood function structure')
end
init=false;
end
if init
% Set the function handles to the subfunctions
lik.fh.pak = @lik_multinom_pak;
lik.fh.unpak = @lik_multinom_unpak;
lik.fh.ll = @lik_multinom_ll;
lik.fh.llg = @lik_multinom_llg;
lik.fh.llg2 = @lik_multinom_llg2;
lik.fh.llg3 = @lik_multinom_llg3;
lik.fh.predy = @lik_multinom_predy;
lik.fh.invlink = @lik_multinom_invlink;
lik.fh.recappend = @lik_multinom_recappend;
end
end
function [w,s] = lik_multinom_pak(lik)
%LIK_MULTINOM_PAK Combine likelihood parameters into one vector.
%
% Description
% W = LIK_MULTINOM_PAK(LIK) takes a likelihood structure LIK and
% returns an empty vector W. If Multinom likelihood had
% parameters this would combine them into a single row vector
% W (see e.g. lik_negbin). This is a mandatory subfunction used
% for example in energy and gradient computations.
%
%
% See also
% LIK_MULTINOM_UNPAK, GP_PAK
w = []; s = {};
end
function [lik, w] = lik_multinom_unpak(lik, w)
%LIK_MULTINOM_UNPAK Extract likelihood parameters from the vector.
%
% Description
% W = LIK_MULTINOM_UNPAK(W, LIK) Doesn't do anything.
%
% If Multinom likelihood had parameters this would extract the
% parameters from the vector W into the LIK structure. This is a
% mandatory subfunction used for example in energy and gradient
% computations.
%
%
% See also
% LIK_MULTINOM_PAK, GP_UNPAK
lik=lik;
w=w;
end
function ll = lik_multinom_ll(lik, y, f, z)
%LIK_MULTINOM_LL Log likelihood
%
% Description
% LL = LIK_MULTINOM_LL(LIK, Y, F) takes a likelihood structure
% LIK, class counts Y (NxC matrix), and latent values F (NxC
% matrix). Returns the log likelihood, log p(y|f,z). This
% subfunction is needed when using Laplace approximation or
% MCMC for inference with non-Gaussian likelihoods. This
% subfunction is also used in information criteria
% (DIC, WAIC) computations.
%
% See also
% LIK_MULTINOM_LLG, LIK_MULTINOM_LLG3, LIK_MULTINOM_LLG2, GPLA_E
f=reshape(f,size(y));
expf = exp(f);
p = expf ./ repmat(sum(expf,2),1,size(expf,2));
N = sum(y,2);
ll = sum(gammaln(N+1) - sum(gammaln(y+1),2) + sum(y.*log(p),2) );
end
function llg = lik_multinom_llg(lik, y, f, param, z)
%LIK_MULTINOM_LLG Gradient of the log likelihood
%
% Description
% LLG = LIK_MULTINOM_LLG(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, class labels Y, and latent values F. Returns
% the gradient of the log likelihood with respect to PARAM. At
% the moment PARAM can be 'param' or 'latent'. This subfunction
% is needed when using Laplace approximation or MCMC for inference
% with non-Gaussian likelihoods.
%
% See also
% LIK_MULTINOM_LL, LIK_MULTINOM_LLG2, LIK_MULTINOM_LLG3, GPLA_E
f=reshape(f,size(y));
C = size(y,2);
expf2 = exp(f);
N=sum(y, 2);
pi2 = (N*ones(1,C)).*expf2./(sum(expf2, 2)*ones(1,C));
pi_vec=pi2(:);
llg = y(:)-pi_vec;
end
function [pi_vec, pi_mat] = lik_multinom_llg2(lik, y, f, param, z)
%LIK_MULTINOM_LLG2 Second gradients of the log likelihood
%
% Description
% LLG2 = LIK_MULTINOM_LLG2(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, class labels Y, and latent values F. Returns
% the Hessian of the log likelihood with respect to PARAM. At
% the moment PARAM can be only 'latent'. LLG2 is a vector with
% diagonal elements of the Hessian matrix (off diagonals are
% zero). This subfunction is needed when using Laplace
% approximation or EP for inference with non-Gaussian likelihoods.
%
% See also
% LIK_MULTINOM_LL, LIK_MULTINOM_LLG, LIK_MULTINOM_LLG3, GPLA_E
% multinom:
[n,nout]=size(y);
N = sum(y,2)*ones(1,nout);
f=reshape(f,n,nout);
expf2 = exp(f);
pi2 = expf2./(sum(expf2, 2)*ones(1,nout));
pi_vec=pi2(:).*N(:);
pi_mat=zeros(nout*n, n);
for i1=1:nout
pi_mat((1+(i1-1)*n):(nout*n+1):end)=pi2(:,i1).*sqrt(N(:,i1));
end
% D = diag(pi_vec);
% llg2 = -D + pi_mat*pi_mat';
end
function [dw_mat] = lik_multinom_llg3(lik, y, f, param, z)
%LIK_MULTINOM_LLG3 Third gradients of the log likelihood
%
% Description
% LLG3 = LIK_MULTINOM_LLG3(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, class labels Y, and latent values F and
% returns the third gradients of the log likelihood with
% respect to PARAM. At the moment PARAM can be only 'latent'.
% LLG3 is a vector with third gradients. This subfunction is
% needed when using Laplace approximation for inference with
% non-Gaussian likelihoods.
%
% See also
% LIK_MULTINOM_LL, LIK_MULTINOM_LLG, LIK_MULTINOM_LLG2, GPLA_E, GPLA_G
[n,nout] = size(y);
f2 = reshape(f,n,nout);
N=sum(y, 2);
expf2 = exp(f2);
pi2 = expf2./(sum(expf2, 2)*ones(1,nout));
pi_vec=pi2(:);
dw_mat=zeros(nout,nout,nout,n);
for cc3=1:nout
for ii1=1:n
pic=pi_vec(ii1:n:(nout*n));
for cc1=1:nout
for cc2=1:nout
% multinom third derivatives
cc_sum_tmp=0;
if cc1==cc2 && cc1==cc3 && cc2==cc3
cc_sum_tmp=cc_sum_tmp+pic(cc1);
end
if cc1==cc2
cc_sum_tmp=cc_sum_tmp-pic(cc1)*pic(cc3);
end
if cc2==cc3
cc_sum_tmp=cc_sum_tmp-pic(cc1)*pic(cc2);
end
if cc1==cc3
cc_sum_tmp=cc_sum_tmp-pic(cc1)*pic(cc2);
end
cc_sum_tmp=cc_sum_tmp+2*pic(cc1)*pic(cc2)*pic(cc3);
dw_mat(cc1,cc2,cc3,ii1)=cc_sum_tmp.*N(ii1);
end
end
end
end
end
function [lpy, Ey, Vary] = lik_multinom_predy(lik, Ef, Varf, yt, zt)
%LIK_MULTINOM_PREDY Returns the predictive mean, variance and density of y
%
% Description
% LPY = LIK_MULTINOM_PREDY(LIK, EF, VARF, YT)
% Returns logarithm of the predictive density PY of YT, that is
% p(yt | y) = \int p(yt | f) p(f|y) df.
% This requires also the incidence counts YT. This subfunction
% is needed when computing posterior predictive distributions for
% future observations.
%
% [LPY, EY, VARY] = LIK_MULTINOM_PREDY(LIK, EF, VARF, YT) takes a
% likelihood structure LIK, posterior mean EF and posterior
% Variance VARF of the latent variable and returns the
% posterior predictive mean EY and variance VARY of the
% observations related to the latent variables. This subfunction
% is needed when computing posterior predictive distributions for
% future observations.
%
%
% See also
% GPLA_PRED, GPEP_PRED, GPMC_PRED
N=sum(yt,2);
S=10000;
[ntest, nout]=size(yt);
pi=zeros(ntest,nout);
lpy=zeros(ntest,nout);
Ey=zeros(ntest,nout);
Vary=zeros(size(Varf));
Ef=reshape(Ef(:),ntest,nout);
[notused,notused,c] =size(Varf);
if c>1
mcmc=false;
else
mcmc=true;
Varf=reshape(Varf(:), ntest, nout);
end
for i1=1:ntest
if mcmc
Sigm_tmp = (Varf(i1,:));
f_star=bsxfun(@plus, Ef(i1,:), bsxfun(@times, sqrt(Sigm_tmp), ...
randn(S,nout)));
else
Sigm_tmp=(Varf(:,:,i1)'+Varf(:,:,i1))./2;
f_star=mvnrnd(Ef(i1,:), Sigm_tmp, S);
end
tmp = exp(f_star);
tmp = tmp./(sum(tmp, 2)*ones(1,size(tmp,2)));
if nargout > 1
Ey(i1,:) = N(i1).*mean(tmp);
for z1 = 1:nout;
for z2 = 1:nout
for z3=1:S
Var_tmp(:,:,z3) = (diag(tmp(z3,:)) - tmp(z3,:)'*tmp(z3,:));
end
if mcmc
Vary(i1+(0:nout-1)*ntest,:) = diag(N(i1).*mean(Var_tmp,3));
else
Vary(:,:,i1) = N(i1).*mean(Var_tmp,3);
end
end
end
end
lpy=[];
if ~isempty(yt)
ytmp = repmat(yt(i1,:),S,1);
lpy(i1,:) = log(mean( mnpdf(ytmp,tmp) ));
end
end
lpy=lpy(:);
Ey=Ey(:);
end
function p = lik_multinom_invlink(lik, f, z)
%LIK_MULTINOM_INVLINK Returns values of inverse link function
%
% Description
% P = LIK_MULTINOM_INVLINK(LIK, F) takes a likelihood structure LIK and
% latent values F and returns the values of inverse link function P.
% This subfunction is needed when using function gp_predprctmu.
%
% See also
% LIK_MULTINOM_LL, LIK_MULTINOM_PREDY
p = multinominv(f).*z;
end
function reclik = lik_multinom_recappend(reclik, ri, lik)
%RECAPPEND Append the parameters to the record
%
% Description
% RECLIK = LIK_MULTINOM_RECAPPEND(RECLIK, RI, LIK) takes a
% likelihood record structure RECLIK, record index RI and
% likelihood structure LIK with the current MCMC samples of
% the parameters. Returns RECLIK which contains all the old
% samples and the current samples from LIK. This subfunction
% is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC
if nargin == 2
reclik.type = 'Multinom';
reclik.nondiagW = true;
% Set the function handles
reclik.fh.pak = @lik_multinom_pak;
reclik.fh.unpak = @lik_multinom_unpak;
reclik.fh.ll = @lik_multinom_ll;
reclik.fh.llg = @lik_multinom_llg;
reclik.fh.llg2 = @lik_multinom_llg2;
reclik.fh.llg3 = @lik_multinom_llg3;
reclik.fh.predy = @lik_multinom_predy;
reclik.fh.invlink = @lik_multinom_invlink;
reclik.fh.recappend = @lik_multinom_recappend;
end
end
| github | lcnbeapp/beapp-master | lik_binomial.m | .m | beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/lik_binomial.m | 18,897 | UNKNOWN | a57c5a78ab9b3f5404f6da03ff6f88c6 |
function lik = lik_binomial(varargin)
%LIK_BINOMIAL Create a Binomial likelihood structure
%
% Description
% LIK = LIK_BINOMIAL creates Binomial likelihood structure.
%
% The likelihood is defined as follows:
% __ n
%      p(y|f, z) = || i=1 [ p_i^(y_i)*(1-p_i)^(z_i-y_i) *
% gamma(z_i+1)/(gamma(y_i+1)*gamma(z_i-y_i+1))]
%  where p_i = exp(f_i)/(1+exp(f_i)) is the success probability,
% which is a function of the latent variable f_i and z is a
% vector of numbers of trials.
%
% When using Binomial likelihood you need to give the vector z
% as an extra parameter to each function that requires y also.
% For example, you should call gpla_e as follows
% gpla_e(w, gp, x, y, 'z', z)
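%
%  A minimal usage sketch (the inputs x, y, xt and the trial counts z, zt
%  are hypothetical, and gp_set, gp_optim and gp_pred are assumed to be
%  on the path; this only illustrates how z is passed along):
%    lik = lik_binomial();
%    gp  = gp_set('lik', lik, 'cf', gpcf_sexp(), 'jitterSigma2', 1e-6);
%    gp  = gp_optim(gp, x, y, 'z', z);     % z = numbers of trials
%    [Ef, Varf] = gp_pred(gp, x, y, xt, 'z', z, 'zt', zt);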
%
% See also
% GP_SET, LIK_*
%
% Copyright (c) 2009-2010 Jaakko Riihimäki & Jarno Vanhatalo
% Copyright (c) 2010-2011 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'LIK_BINOMIAL';
ip.addOptional('lik', [], @isstruct);
ip.parse(varargin{:});
lik=ip.Results.lik;
if isempty(lik)
init=true;
lik.type = 'Binomial';
else
if ~isfield(lik,'type') || ~isequal(lik.type,'Binomial')
error('First argument does not seem to be a valid likelihood function structure')
end
init=false;
end
if init
% Set the function handles to the subfunctions
lik.fh.pak = @lik_binomial_pak;
lik.fh.unpak = @lik_binomial_unpak;
lik.fh.ll = @lik_binomial_ll;
lik.fh.llg = @lik_binomial_llg;
lik.fh.llg2 = @lik_binomial_llg2;
lik.fh.llg3 = @lik_binomial_llg3;
lik.fh.tiltedMoments = @lik_binomial_tiltedMoments;
lik.fh.predy = @lik_binomial_predy;
lik.fh.predprcty = @lik_binomial_predprcty;
lik.fh.invlink = @lik_binomial_invlink;
lik.fh.recappend = @lik_binomial_recappend;
end
end
function [w,s] = lik_binomial_pak(lik)
%LIK_BINOMIAL_PAK Combine likelihood parameters into one vector.
%
% Description
% W = LIK_BINOMIAL_PAK(LIK) takes a likelihood structure LIK
%    and returns an empty vector W. If Binomial likelihood had
% parameters this would combine them into a single row vector
% W (see e.g. likelih_negbin). This is a mandatory subfunction
% used for example in energy and gradient computations.
%
% See also
% LIK_NEGBIN_UNPAK, GP_PAK
w = []; s = {};
end
function [lik, w] = lik_binomial_unpak(lik, w)
%LIK_BINOMIAL_UNPAK Extract likelihood parameters from the vector.
%
% Description
% W = LIK_BINOMIAL_UNPAK(W, LIK) Doesn't do anything.
%
%    If Binomial likelihood had parameters, this would extract
%    them from the vector W to the LIK structure.
% This is a mandatory subfunction used for example in energy
% and gradient computations.
%
% See also
% LIK_BINOMIAL_PAK, GP_UNPAK
lik=lik;
w=w;
end
function ll = lik_binomial_ll(lik, y, f, z)
%LIK_BINOMIAL_LL Log likelihood
%
% Description
% LL = LIK_BINOMIAL_LL(LIK, Y, F, Z) takes a likelihood
%    structure LIK, success counts Y, numbers of trials Z, and
% latent values F. Returns the log likelihood, log p(y|f,z).
% This subfunction is needed when using Laplace approximation
% or MCMC for inference with non-Gaussian likelihoods. This
% subfunction is also used in information criteria (DIC, WAIC)
% computations.
%
% See also
% LIK_BINOMIAL_LLG, LIK_BINOMIAL_LLG3, LIK_BINOMIAL_LLG2, GPLA_E
if isempty(z)
error(['lik_binomial -> lik_binomial_ll: missing z!'...
           'Binomial likelihood needs the numbers of trials '...
           'as an extra input z. See, for '...
'example, lik_binomial and gpla_e. ']);
end
expf = exp(f);
p = expf ./ (1+expf);
N = z;
ll = sum(gammaln(N+1)-gammaln(y+1)-gammaln(N-y+1)+y.*log(p)+(N-y).*log(1-p));
end
function llg = lik_binomial_llg(lik, y, f, param, z)
%LIK_BINOMIAL_LLG Gradient of the log likelihood
%
% Description
% LLG = LIK_BINOMIAL_LLG(LIK, Y, F, PARAM) takes a likelihood
%    structure LIK, success counts Y, numbers of trials Z and
% latent values F. Returns the gradient of the log likelihood
% with respect to PARAM. At the moment PARAM can be 'param' or
% 'latent'. This subfunction is needed when using Laplace
% approximation or MCMC for inference with non-Gaussian
% likelihoods.
%
% See also
% LIK_BINOMIAL_LL, LIK_BINOMIAL_LLG2, LIK_BINOMIAL_LLG3, GPLA_E
if isempty(z)
error(['lik_binomial -> lik_binomial_llg: missing z!'...
           'Binomial likelihood needs the numbers of trials '...
           'as an extra input z. See, for '...
'example, lik_binomial and gpla_e. ']);
end
switch param
case 'latent'
expf = exp(f);
N = z;
llg = y./(1+expf) - (N-y).*expf./(1+expf);
end
end
function llg2 = lik_binomial_llg2(lik, y, f, param, z)
%LIK_BINOMIAL_LLG2 Second gradients of the log likelihood
%
% Description
% LLG2 = LIK_BINOMIAL_LLG2(LIK, Y, F, PARAM) takes a
%    likelihood structure LIK, success counts Y, numbers of trials
% Z, and latent values F. Returns the Hessian of the log
% likelihood with respect to PARAM. At the moment PARAM can be
% only 'latent'. G2 is a vector with diagonal elements of the
% Hessian matrix (off diagonals are zero). This subfunction
% is needed when using Laplace approximation or EP for inference
% with non-Gaussian likelihoods.
%
% See also
% LIK_BINOMIAL_LL, LIK_BINOMIAL_LLG, LIK_BINOMIAL_LLG3, GPLA_E
if isempty(z)
error(['lik_binomial -> lik_binomial_llg2: missing z!'...
           'Binomial likelihood needs the numbers of trials '...
           'as an extra input z. See, for '...
'example, lik_binomial and gpla_e. ']);
end
switch param
case 'latent'
expf = exp(f);
N = z;
llg2 = -N.*expf./(1+expf).^2;
end
end
function llg3 = lik_binomial_llg3(lik, y, f, param, z)
%LIK_BINOMIAL_LLG3 Third gradients of the log likelihood
%
% Description
% LLG3 = LIK_BINOMIAL_LLG3(LIK, Y, F, PARAM) takes a
%    likelihood structure LIK, success counts Y, numbers of trials
% Z and latent values F and returns the third gradients of the
% log likelihood with respect to PARAM. At the moment PARAM
% can be only 'latent'. G3 is a vector with third gradients.
%    This subfunction is needed when using Laplace approximation 
% for inference with non-Gaussian likelihoods.
%
% See also
% LIK_BINOMIAL_LL, LIK_BINOMIAL_LLG, LIK_BINOMIAL_LLG2, GPLA_E, GPLA_G
if isempty(z)
error(['lik_binomial -> lik_binomial_llg3: missing z!'...
           'Binomial likelihood needs the numbers of trials '...
           'as an extra input z. See, for '...
'example, lik_binomial and gpla_e. ']);
end
switch param
case 'latent'
expf = exp(f);
N = z;
llg3 = N.*(expf.*(expf-1))./(1+expf).^3;
end
end
function [logM_0, m_1, sigm2hati1] = lik_binomial_tiltedMoments(lik, y, i1, sigm2_i, myy_i, z)
%LIK_BINOMIAL_TILTEDMOMENTS Returns the marginal moments for EP algorithm
%
% Description
% [M_0, M_1, M2] = LIK_BINOMIAL_TILTEDMOMENTS(LIK, Y, I, S2,
%    MYY, Z) takes a likelihood structure LIK, success counts Y,
% numbers of trials Z, index I and cavity variance S2 and mean
% MYY. Returns the zeroth moment M_0, mean M_1 and variance
% M_2 of the posterior marginal (see Rasmussen and Williams
% (2006): Gaussian processes for Machine Learning, page 55).
% This subfunction is needed when using EP for inference with
% non-Gaussian likelihoods.
%
% See also
% GPEP_E
% if isempty(z)
% error(['lik_binomial -> lik_binomial_tiltedMoments: missing z!'...
% 'Binomial likelihood needs the expected number of '...
% 'occurrences as an extra input z. See, for '...
% 'example, lik_binomial and gpla_e. ']);
% end
yy = y(i1);
N = z(i1);
logM_0=zeros(size(yy));
m_1=zeros(size(yy));
sigm2hati1=zeros(size(yy));
for i=1:length(i1)
% Create function handle for the function to be integrated
% (likelihood * cavity) and useful integration limits
[tf,minf,maxf]=init_binomial_norm(yy(i),myy_i(i),sigm2_i(i),N(i));
% Integrate with quadrature
RTOL = 1.e-6;
ATOL = 1.e-10;
[m_0, m_1(i), m_2] = quad_moments(tf,minf, maxf, RTOL, ATOL);
sigm2hati1(i) = m_2 - m_1(i).^2;
% If the second central moment is less than cavity variance
% integrate more precisely. Theoretically for log-concave
% likelihood should be sigm2hati1 < sigm2_i.
if sigm2hati1(i) >= sigm2_i(i)
ATOL = ATOL.^2;
RTOL = RTOL.^2;
[m_0, m_1(i), m_2] = quad_moments(tf, minf, maxf, RTOL, ATOL);
      sigm2hati1(i) = m_2 - m_1(i).^2;
% if sigm2hati1 >= sigm2_i
% error('lik_binomial_tilted_moments: sigm2hati1 >= sigm2_i');
% end
end
logM_0(i) = log(m_0);
end
end
function [lpy, Ey, Vary] = lik_binomial_predy(lik, Ef, Varf, yt, zt)
%LIK_BINOMIAL_PREDY Returns the predictive mean, variance and density of y
%
% Description
%    [LPY] = LIK_BINOMIAL_PREDY(LIK, EF, VARF, YT, ZT)
%    Returns logarithm of the predictive density PY of YT, that is 
%        p(yt | y, zt) = \int p(yt | f, zt) p(f|y) df.
%    This requires also the success counts YT and the numbers of trials ZT.
% This subfunction is needed when computing posterior predictive
% distributions for future observations.
%
% [LPY, EY, VARY] = LIK_BINOMIAL_PREDY(LIK, EF, VARF) takes a
% likelihood structure LIK, posterior mean EF and posterior
% Variance VARF of the latent variable and returns the
% posterior predictive mean EY and variance VARY of the
% observations related to the latent variables. This subfunction
% is needed when computing posterior predictive distributions for
% future observations.
%
%
% See also
% GPEP_PRED, GPLA_PRED, GPMC_PRED
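%
%  A call sketch (Ef and Varf would typically come from gp_pred or
%  gpla_pred; yt and zt are hypothetical test success counts and trial
%  numbers):
%    [lpyt, Eyt, Varyt] = lik.fh.predy(lik, Ef, Varf, yt, zt);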
if isempty(zt)
error(['lik_binomial -> lik_binomial_predy: missing z!'...
           'Binomial likelihood needs the numbers of trials '...
           'as an extra input z. See, for '...
'example, lik_binomial and gpla_e. ']);
end
if nargout > 1
nt=length(Ef);
Ey=zeros(nt,1);
EVary = zeros(nt,1);
VarEy = zeros(nt,1);
for i1=1:nt
ci = sqrt(Varf(i1));
F = @(x)zt(i1)./(1+exp(-x)).*norm_pdf(x,Ef(i1),sqrt(Varf(i1)));
Ey(i1) = quadgk(F,Ef(i1)-6*ci,Ef(i1)+6*ci);
F2 = @(x)zt(i1)./(1+exp(-x)).*(1-1./(1+exp(-x))).*norm_pdf(x,Ef(i1),sqrt(Varf(i1)));
EVary(i1) = quadgk(F2,Ef(i1)-6*ci,Ef(i1)+6*ci);
F3 = @(x)(zt(i1)./(1+exp(-x))).^2.*norm_pdf(x,Ef(i1),sqrt(Varf(i1)));
VarEy(i1) = quadgk(F3,Ef(i1)-6*ci,Ef(i1)+6*ci) - Ey(i1).^2;
end
Vary = EVary+VarEy;
end
nt=length(yt);
lpy=zeros(nt,1);
for i1=1:nt
ci = sqrt(Varf(i1));
F = @(x)exp(gammaln(zt(i1)+1)-gammaln(yt(i1)+1)-gammaln(zt(i1)-yt(i1)+1) + yt(i1).*log(1./(1+exp(-x))) + (zt(i1)-yt(i1)).*log(1-(1./(1+exp(-x))))).*norm_pdf(x,Ef(i1),sqrt(Varf(i1)));
lpy(i1) = log(quadgk(F,Ef(i1)-6*ci,Ef(i1)+6*ci));
end
end
function prctys = lik_binomial_predprcty(lik, Ef, Varf, zt, prcty)
%LIK_BINOMIAL_PREDPRCTY  Returns the percentiles of predictive density of y
%
% Description
%    PRCTY = LIK_BINOMIAL_PREDPRCTY(LIK, EF, VARF, YT, ZT)
%    Returns percentiles of the predictive density PY of YT.
%    This requires also the success counts YT and the numbers of trials ZT. This 
% subfunction is needed when using function gp_predprcty.
%
% See also
% GP_PREDPCTY
if isempty(zt)
error(['lik_binomial -> lik_binomial_predprcty: missing z!'...
           'Binomial likelihood needs the numbers of trials '...
           'as an extra input z. See, for '...
'example, lik_binomial and gpla_e. ']);
end
opt=optimset('TolX',.5,'Display','off');
nt=size(Ef,1);
prctys = zeros(nt,numel(prcty));
prcty=prcty/100;
for i1=1:nt
ci = sqrt(Varf(i1));
for i2=1:numel(prcty)
a=floor(fminbnd(@(a) (quadgk(@(f) binocdf(a,zt(i1),logitinv(f)).*norm_pdf(f,Ef(i1),ci),Ef(i1)-6*ci,Ef(i1)+6*ci,'AbsTol',1e-4)-prcty(i2)).^2,binoinv(prcty(i2),zt(i1),logitinv(Ef(i1)-1.96*ci)),binoinv(prcty(i2),zt(i1),logitinv(Ef(i1)+1.96*ci)),opt));
if quadgk(@(f) binocdf(a,zt(i1),logitinv(f)).*norm_pdf(f,Ef(i1),ci),Ef(i1)-6*ci,Ef(i1)+6*ci,'AbsTol',1e-4)<prcty(i2)
a=a+1;
end
prctys(i1,i2)=a;
end
end
end
function [df,minf,maxf] = init_binomial_norm(yy,myy_i,sigm2_i,N)
%INIT_BINOMIAL_NORM
%
% Description
% Return function handle to a function evaluating Binomial *
% Gaussian which is used for evaluating (likelihood * cavity)
% or (likelihood * posterior) Return also useful limits for
% integration. This is private function for lik_binomial. This
% subfunction is needed by subfunctions tiltedMoments and predy.
%
% See also
% LIK_BINOMIAL_TILTEDMOMENTS, LIK_BINOMIAL_PREDY
% avoid repetitive evaluation of constant part
ldconst = gammaln(N+1)-gammaln(yy+1)-gammaln(N-yy+1) - log(sigm2_i)/2 - log(2*pi)/2;
% ldconst = log(factorial(N)/(factorial(yy)*factorial(N-yy))-log(sigm2_i)/2 -log(2*pi)/2;
% Create function handle for the function to be integrated
df = @binomial_norm;
  % use log to avoid underflow, and derivatives for faster search
ld = @log_binomial_norm;
ldg = @log_binomial_norm_g;
ldg2 = @log_binomial_norm_g2;
% Set the limits for integration
% Binomial likelihood is log-concave so the binomial_norm
% function is unimodal, which makes things easier
if yy==0 || yy==N
% with yy==0 or yy==N the mode of the likelihood is not defined
% use the mode of the Gaussian (cavity or posterior) as a first guess
modef = myy_i;
else
% use precision weighted mean of the Gaussian approximation of the
% binomial likelihood and Gaussian
mean_app = log(yy./(N-yy));
ld0=1/(1+exp(-mean_app));
ld1=(1-ld0)*ld0;
ld2=ld0-3*ld0^2+2*ld0^3;
var_app=inv(-( yy*(ld2*ld0-ld1^2)/ld0^2 + (N-yy)*(ld2*(ld0-1)-ld1^2)/(ld0-1)^2 ));
modef = (myy_i/sigm2_i + mean_app/var_app)/(1/sigm2_i + 1/var_app);
% sigm_app = sqrt((1/sigm2_i + 1/var_app)^-1);
end
% find the mode of the integrand using Newton iterations
  % a few iterations are enough, since the first guess is in the right direction
niter=3; % number of Newton iterations
mindelta=1e-6; % tolerance in stopping Newton iterations
for ni=1:niter
g = ldg(modef);
h = ldg2(modef);
delta=-g/h;
modef=modef+delta;
if abs(delta)<mindelta
break
end
end
% integrand limits based on Gaussian approximation at mode
modes=sqrt(-1/h);
minf=modef-4*modes;
maxf=modef+4*modes;
modeld=ld(modef);
iter=0;
% check that density at end points is low enough
lddiff=12; % min difference in log-density between mode and end-points
minld=ld(minf);
step=1;
while minld>(modeld-lddiff)
minf=minf-step*modes;
minld=ld(minf);
iter=iter+1;
step=step*2;
if iter>100
      error(['lik_binomial -> init_binomial_norm: ' ...
             'integration interval minimum not found ' ...
'even after looking hard!'])
end
end
maxld=ld(maxf);
step=1;
while maxld>(modeld-lddiff)
maxf=maxf+step*modes;
maxld=ld(maxf);
iter=iter+1;
step=step*2;
if iter>100
      error(['lik_binomial -> init_binomial_norm: ' ...
             'integration interval maximum not found ' ...
'even after looking hard!'])
end
end
function integrand = binomial_norm(f)
  % Binomial * Gaussian
integrand = exp(ldconst + yy*log(1./(1.+exp(-f)))+(N-yy)*log(1-1./(1.+exp(-f)))...
- 0.5 * (f-myy_i).^2./sigm2_i);
% integrand = exp(ldconst ...
% +yy*log(x)+(N-yy)*log(1-x) ...
% -0.5*(f-myy_i).^2./sigm2_i);
integrand(isnan(integrand))=0;
end
function log_int = log_binomial_norm(f)
% log(Binomial * Gaussian)
% log_binomial_norm is used to avoid underflow when searching
% integration interval
log_int = ldconst + yy*log(1./(1.+exp(-f)))+(N-yy)*log(1-1./(1.+exp(-f)))...
- 0.5 * (f-myy_i).^2./sigm2_i;
% log_int = ldconst ...
% -log(1+exp(-yy.*f)) ...
% -0.5*(f-myy_i).^2./sigm2_i;
end
function g = log_binomial_norm_g(f)
% d/df log(Binomial * Gaussian)
  % derivative of log_binomial_norm
g = -(f-myy_i)./sigm2_i - exp(-f).*(N-yy)./((1+exp(-f)).^2.*(1-1./(1+exp(-f)))) ...
+ exp(-f).*yy./(1+exp(-f));
% g = yy./(exp(f*yy)+1)...
% + (myy_i - f)./sigm2_i;
end
function g2 = log_binomial_norm_g2(f)
% d^2/df^2 log(Binomial * Gaussian)
  % second derivative of log_binomial_norm
    g2 = -((1+exp(2.*f)+exp(f).*(2+N*sigm2_i))./((1+exp(f)).^2.*sigm2_i));
% a=exp(f*yy);
% g2 = -a*(yy./(a+1)).^2 ...
% -1/sigm2_i;
end
end
function p = lik_binomial_invlink(lik, f, z)
%LIK_BINOMIAL_INVLINK Returns values of inverse link function
%
% Description
% P = LIK_BINOMIAL_INVLINK(LIK, F) takes a likelihood structure LIK and
% latent values F and returns the values of inverse link function P.
% This subfunction is needed when using gp_predprctmu.
%
% See also
% LIK_BINOMIAL_LL, LIK_BINOMIAL_PREDY
p = logitinv(f);
end
function reclik = lik_binomial_recappend(reclik, ri, lik)
%RECAPPEND Append the parameters to the record
%
% Description
% RECLIK = GPCF_BINOMIAL_RECAPPEND(RECLIK, RI, LIK) takes a
% likelihood record structure RECLIK, record index RI and
% likelihood structure LIK with the current MCMC samples of
% the parameters. Returns RECLIK which contains all the old
% samples and the current samples from LIK. This subfunction
% is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC
if nargin == 2
reclik.type = 'Binomial';
% Set the function handles
reclik.fh.pak = @lik_binomial_pak;
reclik.fh.unpak = @lik_binomial_unpak;
reclik.fh.ll = @lik_binomial_ll;
reclik.fh.llg = @lik_binomial_llg;
reclik.fh.llg2 = @lik_binomial_llg2;
reclik.fh.llg3 = @lik_binomial_llg3;
reclik.fh.tiltedMoments = @lik_binomial_tiltedMoments;
reclik.fh.invlink = @lik_binomial_invlink;
reclik.fh.predprcty = @lik_binomial_predprcty;
reclik.fh.predy = @lik_binomial_predy;
    reclik.fh.recappend = @lik_binomial_recappend;
return
end
end
| github | lcnbeapp/beapp-master | lik_loglogistic.m | .m | beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/lik_loglogistic.m | 23,851 | utf_8 | 488337e3c0403b611d2cf1246ceb8658 |
function lik = lik_loglogistic(varargin)
%LIK_LOGLOGISTIC Create a right censored log-logistic likelihood structure
%
% Description
% LIK = LIK_LOGLOGISTIC('PARAM1',VALUE1,'PARAM2,VALUE2,...)
% creates a likelihood structure for right censored log-logistic
% survival model in which the named parameters have the
% specified values. Any unspecified parameters are set to
% default values.
%
% LIK = LIK_LOGLOGISTIC(LIK,'PARAM1',VALUE1,'PARAM2,VALUE2,...)
% modify a likelihood structure with the named parameters
% altered with the specified values.
%
%    Parameters for log-logistic likelihood [default]
% shape - shape parameter r [1]
% shape_prior - prior for shape [prior_logunif]
%
% Note! If the prior is 'prior_fixed' then the parameter in
% question is considered fixed and it is not handled in
% optimization, grid integration, MCMC etc.
%
% The likelihood is defined as follows:
% __ n
% p(y|f, z) = || i=1 [ (r/exp(f_i)*(y_i/exp(f_i))^(r-1)/
% (1+(y_i/exp(f_i))^r))^(1-z_i)
% *(1+(y_i/exp(f_i))^r)^(-z_i) ]
%
%
% where r is the shape parameter of log-logistic distribution.
% z is a vector of censoring indicators with z = 0 for uncensored event
% and z = 1 for right censored event.
%
%  When using the log-logistic likelihood you need to give the vector z
% as an extra parameter to each function that requires also y.
% For example, you should call gpla_e as follows: gpla_e(w, gp,
% x, y, 'z', z)
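%
%  A minimal usage sketch for right censored survival data (the inputs
%  x, y and the censoring indicators z are hypothetical, and gp_set and
%  gp_optim are assumed to be on the path; shown only to illustrate how
%  z is passed along):
%    lik = lik_loglogistic('shape', 1);
%    gp  = gp_set('lik', lik, 'cf', gpcf_sexp(), 'jitterSigma2', 1e-6);
%    gp  = gp_optim(gp, x, y, 'z', z);   % z(i)=1 marks a right censored y(i)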
%
% See also
% GP_SET, LIK_*, PRIOR_*
%
% Copyright (c) 2012 Ville Tolvanen
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'LIK_LOGLOGISTIC';
ip.addOptional('lik', [], @isstruct);
ip.addParamValue('shape',1, @(x) isscalar(x) && x>0);
ip.addParamValue('shape_prior',prior_logunif(), @(x) isstruct(x) || isempty(x));
ip.parse(varargin{:});
lik=ip.Results.lik;
if isempty(lik)
init=true;
lik.type = 'Log-Logistic';
else
if ~isfield(lik,'type') || ~isequal(lik.type,'Log-Logistic')
error('First argument does not seem to be a valid likelihood function structure')
end
init=false;
end
% Initialize parameters
if init || ~ismember('shape',ip.UsingDefaults)
lik.shape = ip.Results.shape;
end
% Initialize prior structure
if init
lik.p=[];
end
if init || ~ismember('shape_prior',ip.UsingDefaults)
lik.p.shape=ip.Results.shape_prior;
end
if init
% Set the function handles to the subfunctions
lik.fh.pak = @lik_loglogistic_pak;
lik.fh.unpak = @lik_loglogistic_unpak;
lik.fh.lp = @lik_loglogistic_lp;
lik.fh.lpg = @lik_loglogistic_lpg;
lik.fh.ll = @lik_loglogistic_ll;
lik.fh.llg = @lik_loglogistic_llg;
lik.fh.llg2 = @lik_loglogistic_llg2;
lik.fh.llg3 = @lik_loglogistic_llg3;
lik.fh.tiltedMoments = @lik_loglogistic_tiltedMoments;
lik.fh.siteDeriv = @lik_loglogistic_siteDeriv;
lik.fh.invlink = @lik_loglogistic_invlink;
lik.fh.predy = @lik_loglogistic_predy;
lik.fh.recappend = @lik_loglogistic_recappend;
lik.fh.predcdf=@lik_loglogistic_predcdf;
end
end
function [w,s] = lik_loglogistic_pak(lik)
%LIK_LOGLOGISTIC_PAK Combine likelihood parameters into one vector.
%
% Description
% W = LIK_LOGLOGISTIC_PAK(LIK) takes a likelihood structure LIK and
% combines the parameters into a single row vector W. This is a
% mandatory subfunction used for example in energy and gradient
% computations.
%
% w = log(lik.shape)
%
% See also
% LIK_LOGLOGISTIC_UNPAK, GP_PAK
w=[];s={};
if ~isempty(lik.p.shape)
w = log(lik.shape);
s = [s; 'log(loglogistic.shape)'];
[wh sh] = lik.p.shape.fh.pak(lik.p.shape);
w = [w wh];
s = [s; sh];
end
end
function [lik, w] = lik_loglogistic_unpak(lik, w)
%LIK_LOGLOGISTIC_UNPAK Extract likelihood parameters from the vector.
%
% Description
% [LIK, W] = LIK_LOGLOGISTIC_UNPAK(W, LIK) takes a likelihood
% structure LIK and extracts the parameters from the vector W
% to the LIK structure. This is a mandatory subfunction used
% for example in energy and gradient computations.
%
% Assignment is inverse of
% w = log(lik.shape)
%
% See also
% LIK_LOGLOGISTIC_PAK, GP_UNPAK
if ~isempty(lik.p.shape)
lik.shape = exp(w(1));
w = w(2:end);
[p, w] = lik.p.shape.fh.unpak(lik.p.shape, w);
lik.p.shape = p;
end
end
function lp = lik_loglogistic_lp(lik, varargin)
%LIK_LOGLOGISTIC_LP log(prior) of the likelihood parameters
%
% Description
% LP = LIK_LOGLOGISTIC_LP(LIK) takes a likelihood structure LIK and
% returns log(p(th)), where th collects the parameters. This subfunction
% is needed when there are likelihood parameters.
%
% See also
% LIK_LOGLOGISTIC_LLG, LIK_LOGLOGISTIC_LLG3, LIK_LOGLOGISTIC_LLG2, GPLA_E
% If prior for shape parameter, add its contribution
lp=0;
if ~isempty(lik.p.shape)
lp = lik.p.shape.fh.lp(lik.shape, lik.p.shape) +log(lik.shape);
end
end
function lpg = lik_loglogistic_lpg(lik)
%LIK_LOGLOGISTIC_LPG d log(prior)/dth of the likelihood
% parameters th
%
% Description
% E = LIK_LOGLOGISTIC_LPG(LIK) takes a likelihood structure LIK and
% returns d log(p(th))/dth, where th collects the parameters. This
% subfunction is needed when there are likelihood parameters.
%
% See also
% LIK_LOGLOGISTIC_LLG, LIK_LOGLOGISTIC_LLG3, LIK_LOGLOGISTIC_LLG2, GPLA_G
lpg=[];
if ~isempty(lik.p.shape)
% Evaluate the gprior with respect to shape
ggs = lik.p.shape.fh.lpg(lik.shape, lik.p.shape);
lpg = ggs(1).*lik.shape + 1;
if length(ggs) > 1
lpg = [lpg ggs(2:end)];
end
end
end
function ll = lik_loglogistic_ll(lik, y, f, z)
%LIK_LOGLOGISTIC_LL Log likelihood
%
% Description
% LL = LIK_LOGLOGISTIC_LL(LIK, Y, F, Z) takes a likelihood
% structure LIK, survival times Y, censoring indicators Z, and
% latent values F. Returns the log likelihood, log p(y|f,z).
% This subfunction is needed when using Laplace approximation
% or MCMC for inference with non-Gaussian likelihoods. This
% subfunction is also used in information criteria (DIC, WAIC)
% computations.
%
% See also
% LIK_LOGLOGISTIC_LLG, LIK_LOGLOGISTIC_LLG3, LIK_LOGLOGISTIC_LLG2, GPLA_E
if isempty(z)
error(['lik_loglogistic -> lik_loglogistic_ll: missing z! '...
'loglogistic likelihood needs the censoring '...
'indicators as an extra input z. See, for '...
'example, lik_loglogistic and gpla_e. ']);
end
r = lik.shape;
ll = sum((1-z).*(log(r)+(r-1).*log(y)-r.*f) +(z-2).*log(1+(y./exp(f)).^r));
end
function llg = lik_loglogistic_llg(lik, y, f, param, z)
%LIK_LOGLOGISTIC_LLG Gradient of the log likelihood
%
% Description
% LLG = LIK_LOGLOGISTIC_LLG(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, survival times Y, censoring indicators Z and
% latent values F. Returns the gradient of the log likelihood
% with respect to PARAM. At the moment PARAM can be 'param' or
% 'latent'. This subfunction is needed when using Laplace
% approximation or MCMC for inference with non-Gaussian likelihoods.
%
% See also
% LIK_LOGLOGISTIC_LL, LIK_LOGLOGISTIC_LLG2, LIK_LOGLOGISTIC_LLG3, GPLA_E
if isempty(z)
error(['lik_loglogistic -> lik_loglogistic_llg: missing z! '...
'loglogistic likelihood needs the censoring '...
'indicators as an extra input z. See, for '...
'example, lik_loglogistic and gpla_e. ']);
end
r = lik.shape;
switch param
case 'param'
llg = sum((1-z).*(1/r+log(y)-f) + (z-2)./(1+(y./exp(f)).^r).* ...
(y./exp(f)).^r.*(log(y)-f));
% correction for the log transformation
llg = llg.*lik.shape;
case 'latent'
llg = -r.*(1-z) - (z-2).*r.*(y./exp(f)).^r./(1+(y./exp(f)).^r);
end
end
function llg2 = lik_loglogistic_llg2(lik, y, f, param, z)
%LIK_LOGLOGISTIC_LLG2 Second gradients of the log likelihood
%
% Description
% LLG2 = LIK_LOGLOGISTIC_LLG2(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, survival times Y, censoring indicators Z, and
% latent values F. Returns the hessian of the log likelihood
% with respect to PARAM. At the moment PARAM can be only
% 'latent'. LLG2 is a vector with diagonal elements of the
% Hessian matrix (off diagonals are zero). This subfunction
% is needed when using Laplace approximation or EP for
% inference with non-Gaussian likelihoods.
%
% See also
% LIK_LOGLOGISTIC_LL, LIK_LOGLOGISTIC_LLG, LIK_LOGLOGISTIC_LLG3, GPLA_E
if isempty(z)
error(['lik_loglogistic -> lik_loglogistic_llg2: missing z! '...
'loglogistic likelihood needs the censoring '...
'indicators as an extra input z. See, for '...
'example, lik_loglogistic and gpla_e. ']);
end
r = lik.shape;
switch param
case 'param'
case 'latent'
llg2 = r.^2.*(z-2).*(y./exp(f)).^r./(1+(y./exp(f)).^r).^2;
case 'latent+param'
llg2 = (z-1) - (z-2).*(y./exp(f)).^r./(1+(y./exp(f)).^r) ...
+ (z-2).*r.*(y./exp(f)).^(2*r).*(log(y)-f)./(1+(y./exp(f)).^r).^2 ...
- (z-2).*r.*(y./exp(f)).^r.*(log(y)-f)./(1+(y./exp(f)).^r);
% correction due to the log transformation
llg2 = llg2.*r;
end
end
function llg3 = lik_loglogistic_llg3(lik, y, f, param, z)
%LIK_LOGLOGISTIC_LLG3 Third gradients of the log likelihood
%
% Description
% LLG3 = LIK_LOGLOGISTIC_LLG3(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, survival times Y, censoring indicators Z and
% latent values F and returns the third gradients of the log
% likelihood with respect to PARAM. At the moment PARAM can be
% only 'latent'. LLG3 is a vector with third gradients. This
% subfunction is needed when using Laplace approximation for
% inference with non-Gaussian likelihoods.
%
% See also
% LIK_LOGLOGISTIC_LL, LIK_LOGLOGISTIC_LLG, LIK_LOGLOGISTIC_LLG2, GPLA_E, GPLA_G
if isempty(z)
error(['lik_loglogistic -> lik_loglogistic_llg3: missing z! '...
'loglogistic likelihood needs the censoring '...
'indicators as an extra input z. See, for '...
'example, lik_loglogistic and gpla_e. ']);
end
r = lik.shape;
switch param
case 'param'
case 'latent'
llg3 = r.^3.*(z-2).*(y./exp(f)).^r.*(-1+(y./exp(f)).^r)./(1+(y./exp(f)).^r).^3;
case 'latent2+param'
llg3 = -(r.*(z-2).*(y./exp(f)).^r.*(-2-2.*(y./exp(f)).^r + ...
r.*(-1+(y./exp(f)).^r).*log(y./exp(f))))./(1+(y./exp(f)).^r).^3;
% correction due to the log transformation
llg3 = llg3.*lik.shape;
end
end
function [logM_0, m_1, sigm2hati1] = lik_loglogistic_tiltedMoments(lik, y, i1, sigm2_i, myy_i, z)
%LIK_LOGLOGISTIC_TILTEDMOMENTS Returns the marginal moments for EP algorithm
%
% Description
% [M_0, M_1, M2] = LIK_LOGLOGISTIC_TILTEDMOMENTS(LIK, Y, I, S2,
% MYY, Z) takes a likelihood structure LIK, survival times
% Y, censoring indicators Z, index I and cavity variance S2 and
% mean MYY. Returns the zeroth moment M_0, mean M_1 and
% variance M_2 of the posterior marginal (see Rasmussen and
% Williams (2006): Gaussian processes for Machine Learning,
% page 55). This subfunction is needed when using EP for
% inference with non-Gaussian likelihoods.
%
% See also
% GPEP_E
if isempty(z)
error(['lik_loglogistic -> lik_loglogistic_tiltedMoments: missing z!'...
'loglogistic likelihood needs the censoring '...
'indicators as an extra input z. See, for '...
'example, lik_loglogistic and gpep_e. ']);
end
yy = y(i1);
yc = 1-z(i1);
r = lik.shape;
logM_0=zeros(size(yy));
m_1=zeros(size(yy));
sigm2hati1=zeros(size(yy));
for i=1:length(i1)
% get a function handle of an unnormalized tilted distribution
    % (likelihood * cavity = log-logistic * Gaussian)
% and useful integration limits
[tf,minf,maxf]=init_loglogistic_norm(yy(i),myy_i(i),sigm2_i(i),yc(i),r);
% Integrate with quadrature
RTOL = 1.e-6;
ATOL = 1.e-10;
[m_0, m_1(i), m_2] = quad_moments(tf, minf, maxf, RTOL, ATOL);
sigm2hati1(i) = m_2 - m_1(i).^2;
% If the second central moment is less than cavity variance
% integrate more precisely. Theoretically for log-concave
% likelihood should be sigm2hati1 < sigm2_i.
if sigm2hati1(i) >= sigm2_i(i)
ATOL = ATOL.^2;
RTOL = RTOL.^2;
[m_0, m_1(i), m_2] = quad_moments(tf, minf, maxf, RTOL, ATOL);
sigm2hati1(i) = m_2 - m_1(i).^2;
if sigm2hati1(i) >= sigm2_i(i)
error('lik_loglogistic_tilted_moments: sigm2hati1 >= sigm2_i');
end
end
logM_0(i) = log(m_0);
end
end
function [g_i] = lik_loglogistic_siteDeriv(lik, y, i1, sigm2_i, myy_i, z)
%LIK_LOGLOGISTIC_SITEDERIV Evaluate the expectation of the gradient
% of the log likelihood term with respect
% to the likelihood parameters for EP
%
% Description [M_0, M_1, M2] =
% LIK_LOGLOGISTIC_SITEDERIV(LIK, Y, I, S2, MYY, Z) takes a
%    likelihood structure LIK, survival times Y, censoring
%    indicators Z, index I and cavity variance S2 and mean MYY. 
% Returns E_f [d log p(y_i|f_i) /d a], where a is the
% likelihood parameter and the expectation is over the
% marginal posterior. This term is needed when evaluating the
% gradients of the marginal likelihood estimate Z_EP with
% respect to the likelihood parameters (see Seeger (2008):
% Expectation propagation for exponential families). This
% subfunction is needed when using EP for inference with
% non-Gaussian likelihoods and there are likelihood parameters.
%
% See also
% GPEP_G
if isempty(z)
error(['lik_loglogistic -> lik_loglogistic_siteDeriv: missing z!'...
'loglogistic likelihood needs the censoring '...
'indicators as an extra input z. See, for '...
'example, lik_loglogistic and gpla_e. ']);
end
yy = y(i1);
yc = 1-z(i1);
r = lik.shape;
% get a function handle of an unnormalized tilted distribution
  % (likelihood * cavity = log-logistic * Gaussian)
% and useful integration limits
[tf,minf,maxf]=init_loglogistic_norm(yy,myy_i,sigm2_i,yc,r);
% additionally get function handle for the derivative
td = @deriv;
% Integrate with quadgk
[m_0, fhncnt] = quadgk(tf, minf, maxf);
[g_i, fhncnt] = quadgk(@(f) td(f).*tf(f)./m_0, minf, maxf);
g_i = g_i.*r;
function g = deriv(f)
g = yc.*(1/r+log(yy)-f) + (-1-yc)./(1+(yy./exp(f)).^r).* ...
(yy./exp(f)).^r.*(log(yy)-f);
end
end
function [lpy, Ey, Vary] = lik_loglogistic_predy(lik, Ef, Varf, yt, zt)
%LIK_LOGLOGISTIC_PREDY Returns the predictive mean, variance and density of y
%
% Description
%    LPY = LIK_LOGLOGISTIC_PREDY(LIK, EF, VARF, YT, ZT)
%    Returns logarithm of the predictive density PY of YT, that is 
%        p(yt | y, zt) = \int p(yt | f, zt) p(f|y) df.
% This requires also the survival times YT, censoring indicators ZT.
% This subfunction is needed when computing posterior predictive
% distributions for future observations.
%
% [LPY, EY, VARY] = LIK_LOGLOGISTIC_PREDY(LIK, EF, VARF) takes a
% likelihood structure LIK, posterior mean EF and posterior
% Variance VARF of the latent variable and returns the
% posterior predictive mean EY and variance VARY of the
% observations related to the latent variables. This subfunction
% is needed when computing posterior predictive distributions for
% future observations.
%
%
% See also
% GPLA_PRED, GPEP_PRED, GPMC_PRED
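%
%  A call sketch (Ef and Varf are the latent posterior mean and variance
%  at the test points; yt and zt are hypothetical test survival times and
%  censoring indicators):
%    lpyt = lik.fh.predy(lik, Ef, Varf, yt, zt);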
if isempty(zt)
error(['lik_loglogistic -> lik_loglogistic_predy: missing zt!'...
'loglogistic likelihood needs the censoring '...
'indicators as an extra input zt. See, for '...
'example, lik_loglogistic and gpla_e. ']);
end
yc = 1-zt;
r = lik.shape;
Ey=[];
Vary=[];
% Evaluate the posterior predictive densities of the given observations
lpy = zeros(length(yt),1);
for i1=1:length(yt)
if abs(Ef(i1))>700
lpy(i1) = NaN;
else
% get a function handle of the likelihood times posterior
      % (likelihood * posterior = log-logistic * Gaussian)
% and useful integration limits
[pdf,minf,maxf]=init_loglogistic_norm(...
yt(i1),Ef(i1),Varf(i1),yc(i1),r);
% integrate over the f to get posterior predictive distribution
lpy(i1) = log(quadgk(pdf, minf, maxf));
end
end
end
function [df,minf,maxf] = init_loglogistic_norm(yy,myy_i,sigm2_i,yc,r)
%INIT_LOGLOGISTIC_NORM
%
% Description
% Return function handle to a function evaluating
% loglogistic * Gaussian which is used for evaluating
% (likelihood * cavity) or (likelihood * posterior) Return
% also useful limits for integration. This is private function
% for lik_loglogistic. This subfunction is needed by subfunctions
% tiltedMoments, siteDeriv and predy.
%
% See also
% LIK_LOGLOGISTIC_TILTEDMOMENTS, LIK_LOGLOGISTIC_SITEDERIV,
% LIK_LOGLOGISTIC_PREDY
% avoid repetitive evaluation of constant part
ldconst = yc.*log(r) + yc.*(r-1).*log(yy) ...
- log(sigm2_i)/2 - log(2*pi)/2;
% Create function handle for the function to be integrated
df = @loglogistic_norm;
  % use log to avoid underflow, and derivatives for faster search
ld = @log_loglogistic_norm;
ldg = @log_loglogistic_norm_g;
ldg2 = @log_loglogistic_norm_g2;
% Set the limits for integration
if yc==0
    % with yc==0 (right censored observation) the mode of the likelihood is not defined
% use the mode of the Gaussian (cavity or posterior) as a first guess
modef = myy_i;
else
% use precision weighted mean of the Gaussian approximation
% of the loglogistic likelihood and Gaussian
mu=-log(yc.^(1/r)./yy);
s2=-r^2.*yc.*(-1-yc)./(1+yc).^2;
% s2=1;
modef = (myy_i/sigm2_i + mu/s2)/(1/sigm2_i + 1/s2);
end
% find the mode of the integrand using Newton iterations
  % a few iterations are enough, since the first guess is in the right direction
niter=4; % number of Newton iterations
mindelta=1e-6; % tolerance in stopping Newton iterations
for ni=1:niter
g=ldg(modef);
h=ldg2(modef);
delta=-g/h;
modef=modef+delta;
if abs(delta)<mindelta
break
end
end
% integrand limits based on Gaussian approximation at mode
modes=sqrt(-1/h);
minf=modef-8*modes;
maxf=modef+8*modes;
modeld=ld(modef);
iter=0;
% check that density at end points is low enough
lddiff=20; % min difference in log-density between mode and end-points
minld=ld(minf);
step=1;
while minld>(modeld-lddiff)
minf=minf-step*modes;
minld=ld(minf);
iter=iter+1;
step=step*2;
if iter>100
error(['lik_loglogistic -> init_loglogistic_norm: ' ...
             'integration interval minimum not found ' ...
'even after looking hard!'])
end
end
maxld=ld(maxf);
step=1;
while maxld>(modeld-lddiff)
maxf=maxf+step*modes;
maxld=ld(maxf);
iter=iter+1;
step=step*2;
if iter>100
error(['lik_loglogistic -> init_loglogistic_norm: ' ...
             'integration interval maximum not found ' ...
'even after looking hard!'])
end
end
function integrand = loglogistic_norm(f)
% loglogistic * Gaussian
integrand = exp(ldconst ...
- yc.*r.*f +(-1-yc).*log(1+(yy./exp(f)).^r) ...
-0.5*(f-myy_i).^2./sigm2_i);
end
function log_int = log_loglogistic_norm(f)
% log(loglogistic * Gaussian)
% log_loglogistic_norm is used to avoid underflow when searching
% integration interval
log_int = ldconst ...
-yc.*r.*f +(-1-yc).*log(1+(yy./exp(f)).^r) ...
-0.5*(f-myy_i).^2./sigm2_i;
end
function g = log_loglogistic_norm_g(f)
% d/df log(loglogistic * Gaussian)
% derivative of log_loglogistic_norm
g = -r.*yc - (-1-yc).*r.*(yy./exp(f)).^r./(1+(yy./exp(f)).^r) ...
+ (myy_i - f)./sigm2_i;
end
function g2 = log_loglogistic_norm_g2(f)
% d^2/df^2 log(loglogistic * Gaussian)
  % second derivative of log_loglogistic_norm
g2 = r.^2.*(-1-yc).*(yy./exp(f)).^r./(1+(yy./exp(f)).^r).^2 ...
-1/sigm2_i;
end
end
function cdf = lik_loglogistic_predcdf(lik, Ef, Varf, yt)
%LIK_LOGLOGISTIC_PREDCDF Returns the predictive cdf evaluated at yt
%
% Description
% CDF = LIK_LOGLOGISTIC_PREDCDF(LIK, EF, VARF, YT)
% Returns the predictive cdf evaluated at YT given likelihood
% structure LIK, posterior mean EF and posterior Variance VARF
% of the latent variable. This subfunction is needed when using
% functions gp_predcdf or gp_kfcv_cdf.
%
% See also
% GP_PREDCDF
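%
%  A call sketch (Ef and Varf are the latent posterior mean and variance
%  at the test points; yt are the hypothetical times at which the cdf is
%  evaluated):
%    cdf = lik.fh.predcdf(lik, Ef, Varf, yt);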
r = lik.shape;
% Evaluate the posterior predictive densities of the given observations
cdf = zeros(length(yt),1);
for i1=1:length(yt)
% Get a function handle of the likelihood times posterior
% (likelihood * posterior = log-logistic * Gaussian)
% and useful integration limits.
% yc=0 when evaluating predictive cdf
[pdf,minf,maxf]=init_loglogistic_norm(...
yt(i1),Ef(i1),Varf(i1),0,r);
% integrate over the f to get posterior predictive distribution
cdf(i1) = 1-quadgk(pdf, minf, maxf);
end
end
function p = lik_loglogistic_invlink(lik, f)
%LIK_LOGLOGISTIC_INVLINK  Returns values of inverse link function
%
% Description
% P = LIK_LOGLOGISTIC_INVLINK(LIK, F) takes a likelihood structure LIK and
% latent values F and returns the values of inverse link function P.
% This subfunction is needed when using function gp_predprctmu.
%
% See also
% LIK_LOGLOGISTIC_LL, LIK_LOGLOGISTIC_PREDY
p = exp(f);
end
function reclik = lik_loglogistic_recappend(reclik, ri, lik)
%RECAPPEND Append the parameters to the record
%
% Description
% RECLIK = GPCF_LOGLOGISTIC_RECAPPEND(RECLIK, RI, LIK) takes a
% likelihood record structure RECLIK, record index RI and
% likelihood structure LIK with the current MCMC samples of
% the parameters. Returns RECLIK which contains all the old
% samples and the current samples from LIK. This subfunction
% is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC
if nargin == 2
% Initialize the record
reclik.type = 'Log-Logistic';
% Initialize parameter
reclik.shape = [];
% Set the function handles
reclik.fh.pak = @lik_loglogistic_pak;
reclik.fh.unpak = @lik_loglogistic_unpak;
reclik.fh.lp = @lik_loglogistic_lp;
reclik.fh.lpg = @lik_loglogistic_lpg;
reclik.fh.ll = @lik_loglogistic_ll;
reclik.fh.llg = @lik_loglogistic_llg;
reclik.fh.llg2 = @lik_loglogistic_llg2;
reclik.fh.llg3 = @lik_loglogistic_llg3;
reclik.fh.tiltedMoments = @lik_loglogistic_tiltedMoments;
reclik.fh.invlink = @lik_loglogistic_invlink;
reclik.fh.predy = @lik_loglogistic_predy;
reclik.fh.predcdf=@lik_loglogistic_predcdf;
reclik.fh.recappend = @lik_loglogistic_recappend;
reclik.p=[];
reclik.p.shape=[];
if ~isempty(ri.p.shape)
reclik.p.shape = ri.p.shape;
end
else
% Append to the record
reclik.shape(ri,:)=lik.shape;
if ~isempty(lik.p)
reclik.p.shape = lik.p.shape.fh.recappend(reclik.p.shape, ri, lik.p.shape);
end
end
end
| github | lcnbeapp/beapp-master | gpcf_sexp.m | .m | beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gpcf_sexp.m | 39,532 | utf_8 | 0ef6e85965ae3b707bb5d1d78df49bd4 |
function gpcf = gpcf_sexp(varargin)
%GPCF_SEXP Create a squared exponential covariance function
%
% Description
% GPCF = GPCF_SEXP('PARAM1',VALUE1,'PARAM2,VALUE2,...) creates
% squared exponential covariance function structure in which the
% named parameters have the specified values. Any unspecified
% parameters are set to default values.
%
% GPCF = GPCF_SEXP(GPCF,'PARAM1',VALUE1,'PARAM2,VALUE2,...)
% modify a covariance function structure with the named
% parameters altered with the specified values.
%
% Parameters for squared exponential covariance function [default]
% magnSigma2 - magnitude (squared) [0.1]
% lengthScale - length scale for each input. [1]
% This can be either scalar corresponding
% to an isotropic function or vector
% defining own length-scale for each input
% direction.
% magnSigma2_prior - prior for magnSigma2 [prior_logunif]
% lengthScale_prior - prior for lengthScale [prior_t]
% metric - metric structure used by the covariance function []
% selectedVariables - vector defining which inputs are used [all]
% selectedVariables is shorthand for using
% metric_euclidean with corresponding components
%
% Note! If the prior is 'prior_fixed' then the parameter in
% question is considered fixed and it is not handled in
% optimization, grid integration, MCMC etc.
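%
%    A construction sketch (the parameter values below are illustrative
%    only, not defaults taken from this file):
%      gpcf = gpcf_sexp('lengthScale', [1 2], 'magnSigma2', 0.5, ...
%                       'lengthScale_prior', prior_t('nu', 4));
%      gp   = gp_set('cf', gpcf);   % Gaussian likelihood by default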
%
% See also
% GP_SET, GPCF_*, PRIOR_*, METRIC_*
% Copyright (c) 2007-2010 Jarno Vanhatalo
% Copyright (c) 2010 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
% inputParser checks the arguments and assigns some default values
ip=inputParser;
ip.FunctionName = 'GPCF_SEXP';
ip.addOptional('gpcf', [], @isstruct);
ip.addParamValue('magnSigma2',0.1, @(x) isscalar(x) && x>0);
ip.addParamValue('lengthScale',1, @(x) isvector(x) && all(x>0));
ip.addParamValue('metric',[], @isstruct);
ip.addParamValue('magnSigma2_prior', prior_logunif(), ...
@(x) isstruct(x) || isempty(x));
ip.addParamValue('lengthScale_prior',prior_t(), ...
@(x) isstruct(x) || isempty(x));
ip.addParamValue('selectedVariables',[], @(x) isempty(x) || ...
(isvector(x) && all(x>0)));
ip.parse(varargin{:});
gpcf=ip.Results.gpcf;
if isempty(gpcf)
init=true;
gpcf.type = 'gpcf_sexp';
else
  if ~isfield(gpcf,'type') || ~isequal(gpcf.type,'gpcf_sexp')
error('First argument does not seem to be a valid covariance function structure')
end
init=false;
end
% Initialize parameters
if init || ~ismember('lengthScale',ip.UsingDefaults)
gpcf.lengthScale = ip.Results.lengthScale;
end
if init || ~ismember('magnSigma2',ip.UsingDefaults)
gpcf.magnSigma2 = ip.Results.magnSigma2;
end
% Initialize prior structure
if init
gpcf.p=[];
end
if init || ~ismember('lengthScale_prior',ip.UsingDefaults)
gpcf.p.lengthScale=ip.Results.lengthScale_prior;
end
if init || ~ismember('magnSigma2_prior',ip.UsingDefaults)
gpcf.p.magnSigma2=ip.Results.magnSigma2_prior;
end
%Initialize metric
if ~ismember('metric',ip.UsingDefaults)
if ~isempty(ip.Results.metric)
gpcf.metric = ip.Results.metric;
gpcf = rmfield(gpcf, 'lengthScale');
gpcf.p = rmfield(gpcf.p, 'lengthScale');
elseif isfield(gpcf,'metric')
if ~isfield(gpcf,'lengthScale')
gpcf.lengthScale = gpcf.metric.lengthScale;
end
if ~isfield(gpcf.p,'lengthScale')
gpcf.p.lengthScale = gpcf.metric.p.lengthScale;
end
gpcf = rmfield(gpcf, 'metric');
end
end
% selectedVariables
if ~ismember('selectedVariables',ip.UsingDefaults)
if ~isfield(gpcf,'metric')
gpcf.selectedVariables = ip.Results.selectedVariables;
elseif isfield(gpcf,'metric')
if ~isempty(ip.Results.selectedVariables)
gpcf.metric=metric_euclidean(gpcf.metric,...
'components',...
num2cell(ip.Results.selectedVariables));
if ~ismember('lengthScale',ip.UsingDefaults)
gpcf.metric.lengthScale=ip.Results.lengthScale;
gpcf = rmfield(gpcf, 'lengthScale');
end
if ~ismember('lengthScale_prior',ip.UsingDefaults)
gpcf.metric.p.lengthScale=ip.Results.lengthScale_prior;
gpcf.p = rmfield(gpcf.p, 'lengthScale');
end
else
if ~isfield(gpcf,'lengthScale')
gpcf.lengthScale = gpcf.metric.lengthScale;
end
if ~isfield(gpcf.p,'lengthScale')
gpcf.p.lengthScale = gpcf.metric.p.lengthScale;
end
gpcf = rmfield(gpcf, 'metric');
end
end
end
if init
% Set the function handles to the subfunctions
gpcf.fh.pak = @gpcf_sexp_pak;
gpcf.fh.unpak = @gpcf_sexp_unpak;
gpcf.fh.lp = @gpcf_sexp_lp;
gpcf.fh.lpg= @gpcf_sexp_lpg;
gpcf.fh.cfg = @gpcf_sexp_cfg;
gpcf.fh.cfdg = @gpcf_sexp_cfdg;
gpcf.fh.cfdg2 = @gpcf_sexp_cfdg2;
gpcf.fh.ginput = @gpcf_sexp_ginput;
gpcf.fh.ginput2 = @gpcf_sexp_ginput2;
gpcf.fh.ginput3 = @gpcf_sexp_ginput3;
gpcf.fh.ginput4 = @gpcf_sexp_ginput4;
gpcf.fh.cov = @gpcf_sexp_cov;
gpcf.fh.trcov = @gpcf_sexp_trcov;
gpcf.fh.trvar = @gpcf_sexp_trvar;
gpcf.fh.recappend = @gpcf_sexp_recappend;
end
end
function [w,s] = gpcf_sexp_pak(gpcf)
%GPCF_SEXP_PAK Combine GP covariance function parameters into
% one vector
%
% Description
% W = GPCF_SEXP_PAK(GPCF) takes a covariance function
% structure GPCF and combines the covariance function
% parameters and their hyperparameters into a single row
% vector W. This is a mandatory subfunction used
% for example in energy and gradient computations.
%
% w = [ log(gpcf.magnSigma2)
% (hyperparameters of gpcf.magnSigma2)
% log(gpcf.lengthScale(:))
% (hyperparameters of gpcf.lengthScale)]'
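%
%    For example (illustrative), gpcf_sexp('magnSigma2',0.5,
%    'lengthScale',[1 2]) with priors that contribute no extra
%    hyperparameters of their own packs to w = [log(0.5) log(1) log(2)].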
%
% See also
% GPCF_SEXP_UNPAK
w=[];s={};
if ~isempty(gpcf.p.magnSigma2)
w = [w log(gpcf.magnSigma2)];
s = [s; 'log(sexp.magnSigma2)'];
% Hyperparameters of magnSigma2
[wh sh] = gpcf.p.magnSigma2.fh.pak(gpcf.p.magnSigma2);
w = [w wh];
s = [s; sh];
end
if isfield(gpcf,'metric')
[wh sh]=gpcf.metric.fh.pak(gpcf.metric);
w = [w wh];
s = [s; sh];
else
if ~isempty(gpcf.p.lengthScale)
w = [w log(gpcf.lengthScale)];
if numel(gpcf.lengthScale)>1
s = [s; sprintf('log(sexp.lengthScale x %d)',numel(gpcf.lengthScale))];
else
s = [s; 'log(sexp.lengthScale)'];
end
% Hyperparameters of lengthScale
[wh sh] = gpcf.p.lengthScale.fh.pak(gpcf.p.lengthScale);
w = [w wh];
s = [s; sh];
end
end
end
function [gpcf, w] = gpcf_sexp_unpak(gpcf, w)
%GPCF_SEXP_UNPAK Sets the covariance function parameters into
% the structure
%
% Description
% [GPCF, W] = GPCF_SEXP_UNPAK(GPCF, W) takes a covariance
% function structure GPCF and a parameter vector W, and
% returns a covariance function structure identical to the
% input, except that the covariance parameters have been set
% to the values in W. Deletes the values set to GPCF from W
% and returns the modified W. This is a mandatory subfunction
% used for example in energy and gradient computations.
%
% Assignment is inverse of
% w = [ log(gpcf.magnSigma2)
% (hyperparameters of gpcf.magnSigma2)
% log(gpcf.lengthScale(:))
% (hyperparameters of gpcf.lengthScale)]'
%
% See also
% GPCF_SEXP_PAK
gpp=gpcf.p;
if ~isempty(gpp.magnSigma2)
gpcf.magnSigma2 = exp(w(1));
w = w(2:end);
% Hyperparameters of magnSigma2
[p, w] = gpcf.p.magnSigma2.fh.unpak(gpcf.p.magnSigma2, w);
gpcf.p.magnSigma2 = p;
end
if isfield(gpcf,'metric')
[metric, w] = gpcf.metric.fh.unpak(gpcf.metric, w);
gpcf.metric = metric;
else
if ~isempty(gpp.lengthScale)
i1=1;
i2=length(gpcf.lengthScale);
gpcf.lengthScale = exp(w(i1:i2));
w = w(i2+1:end);
% Hyperparameters of lengthScale
[p, w] = gpcf.p.lengthScale.fh.unpak(gpcf.p.lengthScale, w);
gpcf.p.lengthScale = p;
end
end
end
function lp = gpcf_sexp_lp(gpcf)
%GPCF_SEXP_LP Evaluate the log prior of covariance function parameters
%
% Description
% LP = GPCF_SEXP_LP(GPCF) takes a covariance function
% structure GPCF and returns log(p(th)), where th collects the
% parameters. This is a mandatory subfunction used for example
% in energy computations.
%
% See also
% GPCF_SEXP_PAK, GPCF_SEXP_UNPAK, GPCF_SEXP_LPG, GP_LP
% Evaluate the prior contribution to the error. The parameters that
% are sampled are transformed, e.g., W = log(w) where w is all
% the "real" samples. On the other hand errors are evaluated in
% the W-space so we need take into account also the Jacobian of
% transformation, e.g., W -> w = exp(W). See Gelman et.al., 2004,
% Bayesian data Analysis, second edition, p24.
lp = 0;
gpp=gpcf.p;
if ~isempty(gpcf.p.magnSigma2)
lp = lp +gpp.magnSigma2.fh.lp(gpcf.magnSigma2, ...
gpp.magnSigma2) +log(gpcf.magnSigma2);
end
if isfield(gpcf,'metric')
lp = lp +gpcf.metric.fh.lp(gpcf.metric);
elseif ~isempty(gpp.lengthScale)
lp = lp +gpp.lengthScale.fh.lp(gpcf.lengthScale, ...
gpp.lengthScale) +sum(log(gpcf.lengthScale));
end
end
function lpg = gpcf_sexp_lpg(gpcf)
%GPCF_SEXP_LPG Evaluate gradient of the log prior with respect
% to the parameters.
%
% Description
% LPG = GPCF_SEXP_LPG(GPCF) takes a covariance function
% structure GPCF and returns LPG = d log (p(th))/dth, where th
% is the vector of parameters. This is a mandatory subfunction
% used in gradient computations.
%
% See also
% GPCF_SEXP_PAK, GPCF_SEXP_UNPAK, GPCF_SEXP_LP, GP_G
lpg = [];
gpp=gpcf.p;
if ~isempty(gpcf.p.magnSigma2)
lpgs = gpp.magnSigma2.fh.lpg(gpcf.magnSigma2, gpp.magnSigma2);
lpg = [lpg lpgs(1).*gpcf.magnSigma2+1 lpgs(2:end)];
end
if isfield(gpcf,'metric')
lpg_dist = gpcf.metric.fh.lpg(gpcf.metric);
lpg = [lpg lpg_dist];
else
if ~isempty(gpcf.p.lengthScale)
lll = length(gpcf.lengthScale);
lpgs = gpp.lengthScale.fh.lpg(gpcf.lengthScale, gpp.lengthScale);
lpg = [lpg lpgs(1:lll).*gpcf.lengthScale+1 lpgs(lll+1:end)];
end
end
end
function C = gpcf_sexp_cov(gpcf, x1, x2)
%GP_SEXP_COV Evaluate covariance matrix between two input vectors
%
% Description
% C = GP_SEXP_COV(GP, TX, X) takes in covariance function of a
% Gaussian process GP and two matrixes TX and X that contain
% input vectors to GP. Returns covariance matrix C. Every
% element ij of C contains covariance between inputs i in TX
% and j in X. This is a mandatory subfunction used for example in
% prediction and energy computations.
%
% See also
% GPCF_SEXP_TRCOV, GPCF_SEXP_TRVAR, GP_COV, GP_TRCOV
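%
%  For example (illustrative only; x1 and x2 are arbitrary n1-by-d and
%  n2-by-d input matrices):
%    K = gpcf.fh.cov(gpcf, x1, x2);   % n1-by-n2 covariance matrix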
if isempty(x2)
x2=x1;
end
if size(x1,2)~=size(x2,2)
error('the number of columns of X1 and X2 has to be same')
end
if isfield(gpcf,'metric')
dist = gpcf.metric.fh.dist(gpcf.metric, x1, x2).^2;
dist(dist<eps) = 0;
C = gpcf.magnSigma2.*exp(-dist./2);
else
if isfield(gpcf,'selectedVariables')
x1 = x1(:,gpcf.selectedVariables);
x2 = x2(:,gpcf.selectedVariables);
end
[n1,m1]=size(x1);
[n2,m2]=size(x2);
C=zeros(n1,n2);
ma2 = gpcf.magnSigma2;
% Evaluate the covariance
if ~isempty(gpcf.lengthScale)
s2 = 1./gpcf.lengthScale.^2;
if m1==1 && m2==1
dd = bsxfun(@minus,x1,x2');
dist=dd.^2*s2;
else
% If ARD is not used make s a vector of
% equal elements
        if numel(s2)==1
s2 = repmat(s2,1,m1);
end
dist=zeros(n1,n2);
for j=1:m1
dd = bsxfun(@minus,x1(:,j),x2(:,j)');
dist = dist + dd.^2.*s2(:,j);
end
end
dist(dist<eps) = 0;
C = ma2.*exp(-dist./2);
end
end
end
function C = gpcf_sexp_trcov(gpcf, x)
%GP_SEXP_TRCOV Evaluate training covariance matrix of inputs
%
% Description
% C = GP_SEXP_TRCOV(GP, TX) takes in covariance function of a
% Gaussian process GP and matrix TX that contains training
% input vectors. Returns covariance matrix C. Every element ij
% of C contains covariance between inputs i and j in TX.
% This is a mandatory subfunction used for example in
% prediction and energy computations.
%
% See also
% GPCF_SEXP_COV, GPCF_SEXP_TRVAR, GP_COV, GP_TRCOV
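%
%  For example (illustrative; x is an n-by-d training input matrix):
%    C = gpcf.fh.trcov(gpcf, x);   % n-by-n training covariance matrix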
if isfield(gpcf,'metric')
% If other than scaled euclidean metric
ma2 = gpcf.magnSigma2;
C = gpcf.metric.fh.dist(gpcf.metric, x).^2./2;
C = ma2.*exp(-C);
else
% If scaled euclidean metric
% Try to use the C-implementation
C = trcov(gpcf, x);
% C = NaN;
if isnan(C)
% If there wasn't C-implementation do here
if isfield(gpcf,'selectedVariables')
x = x(:,gpcf.selectedVariables);
end
[n, m] =size(x);
s = 1./(gpcf.lengthScale);
s2 = s.^2;
      if numel(s)==1
s2 = repmat(s2,1,m);
end
ma2 = gpcf.magnSigma2;
C = zeros(n,n);
for ii1=1:n-1
d = zeros(n-ii1,1);
col_ind = ii1+1:n;
for ii2=1:m
d = d+s2(ii2).*(x(col_ind,ii2)-x(ii1,ii2)).^2;
end
C(col_ind,ii1) = d./2;
end
C(C<eps)=0;
C = C+C';
C = ma2.*exp(-C);
end
end
end
function C = gpcf_sexp_trvar(gpcf, x)
%GP_SEXP_TRVAR Evaluate training variance vector
%
% Description
% C = GP_SEXP_TRVAR(GPCF, TX) takes in covariance function of
% a Gaussian process GPCF and matrix TX that contains training
% inputs. Returns variance vector C. Every element i of C
% contains variance of input i in TX. This is a mandatory
% subfunction used for example in prediction and energy computations.
% See also
% GPCF_SEXP_COV, GP_COV, GP_TRCOV
[n, m] =size(x);
C = ones(n,1).*gpcf.magnSigma2;
C(C<eps)=0;
end
function DKff = gpcf_sexp_cfg(gpcf, x, x2, mask, i1)
%GPCF_SEXP_CFG Evaluate gradient of covariance function
% with respect to the parameters
%
% Description
% DKff = GPCF_SEXP_CFG(GPCF, X) takes a covariance function
% structure GPCF, a matrix X of input vectors and returns
% DKff, the gradients of covariance matrix Kff = k(X,X) with
% respect to th (cell array with matrix elements). This is a
% mandatory subfunction used in gradient computations.
%
% DKff = GPCF_SEXP_CFG(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using
% sparse approximations (e.g. FIC).
%
% DKff = GPCF_SEXP_CFG(GPCF, X, [], MASK) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the diagonal of gradients of covariance matrix
% Kff = k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using sparse
% approximations (e.g. FIC).
%
% DKff = GPCF_SEXP_CFG(GPCF, X, X2, MASK, i) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2), or k(X,X) if X2 is empty, with respect to ith
% hyperparameter. This subfunction is needed when using
% memory save option in gp_set.
%
% See also
% GPCF_SEXP_PAK, GPCF_SEXP_UNPAK, GPCF_SEXP_LP, GP_G
gpp=gpcf.p;
DKff = {};
if nargin == 5
% Use memory save option
savememory=1;
if i1==0
% Return number of hyperparameters
i=0;
if ~isempty(gpcf.p.magnSigma2)
i=i+1;
end
if ~isempty(gpcf.p.lengthScale)
i=i+length(gpcf.lengthScale);
end
DKff=i;
return
end
else
savememory=0;
end
% Evaluate: DKff{1} = d Kff / d magnSigma2
% DKff{2} = d Kff / d lengthScale
% NOTE! Here we have already taken into account that the parameters
% are transformed through log() and thus dK/dlog(p) = p * dK/dp
% evaluate the gradient for training covariance
if nargin == 2 || (isempty(x2) && isempty(mask))
Cdm = gpcf_sexp_trcov(gpcf, x);
ii1=0;
if ~isempty(gpcf.p.magnSigma2)
ii1 = ii1 +1;
DKff{ii1} = Cdm;
end
if isfield(gpcf,'metric')
dist = gpcf.metric.fh.dist(gpcf.metric, x);
distg = gpcf.metric.fh.distg(gpcf.metric, x);
for i=1:length(distg)
ii1 = ii1+1;
DKff{ii1} = -Cdm.*dist.*distg{i};
end
else
if isfield(gpcf,'selectedVariables')
x = x(:,gpcf.selectedVariables);
end
[n, m] =size(x);
if ~savememory
i1=1:m;
else
if i1==1
DKff=DKff{1};
return
end
i1=i1-1;
ii1=ii1-1;
end
if ~isempty(gpcf.p.lengthScale)
% loop over all the lengthScales
if length(gpcf.lengthScale) == 1
% In the case of isotropic SEXP
s = 2./gpcf.lengthScale.^2;
dist = 0;
for i=1:m
D = bsxfun(@minus,x(:,i),x(:,i)');
dist = dist + D.^2;
end
D = Cdm.*s.*dist./2;
ii1 = ii1+1;
DKff{ii1} = D;
else
% In the case ARD is used
for i=i1
s = 2./gpcf.lengthScale(i).^2;
dist = bsxfun(@minus,x(:,i),x(:,i)');
D = Cdm.*s.*dist.^2./2;
ii1 = ii1+1;
DKff{ii1} = D;
end
end
end
end
% Evaluate the gradient of non-symmetric covariance (e.g. K_fu)
elseif nargin == 3 || isempty(mask)
if size(x,2) ~= size(x2,2)
      error('gpcf_sexp -> _cfg: The number of columns in x and x2 has to be the same. ')
end
ii1=0;
K = gpcf.fh.cov(gpcf, x, x2);
if ~isempty(gpcf.p.magnSigma2)
ii1 = ii1 +1;
DKff{ii1} = K;
end
if isfield(gpcf,'metric')
dist = gpcf.metric.fh.dist(gpcf.metric, x, x2);
distg = gpcf.metric.fh.distg(gpcf.metric, x, x2);
for i=1:length(distg)
ii1 = ii1+1;
DKff{ii1} = -K.*dist.*distg{i};
end
else
if isfield(gpcf,'selectedVariables')
x = x(:,gpcf.selectedVariables);
x2 = x2(:,gpcf.selectedVariables);
end
[n, m] =size(x);
if ~savememory
i1=1:m;
else
if i1==1
DKff=DKff{1};
return
end
i1=i1-1;
ii1=ii1-1;
end
if ~isempty(gpcf.p.lengthScale)
% Evaluate help matrix for calculations of derivatives with respect
% to the lengthScale
if length(gpcf.lengthScale) == 1
% In the case of an isotropic SEXP
s = 1./gpcf.lengthScale.^2;
dist = 0; dist2 = 0;
for i=1:m
dist = dist + (bsxfun(@minus,x(:,i),x2(:,i)')).^2;
end
DK_l = s.*K.*dist;
ii1=ii1+1;
DKff{ii1} = DK_l;
else
% In the case ARD is used
for i=i1
s = 1./gpcf.lengthScale(i).^2; % set the length
dist = bsxfun(@minus,x(:,i),x2(:,i)');
DK_l = s.*K.*dist.^2;
ii1=ii1+1;
DKff{ii1} = DK_l;
end
end
end
end
% Evaluate: DKff{1} = d mask(Kff,I) / d magnSigma2
% DKff{2...} = d mask(Kff,I) / d lengthScale
elseif nargin == 4 || nargin == 5
ii1=0;
[n, m] =size(x);
if ~isempty(gpcf.p.magnSigma2) && (~savememory || all(i1==1))
ii1 = ii1+1;
DKff{ii1} = gpcf.fh.trvar(gpcf, x); % d mask(Kff,I) / d magnSigma2
end
if isfield(gpcf,'metric')
dist = 0;
distg = gpcf.metric.fh.distg(gpcf.metric, x, [], 1);
for i=1:length(distg)
ii1 = ii1+1;
DKff{ii1} = 0;
end
else
if ~isempty(gpcf.p.lengthScale)
for i2=1:length(gpcf.lengthScale)
ii1 = ii1+1;
DKff{ii1} = 0; % d mask(Kff,I) / d lengthScale
end
end
end
end
if savememory
DKff=DKff{1};
end
end
function DKff = gpcf_sexp_cfdg(gpcf, x)
%GPCF_SEXP_CFDG Evaluate, with respect to the parameters, the gradient of
% the covariance function that has been differentiated once
% with respect to the input x.
%
% Description
% DKff = GPCF_SEXP_CFDG(GPCF, X) takes a covariance function
% structure GPCF, a matrix X of input vectors and returns
% DKff, the gradients of the differentiated covariance matrix
% dK(df,f)/dhyp = d(d k(X,X)/dx)/dhyp, with respect to the
% parameters.
%
% Evaluate: DKff{1:m} = d Kff / d magnSigma2
% DKff{m+1:2m} = d Kff / d lengthScale_m
% where m is the input dimension. If ARD is used, there is one
% lengthScale per dimension. This subfunction is needed when using
% derivative observations.
%
% See also
% GPCF_SEXP_GINPUT
[n, m] =size(x);
ii1=0;
Cdm = gpcf.fh.ginput4(gpcf, x);
% grad with respect to MAGNSIGMA
if ~isempty(gpcf.p.magnSigma2)
if m==1
ii1 = ii1 +1;
DKff{ii1} = Cdm{1};
else
DKffapu = cat(1,Cdm{1:m});
ii1=ii1+1;
DKff{ii1}=DKffapu;
end
end
% grad with respect to LENGTHSCALE
if isfield(gpcf,'metric')
error('Metric does not work with derivative observations (grad.obs)')
else
if ~isempty(gpcf.p.lengthScale)
% loop over all the lengthScales
if length(gpcf.lengthScale) == 1
% In the case of isotropic SEXP
s = 1./gpcf.lengthScale.^2;
dist = 0;
for i=1:m
D = bsxfun(@minus,x(:,i),x(:,i)');
dist = dist + D.^2;
end
% input dimension is 1
if m==1
G = Cdm{1}.*(dist.*s - 2);
ii1 = ii1+1;
DKff{ii1} = G;
% input dimension is >1
else
for i=1:m
G{i} = 2.*Cdm{i}.*(dist.*s./2 - 1);
end
DKffapu=cat(1,G{1:m});
ii1 = ii1+1;
DKff{ii1} = DKffapu;
end
else
% In the case ARD is used
if m~=length(gpcf.lengthScale)
error('Number of lengthScales does not match the input dimension')
end
%Preparing
for i=1:m
dist{i} = bsxfun(@minus,x(:,i),x(:,i)').^2;
s(i) = 1./gpcf.lengthScale(i);
end
for i=1:m
for j=1:m
% check whether the x-derivative dimension differs from the
% lengthScale-derivative dimension
if j~=i
D{j}= Cdm{j}.*dist{i}.*s(i).^2;
else
D{j} = Cdm{i}.*(dist{i}.*s(i).^2 - 2);
end
end
ii1=ii1+1;
DKffapu2{i}=cat(1,D{1:m});
DKff{ii1}=DKffapu2{i};
end
end
end
end
end
function DKff = gpcf_sexp_cfdg2(gpcf, x)
%GPCF_SEXP_CFDG2 Evaluate, with respect to the parameters, the gradient of
% the covariance function that has been differentiated with
% respect to both input variables x.
%
% Description
% DKff = GPCF_SEXP_CFDG2(GPCF, X) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of derivative covariance matrix
% dK(df,df)/dhyp = d(d^2 k(X1,X2)/dX1dX2)/dhyp with respect to
% the parameters
%
% Evaluate: DKff{1-m} = d Kff / d magnSigma2
% DKff{m+1-2m} = d Kff / d lengthScale_m
% m is the dimension of inputs. If ARD is used, then multiple
% lengthScales. This subfunction is needed when using derivative
% observations.
%
% See also
% GPCF_SEXP_GINPUT, GPCF_SEXP_GINPUT2
[n, m] =size(x);
DKff = {};
[DKdd, DKdd3, DKdd4] = gpcf.fh.ginput2(gpcf, x, x);
ii1=0;
if m>1
% Cross derivative matrices (non-diagonal).
DKdda=gpcf.fh.ginput3(gpcf, x,x);
%MAGNSIGMA
%add matrices to the diagonal of help matrix, size (m*n,m*n)
DKffapu=blkdiag(DKdd{:});
% add non-diagonal matrices
if m==2
DKffapund=[zeros(n,n) DKdda{1};DKdda{1} zeros(n,n)];
else
t1=1;
DKffapund=zeros(m*n,m*n);
for i=1:m-1
aa=zeros(m-1,m);
t2=t1+m-2-(i-1);
aa(m-1,i)=1;
k=kron(aa,cat(1,zeros((i)*n,n),DKdda{t1:t2}));
k(1:n*(m),:)=[];
k=k+k';
DKffapund = DKffapund + k;
t1=t2+1;
end
end
DKffapu=DKffapu+DKffapund;
end
% grad with respect to MAGNSIGMA
if ~isempty(gpcf.p.magnSigma2)
if m==1
ii1 = ii1 +1;
DKff{ii1} = DKdd{1};
else
ii1=ii1+1;
DKff{ii1}=DKffapu;
end
else
error('No prior set for magnSigma2')
end
% grad with respect to LENGTHSCALE
% metric doesn't work with grad obs
if isfield(gpcf,'metric')
error('Metric does not work with derivative observations (grad.obs)')
else
if ~isempty(gpcf.p.lengthScale)
% loop over all the lengthScales
if length(gpcf.lengthScale) == 1
% In the case of isotropic SEXP
s = 1./gpcf.lengthScale;
dist = 0;
for i=1:m
D = bsxfun(@minus,x(:,i),x(:,i)');
dist = dist + D.^2;
end
if m==1
%diagonal matrices
ii1 = ii1+1;
DKff{ii1} = DKdd3{1}.*(dist.*s.^2 - 2)-DKdd4{1}.*(dist.*s.^2 - 4);
else
%diagonal matrices
for i=1:m
DKffdiag{i} = DKdd3{i}.*(dist.*s.^2 - 2) - DKdd4{i}.*(dist.*s.^2 - 4);
end
% non-diagonal matrices
% number of dimension pairs num: m=2 -> 1 pair, m=3 -> 3 pairs,
% m=4 -> 6 pairs
num=1;
if m>2
for i=2:m-1
num=num+i;
end
end
for i=1:num
DKffnondiag{i} = DKdda{i}.*(dist.*s.^2-4);
end
% Gather matrices to diagonal
DKffapu2=blkdiag(DKffdiag{:});
% non-diagonal matrices
if m==2
DKffapu2nd=[zeros(n,n) DKffnondiag{1};DKffnondiag{1} zeros(n,n)];
else
t1=1;
DKffapu2nd=zeros(m*n,m*n);
for i=1:m-1
aa=zeros(m-1,m);
t2=t1+m-2-(i-1);
aa(m-1,i)=1;
k=kron(aa,cat(1,zeros((i)*n,n),DKffnondiag{t1:t2}));
k(1:n*(m),:)=[];
k=k+k';
DKffapu2nd = DKffapu2nd + k;
t1=t2+1;
end
end
ii1=ii1+1;
DKff{ii1}=DKffapu2+DKffapu2nd;
end
else
% In the case ARD is used
% Now the lengthScale derivatives differ from the case with only
% one lengthScale, so here we take that into account.
% Preparation: Di is the diagonal help matrix and NDi is the
% non-diagonal help matrix
for i=1:m
Di2{i}=zeros(n,n);
NDi{i}=zeros(m*n,m*n);
s(i) = 1./gpcf.lengthScale(i);
D = bsxfun(@minus,x(:,i),x(:,i)').*s(i);
dist{i} = D.^2;
end
% diagonal matrices for each lengthScale
for j=1:m
for i=1:m
% same x and lengthscale derivative
if i==j
Di2{i} = DKdd3{i}.*(dist{i} - 2) - DKdd4{i}.*(dist{i} - 4);
end
% different x and lengthscale derivative
if i~=j
Di2{i}=DKdd3{i}.*dist{j} - DKdd4{i}.*dist{j};
end
end
Di{j}=blkdiag(Di2{:});
end
%Non-diagonal matrices
if m==2
for k=1:2
Dnondiag=DKdda{1}.*(dist{k}-2);
NDi{k}=[zeros(n,n) Dnondiag;Dnondiag zeros(n,n)];
end
else
for k=1:m
ii3=0;
NDi{k}=zeros(m*n,m*n);
for j=0:m-2
for i=1+j:m-1
ii3=ii3+1;
sar=j*1+1;
riv=i+1;
% check whether the lengthScale dimension matches either of the
% x-derivative dimensions
if sar==k || riv==k
Dnondiag{i}=DKdda{ii3}.*(dist{k}-2);
else
Dnondiag{i}=DKdda{ii3}.*dist{k};
end
end
aa=zeros(m-1,m);
aa(m-1,j+1)=1;
kk=kron(aa,cat(1,zeros((j+1)*n,n),Dnondiag{1+j:m-1}));
kk(1:n*(m),:)=[];
kk=kk+kk';
NDi{k} = NDi{k} + kk;
end
end
end
%and the final matrix is diag. + non-diag matrices
for i=1:m
ii1=ii1+1;
DKff{ii1}=NDi{i}+Di{i};
end
end
end
end
end
function DKff = gpcf_sexp_ginput(gpcf, x, x2, i1)
%GPCF_SEXP_GINPUT Evaluate gradient of covariance function with
% respect to x.
%
% Description
% DKff = GPCF_SEXP_GINPUT(GPCF, X) takes a covariance function
% structure GPCF, a matrix X of input vectors and returns
% DKff, the gradients of covariance matrix Kff = k(X,X) with
% respect to X (cell array with matrix elements). This subfunction
% is needed when computing gradients with respect to inducing
% inputs in sparse approximations.
%
% DKff = GPCF_SEXP_GINPUT(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% DKff = GPCF_SEXP_GINPUT(GPCF, X, X2, i) takes a covariance
% function structure GPCF, a matrix X of input vectors
% and returns DKff, the gradients of covariance matrix Kff =
% k(X,X2), or k(X,X) if X2 is empty, with respect to ith
% covariate in X. This subfunction is needed when using
% memory save option in gp_set.
%
% See also
% GPCF_SEXP_PAK, GPCF_SEXP_UNPAK, GPCF_SEXP_LP, GP_G
[n, m] =size(x);
ii1 = 0;
if nargin<4
i1=1:m;
else
% Use memory save option
if i1==0
% Return number of covariates
if isfield(gpcf,'selectedVariables')
DKff=length(gpcf.selectedVariables);
else
DKff=m;
end
return
end
end
if nargin == 2 || isempty(x2)
K = gpcf.fh.trcov(gpcf, x);
if isfield(gpcf,'metric')
dist = gpcf.metric.fh.dist(gpcf.metric, x);
gdist = gpcf.metric.fh.ginput(gpcf.metric, x);
for i=1:length(gdist)
ii1 = ii1+1;
DKff{ii1} = -K.*dist.*gdist{ii1};
end
else
if length(gpcf.lengthScale) == 1
% In the case of an isotropic SEXP
s = repmat(1./gpcf.lengthScale.^2, 1, m);
else
s = 1./gpcf.lengthScale.^2;
end
for i=i1
for j = 1:n
DK = zeros(size(K));
DK(j,:) = -s(i).*bsxfun(@minus,x(j,i),x(:,i)');
DK = DK + DK';
DK = DK.*K; % dist2 = dist2 + dist2' - diag(diag(dist2));
ii1 = ii1 + 1;
DKff{ii1} = DK;
end
end
end
elseif nargin == 3 || nargin == 4
K = gpcf.fh.cov(gpcf, x, x2);
if isfield(gpcf,'metric')
dist = gpcf.metric.fh.dist(gpcf.metric, x, x2);
gdist = gpcf.metric.fh.ginput(gpcf.metric, x, x2);
for i=1:length(gdist)
ii1 = ii1+1;
DKff{ii1} = -K.*dist.*gdist{ii1};
end
else
if length(gpcf.lengthScale) == 1
% In the case of an isotropic SEXP
s = repmat(1./gpcf.lengthScale.^2, 1, m);
else
s = 1./gpcf.lengthScale.^2;
end
for i=i1
for j = 1:n
DK= zeros(size(K));
DK(j,:) = -s(i).*bsxfun(@minus,x(j,i),x2(:,i)');
DK = DK.*K;
ii1 = ii1 + 1;
DKff{ii1} = DK;
end
end
end
end
end
function [DKff, DKff1, DKff2] = gpcf_sexp_ginput2(gpcf, x, x2)
%GPCF_SEXP_GINPUT2 Evaluate gradient of covariance function with
% respect to both input variables x and x2 (in
% same dimension).
%
% Description
% DKff = GPCF_SEXP_GINPUT2(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of twice derivatived covariance
% matrix K(df,df) = dk(X1,X2)/dX1dX2 (cell array with matrix
% elements). Input variable's dimensions are expected to be
% same. The function returns also DKff1 and DKff2 which are
% parts of DKff and needed with CFDG2. DKff = DKff1 -
% DKff2. This subfunction is needed when using derivative
% observations.
%
% See also
% GPCF_SEXP_GINPUT, GPCF_SEXP_GINPUT2, GPCF_SEXP_CFDG2
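%
% For the squared exponential used here, this second derivative has the
% closed form (a clarifying note; it matches the code below, where
% s(i) = 1/lengthScale(i)^2)
%   d^2 k(x1,x2)/dx1_i dx2_i = (1/l_i^2 - (x1_i-x2_i)^2/l_i^4) * k(x1,x2),
% so DKff1 carries the 1/l_i^2 term, DKff2 the squared-difference term,
% and DKff = DKff1 - DKff2.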
[n, m] =size(x);
[n2,m2] =size(x2);
ii1 = 0;
if nargin ~= 3
error('Needs 3 input arguments')
end
if isequal(x,x2)
K = gpcf.fh.trcov(gpcf, x);
else
K = gpcf.fh.cov(gpcf, x, x2);
end
%metric doesn't work with grad.obs on
if isfield(gpcf,'metric')
error('Metric does not work with derivative observations (grad.obs)')
else
if length(gpcf.lengthScale) == 1
% In the case of an isotropic SEXP
s = repmat(1./gpcf.lengthScale.^2, 1, m);
else
s = 1./gpcf.lengthScale.^2;
end
for i=1:m
DK2 = s(i).^2.*bsxfun(@minus,x(:,i),x2(:,i)').^2.*K;
DK = s(i).*K;
ii1 = ii1 + 1;
DKff1{ii1} = DK;
DKff2{ii1} = DK2;
DKff{ii1} = DK - DK2;
end
end
end
function DKff = gpcf_sexp_ginput3(gpcf, x, x2)
%GPCF_SEXP_GINPUT3 Evaluate gradient of covariance function with
% respect to both input variables x and x2 (in
% different dimensions).
%
% Description
% DKff = GPCF_SEXP_GINPUT3(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of twice derivatived covariance
% matrix K(df,df) = dk(X1,X2)/dX1dX2 (cell array with matrix
% elements). The derivative is calculated in multidimensional
% problem between input's observation dimensions which are not
% same. This subfunction is needed when using derivative
% observations.
%
% See also
% GPCF_SEXP_GINPUT, GPCF_SEXP_GINPUT2, GPCF_SEXP_CFDG2
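%
% For the squared exponential used here, the cross-dimension second
% derivative evaluated by the loop at the end of this function is
% (a clarifying note)
%   d^2 k(x1,x2)/dx1_i dx2_j = -(x1_i-x2_i)*(x1_j-x2_j)/(l_i^2*l_j^2) * k(x1,x2),  i ~= j.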
[n, m] =size(x);
[n2,m2] =size(x2);
if nargin ~= 3
error('Needs 3 input arguments')
end
if isequal(x,x2)
K = gpcf.fh.trcov(gpcf, x);
else
K = gpcf.fh.cov(gpcf, x, x2);
end
% Derivative the cov.function with respect to both input variables
% but in different dimensions. Resulting matrices are for the
% cov. matrix k(df/dx,df/dx) non-diagonal part. Matrices are
% added to DKff in columnwise order for ex. dim=3:
% k(df/dx1,df/dx2),(..dx1,dx3..),(..dx2,dx3..)
if isfield(gpcf,'metric')
error('Metric does not work with ginput3')
else
if length(gpcf.lengthScale) == 1
% In the case of an isotropic SEXP
s = repmat(1./gpcf.lengthScale.^2, 1, m);
else
s = 1./gpcf.lengthScale.^2;
end
ii3=0;
for i=1:m-1
for j=i+1:m
ii3=ii3+1;
DKff{ii3} = s(j).*bsxfun(@minus,x(:,j),x2(:,j)').*(-s(i).*bsxfun(@minus,x(:,i),x2(:,i)').*K);
end
end
end
end
function DKff = gpcf_sexp_ginput4(gpcf, x, x2, i1)
%GPCF_SEXP_GINPUT4 Evaluate gradient of covariance function with
% respect to x. Simplified and faster version of
% sexp_ginput, returns full matrices.
%
% Description
% DKff = GPCF_SEXP_GINPUT4(GPCF, X) takes a covariance function
% structure GPCF, a matrix X of input vectors and returns
% DKff, the gradients of covariance matrix Kff = k(X,X) with
% respect to X (whole matrix). This subfunction is needed when
% using derivative observations.
%
% DKff = GPCF_SEXP_GINPUT4(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to X (whole matrix). This subfunction
% is needed when using derivative observations.
%
% See also
% GPCF_SEXP_PAK, GPCF_SEXP_UNPAK, GPCF_SEXP_LP, GP_G
[n, m] =size(x);
ii1 = 0;
if nargin==2 || isempty(x2)
flag=1;
K = gpcf.fh.trcov(gpcf, x);
else
flag=0;
K = gpcf.fh.cov(gpcf, x, x2);
if isequal(x,x2)
error('The ginput4 function received a wrong input (x and x2 are equal)')
end
end
if nargin<4
i1=1:m;
end
if isfield(gpcf,'metric')
error('no metric implemented')
else
if length(gpcf.lengthScale) == 1
% In the case of an isotropic SEXP
s = repmat(1./gpcf.lengthScale.^2, 1, m);
else
s = 1./gpcf.lengthScale.^2;
end
for i=i1
DK = zeros(size(K));
if flag==1
DK = -s(i).*bsxfun(@minus,x(:,i),x(:,i)');
else
DK = -s(i).*bsxfun(@minus,x(:,i),x2(:,i)');
end
DK = DK.*K;
ii1 = ii1 + 1;
DKff{ii1} = DK;
end
end
end
function reccf = gpcf_sexp_recappend(reccf, ri, gpcf)
%RECAPPEND Record append
%
% Description
% RECCF = GPCF_SEXP_RECAPPEND(RECCF, RI, GPCF) takes a
% covariance function record structure RECCF, record index RI
% and covariance function structure GPCF with the current MCMC
% samples of the parameters. Returns RECCF which contains all
% the old samples and the current samples from GPCF. This
% subfunction is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC and GP_MC -> RECAPPEND
if nargin == 2
% Initialize the record
reccf.type = 'gpcf_sexp';
% Initialize parameters
reccf.lengthScale= [];
reccf.magnSigma2 = [];
% Set the function handles
reccf.fh.pak = @gpcf_sexp_pak;
reccf.fh.unpak = @gpcf_sexp_unpak;
reccf.fh.lp = @gpcf_sexp_lp;
reccf.fh.lpg = @gpcf_sexp_lpg;
reccf.fh.cfg = @gpcf_sexp_cfg;
reccf.fh.cov = @gpcf_sexp_cov;
reccf.fh.trcov = @gpcf_sexp_trcov;
reccf.fh.trvar = @gpcf_sexp_trvar;
reccf.fh.recappend = @gpcf_sexp_recappend;
reccf.p=[];
reccf.p.lengthScale=[];
reccf.p.magnSigma2=[];
if isfield(ri.p,'lengthScale') && ~isempty(ri.p.lengthScale)
reccf.p.lengthScale = ri.p.lengthScale;
end
if isfield(ri.p,'magnSigma2') && ~isempty(ri.p.magnSigma2)
reccf.p.magnSigma2 = ri.p.magnSigma2;
end
if isfield(ri, 'selectedVariables')
reccf.selectedVariables = ri.selectedVariables;
end
else
% Append to the record
gpp = gpcf.p;
if ~isfield(gpcf,'metric')
% record lengthScale
reccf.lengthScale(ri,:)=gpcf.lengthScale;
if isfield(gpp,'lengthScale') && ~isempty(gpp.lengthScale)
reccf.p.lengthScale = gpp.lengthScale.fh.recappend(reccf.p.lengthScale, ri, gpcf.p.lengthScale);
end
end
% record magnSigma2
reccf.magnSigma2(ri,:)=gpcf.magnSigma2;
if isfield(gpp,'magnSigma2') && ~isempty(gpp.magnSigma2)
reccf.p.magnSigma2 = gpp.magnSigma2.fh.recappend(reccf.p.magnSigma2, ri, gpcf.p.magnSigma2);
end
end
end
|
github
|
lcnbeapp/beapp-master
|
gpcf_prod.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gpcf_prod.m
| 15,801 |
utf_8
|
bdd8dde16beff8dd6249f6ff7f82aeaa
|
function gpcf = gpcf_prod(varargin)
%GPCF_PROD Create a product form covariance function
%
% Description
% GPCF = GPCF_PROD('cf', {GPCF_1, GPCF_2, ...})
% creates a product form covariance function
% GPCF = GPCF_1 .* GPCF_2 .* ... .* GPCF_N
%
% See also
% GP_SET, GPCF_*
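%
% A minimal usage sketch (the component covariance functions and the
% parameter values below are illustrative only):
%
% cf1 = gpcf_sexp('lengthScale', 1, 'magnSigma2', 0.5);
% cf2 = gpcf_linear('coeffSigma2', 1);
% cf = gpcf_prod('cf', {cf1, cf2}); % element-wise product cf1 .* cf2
% gp = gp_set('cf', {cf});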
% Copyright (c) 2009-2010 Jarno Vanhatalo
% Copyright (c) 2010 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'GPCF_PROD';
ip.addOptional('gpcf', [], @isstruct);
ip.addParamValue('cf',[], @iscell);
ip.parse(varargin{:});
gpcf=ip.Results.gpcf;
if isempty(gpcf)
init=true;
gpcf.type = 'gpcf_prod';
else
if ~isfield(gpcf,'type') || ~isequal(gpcf.type,'gpcf_prod')
error('First argument does not seem to be a valid covariance function structure')
end
init=false;
end
if init || ~ismember('cf',ip.UsingDefaults)
% Initialize parameters
gpcf.cf = {};
cfs=ip.Results.cf;
if ~isempty(cfs)
for i = 1:length(cfs)
gpcf.cf{i} = cfs{i};
end
else
error('At least one covariance function has to be given in cf');
end
end
if init
% Set the function handles to the subfunctions
gpcf.fh.pak = @gpcf_prod_pak;
gpcf.fh.unpak = @gpcf_prod_unpak;
gpcf.fh.lp = @gpcf_prod_lp;
gpcf.fh.lpg = @gpcf_prod_lpg;
gpcf.fh.cfg = @gpcf_prod_cfg;
gpcf.fh.ginput = @gpcf_prod_ginput;
gpcf.fh.cov = @gpcf_prod_cov;
gpcf.fh.trcov = @gpcf_prod_trcov;
gpcf.fh.trvar = @gpcf_prod_trvar;
gpcf.fh.recappend = @gpcf_prod_recappend;
end
end
function [w, s] = gpcf_prod_pak(gpcf)
%GPCF_PROD_PAK Combine GP covariance function parameters into one vector
%
% Description
% W = GPCF_PROD_PAK(GPCF, W) loops through all the covariance
% functions and packs their parameters into one vector as
% described in the respective functions. This is a mandatory
% subfunction used for example in energy and gradient computations.
%
% See also
% GPCF_PROD_UNPAK
ncf = length(gpcf.cf);
w = []; s = {};
for i=1:ncf
cf = gpcf.cf{i};
[wi si] = cf.fh.pak(cf);
w = [w wi];
s = [s; si];
end
end
function [gpcf, w] = gpcf_prod_unpak(gpcf, w)
%GPCF_PROD_UNPAK Sets the covariance function parameters into
% the structures
%
% Description
% [GPCF, W] = GPCF_PROD_UNPAK(GPCF, W) loops through all the
% covariance functions and unpacks their parameters from W to
% each covariance function structure. This is a mandatory
% subfunction used for example in energy and gradient computations.
%
% See also
% GPCF_PROD_PAK
%
ncf = length(gpcf.cf);
for i=1:ncf
cf = gpcf.cf{i};
[cf, w] = cf.fh.unpak(cf, w);
gpcf.cf{i} = cf;
end
end
function lp = gpcf_prod_lp(gpcf)
%GPCF_PROD_LP Evaluate the log prior of covariance function parameters
%
% Description
% LP = GPCF_PROD_LP(GPCF, X, T) takes a covariance function
% structure GPCF and returns log(p(th)), where th collects the
% parameters. This is a mandatory subfunction used for example
% in energy computations.
%
% See also
% GPCF_PROD_PAK, GPCF_PROD_UNPAK, GPCF_PROD_LPG, GP_E
lp = 0;
ncf = length(gpcf.cf);
for i=1:ncf
cf = gpcf.cf{i};
lp = lp + cf.fh.lp(cf);
end
end
function lpg = gpcf_prod_lpg(gpcf)
%GPCF_PROD_LPG Evaluate gradient of the log prior with respect
% to the parameters.
%
% Description
% LPG = GPCF_PROD_LPG(GPCF) takes a covariance function
% structure GPCF and returns LPG = d log (p(th))/dth, where th
% is the vector of parameters. This is a mandatory subfunction
% used for example in gradient computations.
%
% See also
% GPCF_PROD_PAK, GPCF_PROD_UNPAK, GPCF_PROD_LP, GP_G
lpg = [];
ncf = length(gpcf.cf);
% Evaluate the gradients
for i=1:ncf
cf = gpcf.cf{i};
lpg=[lpg cf.fh.lpg(cf)];
end
end
function DKff = gpcf_prod_cfg(gpcf, x, x2, mask, i1)
%GPCF_PROD_CFG Evaluate gradient of covariance function
% with respect to the parameters.
%
% Description
% DKff = GPCF_PROD_CFG(GPCF, X) takes a covariance function
% structure GPCF, a matrix X of input vectors and returns
% DKff, the gradients of covariance matrix Kff = k(X,X) with
% respect to th (cell array with matrix elements). This is a
% mandatory subfunction used in gradient computations.
%
% DKff = GPCF_PROD_CFG(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using sparse
% approximations (e.g. FIC).
%
% DKff = GPCF_PROD_CFG(GPCF, X, [], MASK) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the diagonal of gradients of covariance matrix
% Kff = k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using sparse
% approximations (e.g. FIC).
%
% DKff = GPCF_PROD_CFG(GPCF, X, X2, [], i) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2), or k(X,X) if X2 is empty, with respect to ith
% hyperparameter. This subfunction is needed when using
% memory save option in gp_set.
%
% See also
% GPCF_PROD_PAK, GPCF_PROD_UNPAK, GPCF_PROD_LP, GP_G
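% By the product rule, the gradient with respect to a parameter of the
% i'th component covariance function is the gradient of that component
% multiplied element-wise by the product of the remaining covariance
% matrices; the CC loops below compute exactly this.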
[n, m] =size(x);
ncf = length(gpcf.cf);
DKff = {};
if nargin==5
% Use memory save option
savememory=1;
i3=0;
for k=1:ncf
% Number of hyperparameters for each covariance function
cf=gpcf.cf{k};
i3(k)=cf.fh.cfg(cf,[],[],[],0);
end
if i1==0
% Return number of hyperparameters
DKff=sum(i3);
return
end
% Help indices
i3=cumsum(i3);
ind=find(cumsum(i3 >= i1)==1);
if ind>1
i1=[ind i1-i3(ind-1)];
else
i1=[ind i1];
end
else
savememory=0;
end
% Evaluate: DKff{1} = d Kff / d magnSigma2
% DKff{2} = d Kff / d lengthScale
% NOTE! Here we have already taken into account that the parameters are transformed
% through log() and thus dK/dlog(p) = p * dK/dp
% evaluate the gradient for training covariance
if nargin == 2 || (isempty(x2) && isempty(mask))
% evaluate the individual covariance functions
for i=1:ncf
cf = gpcf.cf{i};
C{i} = cf.fh.trcov(cf, x);
end
% Evaluate the gradients
ind = 1:ncf;
DKff = {};
if ~savememory
i3=1:ncf;
else
i3=i1(1);
end
for i=i3
cf = gpcf.cf{i};
if ~savememory
DK = cf.fh.cfg(cf, x);
else
DK = {cf.fh.cfg(cf,x,[],[],i1(2))};
end
CC = 1;
for kk = ind(ind~=i)
CC = CC.*C{kk};
end
for j = 1:length(DK)
DKff{end+1} = DK{j}.*CC;
end
end
% Evaluate the gradient of non-symmetric covariance (e.g. K_fu)
elseif nargin == 3 || isempty(mask)
if size(x,2) ~= size(x2,2)
error('gpcf_prod -> _cfg: The number of columns in x and x2 has to be the same.')
end
% evaluate the individual covariance functions
for i=1:ncf
cf = gpcf.cf{i};
C{i} = cf.fh.cov(cf, x, x2);
end
% Evaluate the gradients
ind = 1:ncf;
DKff = {};
if ~savememory
i3=1:ncf;
else
i3=i1(1);
end
for i=i3
cf = gpcf.cf{i};
if ~savememory
DK = cf.fh.cfg(cf, x,x2);
else
DK = {cf.fh.cfg(cf,x,x2,[],i1(2))};
end
CC = 1;
for kk = ind(ind~=i)
CC = CC.*C{kk};
end
for j = 1:length(DK)
DKff{end+1} = DK{j}.*CC;
end
end
% Evaluate: DKff{1} = d mask(Kff,I) / d magnSigma2
% DKff{2...} = d mask(Kff,I) / d lengthScale
elseif nargin == 4 || nargin == 5
% evaluate the individual covariance functions
for i=1:ncf
cf = gpcf.cf{i};
C{i} = cf.fh.trvar(cf, x);
end
% Evaluate the gradients
ind = 1:ncf;
DKff = {};
if ~savememory
i3=1:ncf;
else
i3=i1(1);
end
for i=i3
cf = gpcf.cf{i};
if ~savememory
DK = cf.fh.cfg(cf, x, [], 1);
else
DK = {cf.fh.cfg(cf, x, [], 1, i1(2))};
end
CC = 1;
for kk = ind(ind~=i)
CC = CC.*C{kk};
end
for j = 1:length(DK)
DKff{end+1} = DK{j}.*CC;
end
end
end
if savememory
DKff=DKff{1};
end
end
function DKff = gpcf_prod_ginput(gpcf, x, x2, i1)
%GPCF_PROD_GINPUT Evaluate gradient of covariance function with
% respect to x
%
% Description
% DKff = GPCF_PROD_GINPUT(GPCF, X) takes a covariance function
% structure GPCF, a matrix X of input vectors and returns
% DKff, the gradients of covariance matrix Kff = k(X,X) with
% respect to X (cell array with matrix elements). This subfunction
% is needed when computing gradients with respect to inducing
% inputs in sparse approximations.
%
% DKff = GPCF_PROD_GINPUT(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% DKff = GPCF_PROD_GINPUT(GPCF, X, X2, i) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2), or k(X,X) if X2 is empty, with respect to ith
% covariate in X (cell array with matrix elements). This
% subfunction is needed when using memory save option in
% gp_set.
%
% See also
% GPCF_PROD_PAK, GPCF_PROD_UNPAK, GPCF_PROD_LP, GP_G
[n, m] =size(x);
if nargin==4
% Use memory save option
savememory=1;
if i1==0
% Return number of covariates
if isfield(gpcf,'selectedVariables')
DKff=length(gpcf.selectedVariables);
else
DKff=m;
end
return
end
else
savememory=0;
end
% evaluate the gradient for training covariance
if nargin == 2 || isempty(x2)
ncf = length(gpcf.cf);
% evaluate the individual covariance functions
for i=1:ncf
cf = gpcf.cf{i};
C{i} = cf.fh.trcov(cf, x);
end
% Evaluate the gradients
ind = 1:ncf;
if ~savememory
DKff=cellfun(@(a) zeros(n,n), cell(1,numel(x)), 'UniformOutput', 0);
else
DKff=cellfun(@(a) zeros(n,n), cell(1,n), 'UniformOutput', 0);
end
for i=1:ncf
cf = gpcf.cf{i};
if ~savememory
DK = cf.fh.ginput(cf, x);
else
DK = cf.fh.ginput(cf, x, [], i1);
end
CC = 1;
for kk = ind(ind~=i)
CC = CC.*C{kk};
end
for j = 1:length(DK)
DKff{j} = DKff{j} + DK{j}.*CC;
end
end
% Evaluate the gradient of non-symmetric covariance (e.g. K_fu)
elseif nargin == 3 || nargin == 4
if size(x,2) ~= size(x2,2)
error('gpcf_prod -> _ginput: The number of columns in x and x2 has to be the same.')
end
ncf = length(gpcf.cf);
% evaluate the individual covariance functions
for i=1:ncf
cf = gpcf.cf{i};
C{i} = cf.fh.cov(cf, x, x2);
end
% Evaluate the gradients
ind = 1:ncf;
if ~savememory
DKff=cellfun(@(a) zeros(n,n), cell(1,numel(x)), 'UniformOutput', 0);
else
DKff=cellfun(@(a) zeros(n,n), cell(1,n), 'UniformOutput', 0);
end
for i=1:ncf
cf = gpcf.cf{i};
if ~savememory
DK = cf.fh.ginput(cf, x, x2);
else
DK = cf.fh.ginput(cf, x, x2, i1);
end
CC = 1;
for kk = ind(ind~=i)
CC = CC.*C{kk};
end
for j = 1:length(DK)
DKff{j} = DKff{j} + DK{j}.*CC;
end
end
end
end
function C = gpcf_prod_cov(gpcf, x1, x2)
%GP_PROD_COV Evaluate covariance matrix between two input vectors
%
% Description
% C = GP_PROD_COV(GP, TX, X) takes in covariance function of a
% Gaussian process GP and two matrices TX and X that contain
% input vectors to GP. Returns covariance matrix C. Every
% element ij of C contains covariance between inputs i in TX
% and j in X. This is a mandatory subfunction used for example in
% prediction and energy computations.
%
%
% See also
% GPCF_PROD_TRCOV, GPCF_PROD_TRVAR, GP_COV, GP_TRCOV
if isempty(x2)
x2=x1;
end
[n1,m1]=size(x1);
[n2,m2]=size(x2);
if m1~=m2
error('the number of columns of X1 and X2 has to be same')
end
ncf = length(gpcf.cf);
% evaluate the individual covariance functions
C = 1;
for i=1:ncf
cf = gpcf.cf{i};
C = C.*cf.fh.cov(cf, x1, x2);
end
end
function C = gpcf_prod_trcov(gpcf, x)
%GP_PROD_TRCOV Evaluate training covariance matrix of inputs
%
% Description
% C = GP_PROD_TRCOV(GP, TX) takes in covariance function of a
% Gaussian process GP and matrix TX that contains training
% input vectors. Returns covariance matrix C. Every element ij
% of C contains covariance between inputs i and j in TX. This
% is a mandatory subfunction used for example in prediction and
% energy computations.
%
% See also
% GPCF_PROD_COV, GPCF_PROD_TRVAR, GP_COV, GP_TRCOV
ncf = length(gpcf.cf);
% evaluate the individual covariance functions
C = 1;
for i=1:ncf
cf = gpcf.cf{i};
C = C.*cf.fh.trcov(cf, x);
end
end
function C = gpcf_prod_trvar(gpcf, x)
% GP_PROD_TRVAR Evaluate training variance vector
%
% Description
% C = GP_PROD_TRVAR(GPCF, TX) takes in covariance function of
% a Gaussian process GPCF and matrix TX that contains training
% inputs. Returns variance vector C. Every element i of C
% contains variance of input i in TX. This is a mandatory
% subfunction used for example in prediction and energy computations.
%
% See also
% GPCF_PROD_COV, GP_COV, GP_TRCOV
ncf = length(gpcf.cf);
% evaluate the individual covariance functions
C = 1;
for i=1:ncf
cf = gpcf.cf{i};
C = C.*cf.fh.trvar(cf, x);
end
end
function reccf = gpcf_prod_recappend(reccf, ri, gpcf)
%RECAPPEND Record append
%
% Description
% RECCF = GPCF_PROD_RECAPPEND(RECCF, RI, GPCF) takes a
% covariance function record structure RECCF, record index RI
% and covariance function structure GPCF with the current MCMC
% samples of the parameters. Returns RECCF which contains all
% the old samples and the current samples from GPCF. This
% subfunction is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC, GP_MC->RECAPPEND
if nargin == 2
% Initialize the record
reccf.type = 'gpcf_prod';
% Initialize parameters
ncf = length(ri.cf);
for i=1:ncf
cf = ri.cf{i};
reccf.cf{i} = cf.fh.recappend([], ri.cf{i});
end
% Set the function handles
reccf.fh.pak = @gpcf_prod_pak;
reccf.fh.unpak = @gpcf_prod_unpak;
reccf.fh.e = @gpcf_prod_lp;
reccf.fh.lpg = @gpcf_prod_lpg;
reccf.fh.cfg = @gpcf_prod_cfg;
reccf.fh.cov = @gpcf_prod_cov;
reccf.fh.trcov = @gpcf_prod_trcov;
reccf.fh.trvar = @gpcf_prod_trvar;
reccf.fh.recappend = @gpcf_prod_recappend;
else
% Append to the record
% Loop over all of the covariance functions
ncf = length(gpcf.cf);
for i=1:ncf
cf = gpcf.cf{i};
reccf.cf{i} = cf.fh.recappend(reccf.cf{i}, ri, cf);
end
end
end
|
github
|
lcnbeapp/beapp-master
|
scaled_mh.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/scaled_mh.m
| 10,266 |
utf_8
|
1563f9eb51b7be715e98686a4c44a6fd
|
function [f, energ, diagn] = scaled_mh(f, opt, gp, x, y, z)
%SCALED_MH A scaled Metropolis-Hastings sampling for latent values
%
% Description
% [F, ENERG, DIAG] = SCALED_MH(F, OPT, GP, X, Y) takes the
% current latent values F, options structure OPT, Gaussian
% process structure GP, inputs X and outputs Y. Samples new
% latent values and returns also energies ENERG and diagnostics
% DIAG. The latent values are sampled from their conditional
% posterior p(f|y,th).
%
% The latent values are whitened with the prior covariance
% before the sampling. This reduces the autocorrelation and
% speeds up the mixing of the sampler. See (Neal, 1993) for
% details on implementation.
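%
% In the notation of the code below, with scale lvs and L a Cholesky
% factor of the prior covariance, the proposal is
% f' = sqrt(1 - lvs^2)*f + lvs*L*r, r ~ N(0, I),
% which keeps the Gaussian prior invariant, so the acceptance test
% depends only on the likelihood ratio.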
%
% The options structure should include the following fields:
% repeat - the number of MH-steps before
% returning a single sample (default 10)
% sample_latent_scale - the scale for the MH-step (default 0.5)
%
% OPT = SCALED_MH() Returns default options
%
% OPT = SCALED_MH(OPT) Returns default options for fields not
% yet set in OPT
%
% See also
% GP_MC
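%
% A minimal usage sketch (normally this sampler is called by GP_MC; the
% data x, y and the GP structure gp below are assumed to exist):
%
% opt = scaled_mh(); % default options
% opt.repeat = 20;
% f = zeros(size(y))'; % initial latent values
% [f, ~, diagn] = scaled_mh(f, opt, gp, x, y);
% diagn.rej % rejection rate of this call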
% Copyright (c) 1999,2011 Aki Vehtari
% Copyright (c) 2006-2010 Jarno Vanhatalo
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
% set default options
if nargin<=1
if nargin==0
f=scaled_mh_opt();
else
f=scaled_mh_opt(f);
end
return
end
[n,nout] = size(y);
if isfield(gp.lik, 'nondiagW')
switch gp.lik.type
case {'LGP', 'LGPC'}
% Do nothing
case {'Softmax', 'Multinom'}
% Do nothing
otherwise
nout=length(gp.comp_cf);
end
if isfield(gp, 'comp_cf') % own covariance for each output component
multicf = true;
if length(gp.comp_cf) ~= nout
error('SCALED_MH: the number of component vectors in gp.comp_cf must be the same as number of outputs.')
end
else
multicf = false;
end
end
f = reshape(f,n,nout);
maxcut = -log(eps);
mincut = -log(1/realmin - 1);
lvs=opt.sample_latent_scale;
a = max(min(f, maxcut),mincut);
switch gp.type
case {'FULL'}
if ~isfield(gp.lik, 'nondiagW') || ismember(gp.lik.type, {'LGP' 'LGPC'})
[K,C]=gp_trcov(gp, x);
if isfield(gp,'meanf')
[H_m,b_m,B_m]=mean_prep(gp,x,[]);
C = C + H_m'*B_m*H_m;
end
L=chol(C)';
else
L = zeros(n,n,nout);
if multicf
for i1=1:nout
[tmp, C] = gp_trcov(gp, x, gp.comp_cf{i1});
L(:,:,i1)=chol(C, 'lower');
end
else
for i1=1:nout
[tmp, C] = gp_trcov(gp, x);
L(:,:,i1)=chol(C, 'lower');
end
end
end
e = -gp.lik.fh.ll(gp.lik, y, f, z);
ft = zeros(size(y));
% Adaptive control algorithm to find such a value for lvs
% that the rejection rate of Metropolis is optimal.
slrej = 0;
for li=1:100
for i1 =1:nout
ft(:,i1)=sqrt(1-lvs.^2).*f(:,i1)+lvs.*L(:,:,i1)*randn(n,1);
end
ed = -gp.lik.fh.ll(gp.lik, y, ft, z);
a=e-ed;
if exp(a) > rand(1)
f=ft;
e=ed;
lvs=min(1,lvs*1.1);
else
lvs=max(1e-8,lvs/1.05);
end
end
opt.sample_latent_scale=lvs;
% Do the actual sampling
for li=1:(opt.repeat)
for i1 =1:nout
ft(:,i1)=sqrt(1-lvs.^2).*f(:,i1)+lvs.*L(:,:,i1)*randn(n,1);
end
ed = -gp.lik.fh.ll(gp.lik, y, ft, z);
a=e-ed;
if exp(a) > rand(1)
f=ft;
e=ed;
else
slrej=slrej+1;
end
end
diagn.rej = slrej/opt.repeat;
diagn.lvs = lvs;
diagn.opt=opt;
energ=[];
f = f(:)';
case 'FIC'
u = gp.X_u;
m = size(u,1);
% Turn the inducing vector on right direction
if size(u,2) ~= size(x,2)
u=u';
end
% Calculate some help matrices
[Kv_ff, Cv_ff] = gp_trvar(gp, x);
K_fu = gp_cov(gp, x, u);
K_uu = gp_trcov(gp, u);
Luu = chol(K_uu)';
% Evaluate the Lambda (La)
B=Luu\(K_fu');
Qv_ff=sum(B.^2)';
Lav = Cv_ff-Qv_ff;
sLav = sqrt(Lav);
n=length(y);
e = -gp.lik.fh.ll(gp.lik, y, f, z);
% Adaptive control algorithm to find such a value for lvs
% so that the rejection rate of Metropolis is optimal.
slrej = 0;
for li=1:100
ft=sqrt(1-lvs.^2).*f + lvs.*(sLav.*randn(n,1) + B'*randn(m,1));
ed = -gp.lik.fh.ll(gp.lik, y, ft, z);
a=e-ed;
if exp(a) > rand(1)
f=ft;
e=ed;
lvs=min(1,lvs*1.1);
else
lvs=max(1e-8,lvs/1.05);
end
end
opt.sample_latent_scale=lvs;
% Do the actual sampling
for li=1:(opt.repeat)
ft=sqrt(1-lvs.^2).*f + lvs.*(sLav.*randn(n,1) + B'*randn(m,1));
ed = -gp.lik.fh.ll(gp.lik, y, ft, z);
a=e-ed;
if exp(a) > rand(1)
f=ft;
e=ed;
else
slrej=slrej+1;
end
end
diagn.rej = slrej/opt.repeat;
diagn.lvs = lvs;
diagn.opt=opt;
energ=[];
f = f';
case 'PIC'
u = gp.X_u;
m = size(u,1);
ind = gp.tr_index;
if size(u,2) ~= size(x,2)
u=u';
end
% Calculate some help matrices
[Kv_ff, Cv_ff] = gp_trvar(gp, x); % 1 x f vector
K_fu = gp_cov(gp, x, u); % f x u
K_uu = gp_trcov(gp, u); % u x u, noiseles covariance K_uu
Luu = chol(K_uu)';
% Evaluate the Lambda (La) for specific model
% Q_ff = K_fu*inv(K_uu)*K_fu'
% Here we need only the diag(Q_ff), which is evaluated below
B=Luu\K_fu';
iLaKfu = zeros(size(K_fu)); % f x u
for i=1:length(ind)
Qbl_ff = B(:,ind{i})'*B(:,ind{i});
[Kbl_ff, Cbl_ff] = gp_trcov(gp, x(ind{i},:));
La{i} = Cbl_ff - Qbl_ff;
CLa{i} = chol(La{i})' ;
end
n=length(y);
e = -gp.lik.fh.ll(gp.lik, y, f, z);
% Adaptive control algorithm to find such a value for lvs
% so that the rejection rate of Metropolis is optimal.
slrej = 0;
for li=1:100
sampf = randn(size(f));
for i=1:length(ind)
sampf(ind{i},:) = CLa{i}*sampf(ind{i},:);
end
ft=sqrt(1-lvs.^2).*f + lvs.*(sampf + B'*randn(m,1));
at = max(min(ft, maxcut),mincut);
ed = -gp.lik.fh.ll(gp.lik, y, ft, z);
a=e-ed;
if exp(a) > rand(1)
f=ft;
e=ed;
lvs=min(1,lvs*1.1);
else
lvs=max(1e-8,lvs/1.05);
end
end
opt.sample_latent_scale=lvs;
% Do the actual sampling
for li=1:(opt.repeat)
sampf = randn(size(f));
for i=1:length(ind)
sampf(ind{i},:) = CLa{i}*sampf(ind{i},:);
end
ft=sqrt(1-lvs.^2).*f + lvs.*(sampf + B'*randn(m,1));
at = max(min(ft, maxcut),mincut);
ed = -gp.lik.fh.ll(gp.lik, y, ft, z);
a=e-ed;
if exp(a) > rand(1)
f=ft;
e=ed;
else
slrej=slrej+1;
end
end
diagn.rej = slrej/opt.repeat;
diagn.lvs = lvs;
diagn.opt=opt;
energ=[];
f = f';
case 'CS+FIC'
u = gp.X_u;
cf_orig = gp.cf;
ncf = length(gp.cf);
n = size(x,1); m = size(u,1);
cf1 = {};
cf2 = {};
j = 1;
k = 1;
for i = 1:ncf
if ~isfield(gp.cf{i},'cs')
cf1{j} = gp.cf{i};
j = j + 1;
else
cf2{k} = gp.cf{i};
k = k + 1;
end
end
gp.cf = cf1;
% First evaluate the needed covariance matrices
% if they are not in the memory
% v defines that parameter is a vector
[Kv_ff, Cv_ff] = gp_trvar(gp, x); % 1 x f vector
K_fu = gp_cov(gp, x, u); % f x u
K_uu = gp_trcov(gp, u); % u x u, noiseles covariance K_uu
K_uu = (K_uu+K_uu')./2; % ensure the symmetry of K_uu
Luu = chol(K_uu)';
B=Luu\(K_fu');
Qv_ff=sum(B.^2)';
Lav = Cv_ff-Qv_ff; % 1 x f, Vector of diagonal elements
gp.cf = cf2;
K_cs = gp_trcov(gp,x);
La = sparse(1:n,1:n,Lav,n,n) + K_cs;
gp.cf = cf_orig;
LD = ldlchol(La);
sLa = chol(La)';
n=length(y);
e = -gp.lik.fh.ll(gp.lik, y, f, z);
% Adaptive control algorithm to find such a value for lvs
% so that the rejection rate of Metropolis is optimal.
slrej = 0;
for li=1:100
ft=sqrt(1-lvs.^2).*f + lvs.*(sLa*randn(n,1) + B'*randn(m,1));
ed = -gp.lik.fh.ll(gp.lik, y, ft, z);
a=e-ed;
if exp(a) > rand(1)
f=ft;
e=ed;
lvs=min(1,lvs*1.1);
else
lvs=max(1e-8,lvs/1.05);
end
end
opt.sample_latent_scale=lvs;
% Do the actual sampling
for li=1:(opt.repeat)
ft=sqrt(1-lvs.^2).*f + lvs.*(sLa*randn(n,1) + B'*randn(m,1));
ed = -gp.lik.fh.ll(gp.lik, y, ft, z);
a=e-ed;
if exp(a) > rand(1)
f=ft;
e=ed;
else
slrej=slrej+1;
end
end
diagn.rej = slrej/opt.repeat;
diagn.lvs = lvs;
diagn.opt=opt;
energ=[];
f = f';
end
end
function opt = scaled_mh_opt(opt)
%SCALED_MH_OPT Default options for scaled Metropolis-Hastings sampling
%
% Description
% OPT = SCALED_MH_OPT
% return default options
% OPT = SCALED_MH_OPT(OPT)
% fill empty options with default values
%
% The options and defaults are
% repeat - the number of MH-steps before
% returning a single sample (default 10)
% sample_latent_scale - the scale for the MH-step (default 0.5)
if nargin < 1
opt=[];
end
if ~isfield(opt,'repeat')
opt.repeat=10;
end
if ~isfield(opt,'sample_latent_scale')
opt.sample_latent_scale=0.5;
end
end
|
github
|
lcnbeapp/beapp-master
|
lik_lgp.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/lik_lgp.m
| 15,167 |
windows_1250
|
7b7a6270ae79e34121ec3ef3091f6b59
|
function lik = lik_lgp(varargin)
%LIK_LGP Create a logistic Gaussian process likelihood structure
%
% Description
% LIK = LIK_LGP creates a logistic Gaussian process likelihood structure
%
% The likelihood is defined as follows:
% __ n
% p(y|f) = || i=1 exp(f_i) / Sum_{j=1}^n exp(f_j),
%
% where f contains latent values.
%
% See also
% LGPDENS, GP_SET, LIK_*
%
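% A minimal usage sketch: this likelihood is normally constructed
% internally by LGPDENS, but it can also be used directly with GP_SET
% (the call below is illustrative; see lgpdens.m for the full setup):
%
% lik = lik_lgp();
% gp = gp_set('lik', lik, 'cf', {gpcf_sexp()}, 'latent_method', 'Laplace');
%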
% Copyright (c) 2011 Jaakko Riihimäki and Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'LIK_LGP';
ip.addOptional('lik', [], @isstruct);
ip.parse(varargin{:});
lik=ip.Results.lik;
if isempty(lik)
init=true;
lik.type = 'LGP';
lik.nondiagW = true;
else
if ~isfield(lik,'type') || ~isequal(lik.type,'LGP')
error('First argument does not seem to be a valid likelihood function structure')
end
init=false;
end
if init
% Set the function handles to the subfunctions
lik.fh.pak = @lik_lgp_pak;
lik.fh.unpak = @lik_lgp_unpak;
lik.fh.ll = @lik_lgp_ll;
lik.fh.llg = @lik_lgp_llg;
lik.fh.llg2 = @lik_lgp_llg2;
lik.fh.llg3 = @lik_lgp_llg3;
lik.fh.tiltedMoments = @lik_lgp_tiltedMoments;
lik.fh.predy = @lik_lgp_predy;
lik.fh.invlink = @lik_lgp_invlink;
lik.fh.recappend = @lik_lgp_recappend;
end
end
function [w,s] = lik_lgp_pak(lik)
%LIK_LGP_PAK Combine likelihood parameters into one vector.
%
% Description
% W = LIK_LGP_PAK(LIK) takes a likelihood structure LIK
% and returns an empty vector W. If the LGP likelihood had
% parameters this would combine them into a single row vector
% W (see e.g. lik_negbin). This is a mandatory subfunction
% used for example in energy and gradient computations.
%
% See also
% LIK_LGP_UNPAK, GP_PAK
w = []; s = {};
end
function [lik, w] = lik_lgp_unpak(lik, w)
%LIK_LGP_UNPAK Extract likelihood parameters from the vector.
%
% Description
% [LIK, W] = LIK_LGP_UNPAK(LIK, W) Doesn't do anything.
%
% If the LGP likelihood had parameters this would extract the
% parameters from the vector W to the LIK structure. This
% is a mandatory subfunction used for example in energy
% and gradient computations.
%
%
% See also
% LIK_LGP_PAK, GP_UNPAK
lik=lik;
w=w;
end
function logLik = lik_lgp_ll(lik, y, f, z)
%LIK_LGP_LL Log likelihood
%
% Description
% E = LIK_LGP_LL(LIK, Y, F, Z) takes a likelihood data
% structure LIK, incidence counts Y, expected counts Z, and
% latent values F. Returns the log likelihood, log p(y|f,z).
% This subfunction is needed when using Laplace approximation
% or MCMC for inference with non-Gaussian likelihoods. This
% subfunction is also used in information criteria (DIC, WAIC)
% computations.
%
% See also
% LIK_LGP_LLG, LIK_LGP_LLG3, LIK_LGP_LLG2, GPLA_E
n=sum(y);
qj=exp(f);
logLik = sum(f.*y)-n*log(sum(qj));
end
function deriv = lik_lgp_llg(lik, y, f, param, z)
%LIK_LGP_LLG Gradient of the log likelihood
%
% Description
% G = LIK_LGP_LLG(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, incidence counts Y, expected counts Z
% and latent values F. Returns the gradient of the log
% likelihood with respect to PARAM. At the moment PARAM can be
% 'param' or 'latent'. This subfunction is needed when using Laplace
% approximation or MCMC for inference with non-Gaussian likelihoods.
%
% See also
% LIK_LGP_LL, LIK_LGP_LLG2, LIK_LGP_LLG3, GPLA_E
switch param
case 'latent'
n=sum(y);
qj=exp(f);
pj=qj./sum(qj);
deriv=y-n*pj;
end
end
function g2 = lik_lgp_llg2(lik, y, f, param, z)
%LIK_LGP_LLG2 Second gradients of the log likelihood
%
% Description
% G2 = LIK_LGP_LLG2(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, incidence counts Y, expected counts Z,
% and latent values F. Returns the Hessian of the log
% likelihood with respect to PARAM. At the moment PARAM can be
% only 'latent'. G2 is a vector with diagonal elements of the
% Hessian matrix (off diagonals are zero). This subfunction
% is needed when using Laplace approximation or EP for
% inference with non-Gaussian likelihoods.
%
% See also
% LIK_LGP_LL, LIK_LGP_LLG, LIK_LGP_LLG3, GPLA_E
switch param
case 'latent'
qj=exp(f);
% g2 is not the second gradient of the log likelihood but only a
% vector to form the exact gradient term in gpla_nd_e, gpla_nd_g and
% gpla_nd_pred functions
g2=qj./sum(qj);
end
end
function g3 = lik_lgp_llg3(lik, y, f, param, z)
%LIK_LGP_LLG3 Third gradients of the log likelihood
%
% Description
% G3 = LIK_LGP_LLG3(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, incidence counts Y, expected counts Z
% and latent values F and returns the third gradients of the
% log likelihood with respect to PARAM. At the moment PARAM
% can be only 'latent'. G3 is a vector with third gradients.
% This subfunction is needed when using Laplace approximation
% for inference with non-Gaussian likelihoods.
%
% See also
% LIK_LGP_LL, LIK_LGP_LLG, LIK_LGP_LLG2, GPLA_E, GPLA_G
switch param
case 'latent'
qj=exp(f);
% g3 is not the third gradient of the log likelihood but only a
% vector to form the exact gradient term in gpla_nd_e, gpla_nd_g and
% gpla_nd_pred functions
g3=qj./sum(qj);
%n=sum(y);
%nf=size(f,1);
%g3d=zeros(nf,nf);
%for i1=1:nf
% g3dtmp=-g3*g3(i1);
% g3dtmp(i1)=g3dtmp(i1)+g3(i1);
% g3d(:,i1)=g3dtmp;
% %g3i1= n*(-diag(g3d(:,i1)) + bsxfun(@times,g3,g3d(:,i1)') + bsxfun(@times,g3d(:,i1),g3'));
%end
end
end
function [logM_0, m_1, sigm2hati1] = lik_lgp_tiltedMoments(lik, y, i1, sigm2_i, myy_i, z)
%LIK_LGP_TILTEDMOMENTS Returns the marginal moments for EP algorithm
%
% Description
% [M_0, M_1, M2] = LIK_LGP_TILTEDMOMENTS(LIK, Y, I, S2,
% MYY, Z) takes a likelihood structure LIK, incedence counts
% Y, expected counts Z, index I and cavity variance S2 and
% mean MYY. Returns the zeroth moment M_0, mean M_1 and
% variance M_2 of the posterior marginal (see Rasmussen and
% Williams (2006): Gaussian processes for Machine Learning,
% page 55). This subfunction is needed when using EP for
% inference with non-Gaussian likelihoods.
%
% See also
% GPEP_E
if isempty(z)
error(['lik_lgp -> lik_lgp_tiltedMoments: missing z!'...
'LGP likelihood needs the expected number of '...
'occurrences as an extra input z. See, for '...
'example, lik_lgp and gpla_e. ']);
end
yy = y(i1);
avgE = z(i1);
logM_0=zeros(size(yy));
m_1=zeros(size(yy));
sigm2hati1=zeros(size(yy));
for i=1:length(i1)
% get a function handle of an unnormalized tilted distribution
% (likelihood * cavity = LGP * Gaussian)
% and useful integration limits
[tf,minf,maxf]=init_lgp_norm(yy(i),myy_i(i),sigm2_i(i),avgE(i));
% Integrate with quadrature
RTOL = 1.e-6;
ATOL = 1.e-10;
[m_0, m_1(i), m_2] = quad_moments(tf, minf, maxf, RTOL, ATOL);
sigm2hati1(i) = m_2 - m_1(i).^2;
% If the second central moment is less than cavity variance
% integrate more precisely. Theoretically for log-concave
% likelihood should be sigm2hati1 < sigm2_i.
if sigm2hati1(i) >= sigm2_i(i)
ATOL = ATOL.^2;
RTOL = RTOL.^2;
[m_0, m_1(i), m_2] = quad_moments(tf, minf, maxf, RTOL, ATOL);
sigm2hati1(i) = m_2 - m_1(i).^2;
if sigm2hati1(i) >= sigm2_i(i)
error('lik_lgp_tilted_moments: sigm2hati1 >= sigm2_i');
end
end
logM_0(i) = log(m_0);
end
end
function [lpy, Ey, Vary] = lik_lgp_predy(lik, Ef, Varf, yt, zt)
%LIK_LGP_PREDY Returns the predictive mean, variance and density of y
%
% Description
% LPY = LIK_LGP_PREDY(LIK, EF, VARF, YT, ZT)
% Returns also the predictive density of YT, that is
% p(yt | y,zt) = \int p(yt | f, zt) p(f|y) df.
% This requires also the incidence counts YT and expected counts ZT.
% This subfunction is needed when computing posterior predictive
% distributions for future observations.
%
% [LPY, EY, VARY] = LIK_LGP_PREDY(LIK, EF, VARF) takes a
% likelihood structure LIK, posterior mean EF and posterior
% Variance VARF of the latent variable and returns the
% posterior predictive mean EY and variance VARY of the
% observations related to the latent variables. This
% subfunction is needed when computing posterior predictive
% distributions for future observations.
%
%
% See also
% GPLA_PRED, GPEP_PRED, GPMC_PRED
if isempty(zt)
error(['lik_lgp -> lik_lgp_predy: missing zt!'...
'LGP likelihood needs the expected number of '...
'occurrences as an extra input zt. See, for '...
'example, lik_lgp and gpla_e. ']);
end
avgE = zt;
lpy = zeros(size(Ef));
Ey = zeros(size(Ef));
EVary = zeros(size(Ef));
VarEy = zeros(size(Ef));
if nargout > 1
% Evaluate Ey and Vary
for i1=1:length(Ef)
%%% With quadrature
myy_i = Ef(i1);
sigm_i = sqrt(Varf(i1));
minf=myy_i-6*sigm_i;
maxf=myy_i+6*sigm_i;
F = @(f) exp(log(avgE(i1))+f+norm_lpdf(f,myy_i,sigm_i));
Ey(i1) = quadgk(F,minf,maxf);
EVary(i1) = Ey(i1);
F3 = @(f) exp(2*log(avgE(i1))+2*f+norm_lpdf(f,myy_i,sigm_i));
VarEy(i1) = quadgk(F3,minf,maxf) - Ey(i1).^2;
end
Vary = EVary + VarEy;
end
% Evaluate the posterior predictive densities of the given observations
for i1=1:length(Ef)
% get a function handle of the likelihood times posterior
% (likelihood * posterior = LGP * Gaussian)
% and useful integration limits
[pdf,minf,maxf]=init_lgp_norm(...
yt(i1),Ef(i1),Varf(i1),avgE(i1));
% integrate over the f to get posterior predictive distribution
lpy(i1) = log(quadgk(pdf, minf, maxf));
end
end
function [df,minf,maxf] = init_lgp_norm(yy,myy_i,sigm2_i,avgE)
%INIT_LGP_NORM
%
% Description
% Return function handle to a function evaluating LGP *
% Gaussian which is used for evaluating (likelihood * cavity)
% or (likelihood * posterior) Return also useful limits for
% integration. This is private function for lik_lgp. This
% subfunction is needed by sufunctions tiltedMoments, siteDeriv
% and predy.
%
% See also
% LIK_LGP_TILTEDMOMENTS, LIK_LGP_PREDY
% avoid repetitive evaluation of constant part
ldconst = -gammaln(yy+1) - log(sigm2_i)/2 - log(2*pi)/2;
% Create function handle for the function to be integrated
df = @lgp_norm;
% use log to avoid underflow, and derivates for faster search
ld = @log_lgp_norm;
ldg = @log_lgp_norm_g;
ldg2 = @log_lgp_norm_g2;
% Set the limits for integration
% LGP likelihood is log-concave so the lgp_norm
% function is unimodal, which makes things easier
if yy==0
% with yy==0, the mode of the likelihood is not defined
% use the mode of the Gaussian (cavity or posterior) as a first guess
modef = myy_i;
else
% use precision weighted mean of the Gaussian approximation
% of the LGP likelihood and Gaussian
mu=log(yy/avgE);
s2=1./(yy+1./sigm2_i);
modef = (myy_i/sigm2_i + mu/s2)/(1/sigm2_i + 1/s2);
end
% find the mode of the integrand using Newton iterations
% a few iterations are enough, since the first guess is in the right direction
niter=3; % number of Newton iterations
mindelta=1e-6; % tolerance in stopping Newton iterations
for ni=1:niter
g=ldg(modef);
h=ldg2(modef);
delta=-g/h;
modef=modef+delta;
if abs(delta)<mindelta
break
end
end
% integrand limits based on Gaussian approximation at mode
modes=sqrt(-1/h);
minf=modef-8*modes;
maxf=modef+8*modes;
modeld=ld(modef);
iter=0;
% check that density at end points is low enough
lddiff=20; % min difference in log-density between mode and end-points
minld=ld(minf);
step=1;
while minld>(modeld-lddiff)
minf=minf-step*modes;
minld=ld(minf);
iter=iter+1;
step=step*2;
if iter>100
error(['lik_lgp -> init_lgp_norm: ' ...
'integration interval minimum not found ' ...
'even after looking hard!'])
end
end
maxld=ld(maxf);
step=1;
while maxld>(modeld-lddiff)
maxf=maxf+step*modes;
maxld=ld(maxf);
iter=iter+1;
step=step*2;
if iter>100
error(['lik_lgp -> init_lgp_norm: ' ...
'integration interval maximum not found ' ...
'even after looking hard!'])
end
end
function integrand = lgp_norm(f)
% LGP * Gaussian
mu = avgE.*exp(f);
integrand = exp(ldconst ...
-mu+yy.*log(mu) ...
-0.5*(f-myy_i).^2./sigm2_i);
end
function log_int = log_lgp_norm(f)
% log(LGP * Gaussian)
% log_lgp_norm is used to avoid underflow when searching
% integration interval
mu = avgE.*exp(f);
log_int = ldconst ...
-mu+yy.*log(mu) ...
-0.5*(f-myy_i).^2./sigm2_i;
end
function g = log_lgp_norm_g(f)
% d/df log(LGP * Gaussian)
% derivative of log_lgp_norm
mu = avgE.*exp(f);
g = -mu+yy...
+ (myy_i - f)./sigm2_i;
end
function g2 = log_lgp_norm_g2(f)
% d^2/df^2 log(LGP * Gaussian)
% second derivate of log_lgp_norm
mu = avgE.*exp(f);
g2 = -mu...
-1/sigm2_i;
end
end
function mu = lik_lgp_invlink(lik, f, z)
%LIK_LGP_INVLINK Returns values of inverse link function
%
% Description
% P = LIK_LGP_INVLINK(LIK, F) takes a likelihood structure LIK and
% latent values F and returns the values MU of inverse link function.
% This subfunction is needed when using function gp_predprctmu.
%
% See also
% LIK_LGP_LL, LIK_LGP_PREDY
mu = z.*exp(f);
end
function reclik = lik_lgp_recappend(reclik, ri, lik)
%RECAPPEND Append the parameters to the record
%
% Description
% RECLIK = LIK_LGP_RECAPPEND(RECLIK, RI, LIK) takes a
% likelihood record structure RECLIK, record index RI and
% likelihood structure LIK with the current MCMC samples of
% the parameters. Returns RECLIK which contains all the old
% samples and the current samples from LIK. This subfunction
% is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC
if nargin == 2
reclik.type = 'LGP';
% Set the function handles
reclik.fh.pak = @lik_lgp_pak;
reclik.fh.unpak = @lik_lgp_unpak;
reclik.fh.ll = @lik_lgp_ll;
reclik.fh.llg = @lik_lgp_llg;
reclik.fh.llg2 = @lik_lgp_llg2;
reclik.fh.llg3 = @lik_lgp_llg3;
reclik.fh.tiltedMoments = @lik_lgp_tiltedMoments;
reclik.fh.predy = @lik_lgp_predy;
reclik.fh.invlink = @lik_lgp_invlink;
reclik.fh.recappend = @lik_lgp_recappend;
return
end
end
|
github
|
lcnbeapp/beapp-master
|
gpmf_squared.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gpmf_squared.m
| 8,746 |
utf_8
|
fcd6e5e58eb18041d7c0f1fb70a152c0
|
function gpmf = gpmf_squared(varargin)
%GPMF_SQUARED Create a squared mean function
%
% Description
% GPMF = GPMF_SQUARED('PARAM1',VALUE1,'PARAM2,VALUE2,...)
% creates a squared mean function structure in which the named
% parameters have the specified values. Any unspecified
% parameters are set to default values.
%
% GPMF = GPMF_SQUARED(GPMF,'PARAM1',VALUE1,'PARAM2,VALUE2,...)
% modify a mean function structure with the named parameters
% altered with the specified values.
%
% Parameters for squared mean function [default]
% prior_mean - prior mean (scalar or vector) for base
% functions' weight prior (default 0)
% prior_cov - prior covariances (scalar or vector)
% for base functions' prior corresponding
% each selected input dimension. In
% multiple dimension case prior_cov is a
% struct containing scalars or vectors.
% The covariances must all be either
% scalars (diagonal cov.matrix) or
% vectors (for non-diagonal cov.matrix)
% (default 100)
% selectedVariables - vector defining which inputs are active
%
% See also
% GP_SET, GPMF_CONSTANT, GPMF_LINEAR
%
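% A minimal usage sketch (the parameter values are illustrative only):
% a GP with a squared mean function on the first two inputs could be
% set up as
%
% gpmf = gpmf_squared('prior_mean', 0, 'prior_cov', 100, ...
% 'selectedVariables', [1 2]);
% gp = gp_set('cf', {gpcf_sexp()}, 'meanf', {gpmf});
%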
% Copyright (c) 2010 Tuomas Nikoskinen
% Copyright (c) 2011 Jarno Vanhatalo
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'GPMF_SQUARED';
ip.addOptional('gpmf', [], @isstruct);
ip.addParamValue('selectedVariables',[], @(x) isvector(x) && all(x>0));
ip.addParamValue('prior_mean',0, @(x) isvector(x));
ip.addParamValue('prior_cov',100, @(x) isvector(x));
ip.addParamValue('mean_prior', [], @isstruct);
ip.addParamValue('cov_prior', [], @isstruct);
ip.parse(varargin{:});
gpmf=ip.Results.gpmf;
if isempty(gpmf)
% Initialize a mean function
init=true;
gpmf.type = 'gpmf_squared';
else
% Modify a mean function
if ~isfield(gpmf,'type') || ~isequal(gpmf.type,'gpmf_squared')
error('First argument does not seem to be a squared mean function')
end
init=false;
end
% Initialize parameters
if init || ~ismember('prior_mean',ip.UsingDefaults)
gpmf.b=ip.Results.prior_mean(:)';
end
if init || ~ismember('prior_cov',ip.UsingDefaults)
gpmf.B=ip.Results.prior_cov(:)';
end
if ~ismember('selectedVariables',ip.UsingDefaults)
gpmf.selectedVariables=ip.Results.selectedVariables;
end
if init || ~ismember('mean_prior',ip.UsingDefaults)
gpmf.p.b=ip.Results.mean_prior;
end
if init || ~ismember('cov_prior',ip.UsingDefaults)
gpmf.p.B=ip.Results.cov_prior;
end
if init
% Set the function handles to the nested functions
gpmf.fh.geth = @gpmf_geth;
gpmf.fh.pak = @gpmf_pak;
gpmf.fh.unpak = @gpmf_unpak;
gpmf.fh.lp = @gpmf_lp;
gpmf.fh.lpg = @gpmf_lpg;
gpmf.fh.recappend = @gpmf_recappend;
end
end
function h = gpmf_geth(gpmf, x)
%GPMF_GETH Calculate the base function values for given input.
%
% Description
% H = GPMF_GETH(GPMF,X) takes in a mean function structure
% GPMF and inputs X. The function returns the squared base
% function values H in the given input points. If
% selectedVariables is used the function returns only the
% values corresponding active inputs. The base function values
% are returned as a matrix in which each row corresponds to
% one dimension and the first row is for the smallest
% dimension.
if ~isfield(gpmf,'selectedVariables')
h = x'.^2;
else
selectedVariables=gpmf.selectedVariables;
h=zeros(length(selectedVariables),length(x(:,1)));
for i=1:length(selectedVariables)
h(i,:)=x(:,selectedVariables(i))'.^2;
end
end
end
function [w, s] = gpmf_pak(gpmf, w)
%GPMF_PAK Combine GP mean function parameters into one vector
%
% Description
% W = GPMF_PAK(GPMF) takes a mean function structure GPMF and
% combines the mean function parameters and their
% hyperparameters into a single row vector W.
%
% w = [ gpmf.b
% (hyperparameters of gpmf.b)
% log(gpmf.B)
% (hyperparameters of gpmf.B)]'
%
% See also
% GPMF_UNPAK
w = []; s = {};
if ~isempty(gpmf.p.b)
w = gpmf.b;
if numel(gpmf.b)>1
s = [s; sprintf('gpmf_squared.b x %d',numel(gpmf.b))];
else
s = [s; 'gpmf_squared.b'];
end
% Hyperparameters (prior parameters) of b
[wh sh] = gpmf.p.b.fh.pak(gpmf.p.b);
w = [w wh];
s = [s; sh];
end
if ~isempty(gpmf.p.B)
w = [w log(gpmf.B)];
if numel(gpmf.B)>1
s = [s; sprintf('log(gpmf_squared.B x %d)',numel(gpmf.B))];
else
s = [s; 'log(gpmf_squared.B)'];
end
% Hyperparameters (prior parameters) of B
[wh sh] = gpmf.p.B.fh.pak(gpmf.p.B);
w = [w wh];
s = [s; sh];
end
end
function [gpmf, w] = gpmf_unpak(gpmf, w)
%GPMF_UNPAK Sets the mean function parameters
% into the structure
%
% Description
% [GPMF, W] = GPMF_UNPAK(GPMF, W) takes a mean function
% structure GPMF and a hyper-parameter vector W, and returns a
% mean function structure identical to the input, except that
% the mean function hyper-parameters have been set to the
% values in W. Deletes the values set to GPMF from W and
% returns the modified W.
%
% Assignment is inverse of
% w = [ gpmf.b
% (hyperparameters of gpmf.b)
% log(gpmf.B)
% (hyperparameters of gpmf.B)]'
%
% See also
% GPMF_PAK
gpp=gpmf.p;
if ~isempty(gpp.b)
i2=length(gpmf.b);
i1=1;
gpmf.b = w(i1:i2);
w = w(i2+1:end);
% Hyperparameters of b
[p, w] = gpmf.p.b.fh.unpak(gpmf.p.b, w);
gpmf.p.b = p;
end
if ~isempty(gpp.B)
i2=length(gpmf.B);
i1=1;
gpmf.B = exp(w(i1:i2));
w = w(i2+1:end);
% Hyperparameters of B
[p, w] = gpmf.p.B.fh.unpak(gpmf.p.B, w);
gpmf.p.B = p;
end
end
function lp = gpmf_lp(gpmf)
%GPMF_LP Evaluate the log prior of mean function parameters
%
% Description
%
% See also
% Evaluate the prior contribution to the error. The parameters that
% are sampled are transformed, e.g., W = log(w) where w is all
% the "real" samples. On the other hand errors are evaluated in
% the W-space so we need take into account also the Jacobian of
% transformation, e.g., W -> w = exp(W). See Gelman et.al., 2004,
% Bayesian data Analysis, second edition, p24.
lp = 0;
gpp=gpmf.p;
if ~isempty(gpmf.p.b)
lp = lp + gpp.b.fh.lp(gpmf.b, ...
gpp.b);
end
if ~isempty(gpp.B)
lp = lp + gpp.B.fh.lp(gpmf.B, ...
gpp.B) +sum(log(gpmf.B));
end
end
function [lpg_b, lpg_B] = gpmf_lpg(gpmf)
%GPMF_LPG Evaluate gradient of the log prior with respect
% to the parameters.
%
% Description
% LPG = GPMF_LPG(GPMF) takes a mean function structure GPMF
% and returns LPG = d log (p(th))/dth, where th is the vector
% of parameters.
%
% See also
% GPMF_PAK, GPMF_UNPAK, GPMF_LP, GP_G
lpg_b=[]; lpg_B=[];
gpp=gpmf.p;
if ~isempty(gpmf.p.b)
lll = length(gpmf.b);
lpgs = gpp.b.fh.lpg(gpmf.b, gpp.b);
lpg_b = [lpgs(1:lll) lpgs(lll+1:end)]; %
end
if ~isempty(gpmf.p.B)
lll = length(gpmf.B);
lpgs = gpp.B.fh.lpg(gpmf.B, gpp.B);
lpg_B = [lpgs(1:lll).*gpmf.B+1 lpgs(lll+1:end)];
end
end
function recmf = gpmf_recappend(recmf, ri, gpmf)
%RECAPPEND Record append
%
% Description
%
% See also
% GP_MC and GP_MC -> RECAPPEND
% Initialize record
if nargin == 2
recmf.type = 'gpmf_squared';
% Initialize parameters
recmf.b= [];
recmf.B = [];
% Set the function handles
recmf.fh.geth = @gpmf_geth;
recmf.fh.pak = @gpmf_pak;
recmf.fh.unpak = @gpmf_unpak;
recmf.fh.lp = @gpmf_lp;
recmf.fh.lpg = @gpmf_lpg;
recmf.fh.recappend = @gpmf_recappend;
recmf.p=[];
recmf.p.b=[];
recmf.p.B=[];
if isfield(ri.p,'b') && ~isempty(ri.p.b)
recmf.p.b = ri.p.b;
end
if ~isempty(ri.p.B)
recmf.p.B = ri.p.B;
end
return
end
gpp = gpmf.p;
% record magnSigma2
if ~isempty(gpmf.b)
recmf.b(ri,:)=gpmf.b;
if ~isempty(recmf.p.b)
recmf.p.b = gpp.b.fh.recappend(recmf.p.b, ri, gpmf.p.b);
end
elseif ri==1
recmf.b=[];
end
if ~isempty(gpmf.B)
recmf.B(ri,:)=gpmf.B;
if ~isempty(recmf.p.B)
recmf.p.B = gpp.B.fh.recappend(recmf.p.B, ri, gpmf.p.B);
end
elseif ri==1
recmf.B=[];
end
end
|
github
|
lcnbeapp/beapp-master
|
gpmf_linear.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gpmf_linear.m
| 7,746 |
utf_8
|
093d9284e00323d01f6ed94a25f6947a
|
function gpmf = gpmf_linear(varargin)
%GPMF_LINEAR Create a linear mean function
%
% Description
% GPMF = GPMF_LINEAR('PARAM1',VALUE1,'PARAM2,VALUE2,...)
% creates linear mean function structure in which the named
% parameters have the specified values. Any unspecified
% parameters are set to default values.
%
% GPMF = GPMF_LINEAR(GPMF,'PARAM1',VALUE1,'PARAM2,VALUE2,...)
% modify a mean function structure with the named parameters
% altered with the specified values.
%
% Parameters for linear mean function
% prior_mean - prior mean (scalar or vector) for base
% functions' weight prior (default 0)
% prior_cov - prior covariances (scalar or vector)
% for base functions' prior corresponding
% each selected input dimension. In
% multiple dimension case prior_cov is a
% struct containing scalars or vectors.
% The covariances must all be either
% scalars (diagonal cov.matrix) or
% vectors (for non-diagonal cov.matrix)
% (default 100)
% selectedVariables - vector defining which inputs are active
%
% See also
% GP_SET, GPMF_CONSTANT, GPMF_SQUARED
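%
% Example (illustrative sketch; the parameter values are arbitrary and
% the gp_set call assumes the standard GPstuff 'meanf' option):
%
% gpmf = gpmf_linear('prior_mean', 0, 'prior_cov', 10, ...
% 'selectedVariables', [1 2]);
% gp = gp_set('cf', {gpcf_sexp()}, 'meanf', {gpmf});
%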
% Copyright (c) 2010 Tuomas Nikoskinen
% Copyright (c) 2011 Jarno Vanhatalo
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'GPMF_LINEAR';
ip.addOptional('gpmf', [], @isstruct);
ip.addParamValue('selectedVariables',[], @(x) isvector(x) && all(x>0));
ip.addParamValue('prior_mean',0, @(x) isvector(x));
ip.addParamValue('prior_cov',100, @(x) isvector(x));
ip.addParamValue('mean_prior', [], @isstruct);
ip.addParamValue('cov_prior', [], @isstruct);
ip.parse(varargin{:});
gpmf=ip.Results.gpmf;
if isempty(gpmf)
% Initialize a mean function
init=true;
gpmf.type = 'gpmf_linear';
else
% Modify a mean function
if ~isfield(gpmf,'type') || ~isequal(gpmf.type,'gpmf_linear')
error('First argument does not seem to be a linear mean function')
end
init=false;
end
% Initialize parameters
if init || ~ismember('prior_mean',ip.UsingDefaults)
gpmf.b=ip.Results.prior_mean(:)';
end
if init || ~ismember('prior_cov',ip.UsingDefaults)
gpmf.B=ip.Results.prior_cov(:)';
end
if ~ismember('selectedVariables',ip.UsingDefaults)
gpmf.selectedVariables=ip.Results.selectedVariables;
end
if init || ~ismember('mean_prior',ip.UsingDefaults)
gpmf.p.b=ip.Results.mean_prior;
end
if init || ~ismember('cov_prior',ip.UsingDefaults)
gpmf.p.B=ip.Results.cov_prior;
end
if init
% Set the function handles to the nested functions
gpmf.fh.geth = @gpmf_geth;
gpmf.fh.pak = @gpmf_pak;
gpmf.fh.unpak = @gpmf_unpak;
gpmf.fh.lp = @gpmf_lp;
gpmf.fh.lpg = @gpmf_lpg;
gpmf.fh.recappend = @gpmf_recappend;
end
end
function h = gpmf_geth(gpmf, x)
%GPMF_GETH Calculate the base function values for given input.
%
% Description
% H = GPMF_GETH(GPMF,X) takes in a mean function structure
% GPMF and inputs X. The function returns the linear base
% function values H in the given input points. If
% selectedVariables is used the function returns only the
% values corresponding active inputs. The base function values
% are returned as a matrix in which each row corresponds to
% one dimension and the first row is for the smallest
% dimension.
if ~isfield(gpmf,'selectedVariables')
h=x';
else
h=x(:,gpmf.selectedVariables)';
end
end
function [w, s] = gpmf_pak(gpmf, w)
%GPMF_PAK Combine GP mean function parameters into one vector
%
% Description
%
% See also
w = []; s = {};
if ~isempty(gpmf.p.b)
w = gpmf.b;
if numel(gpmf.b)>1
s = [s; sprintf('gpmf_linear.b x %d',numel(gpmf.b))];
else
s = [s; 'gpmf_linear.b'];
end
% Hyperparameters of b
[wh sh] = gpmf.p.b.fh.pak(gpmf.p.b);
w = [w wh];
s = [s; sh];
end
if ~isempty(gpmf.p.B)
w = [w log(gpmf.B)];
if numel(gpmf.B)>1
s = [s; sprintf('log(gpmf_linear.B x %d)',numel(gpmf.B))];
else
s = [s; 'log(gpmf_linear.B)'];
end
% Hyperparameters of B
[wh sh] = gpmf.p.B.fh.pak(gpmf.p.B);
w = [w wh];
s = [s; sh];
end
end
function [gpmf, w] = gpmf_unpak(gpmf, w)
%GPMF_UNPAK Sets the mean function parameters
% into the structure
%
% Description
%
% See also
gpp=gpmf.p;
if ~isempty(gpp.b)
i2=length(gpmf.b);
i1=1;
gpmf.b = w(i1:i2);
w = w(i2+1:end);
% Hyperparameters of b
[p, w] = gpmf.p.b.fh.unpak(gpmf.p.b, w);
gpmf.p.b = p;
end
if ~isempty(gpp.B)
i2=length(gpmf.B);
i1=1;
gpmf.B = exp(w(i1:i2));
w = w(i2+1:end);
% Hyperparameters of B
[p, w] = gpmf.p.B.fh.unpak(gpmf.p.B, w);
gpmf.p.B = p;
end
end
function lp = gpmf_lp(gpmf)
%GPMF_LP Evaluate the log prior of mean function parameters
%
% Description
%
% See also
% Evaluate the prior contribution to the error. The parameters that
% are sampled are transformed, e.g., W = log(w) where w is all
% the "real" samples. On the other hand errors are evaluated in
% the W-space so we need take into account also the Jacobian of
% transformation, e.g., W -> w = exp(W). See Gelman et.al., 2004,
% Bayesian data Analysis, second edition, p24.
lp = 0;
gpp=gpmf.p;
if ~isempty(gpmf.p.b)
lp = lp + gpp.b.fh.lp(gpmf.b, ...
gpp.b);
end
if ~isempty(gpp.B)
lp = lp + gpp.B.fh.lp(gpmf.B, ...
gpp.B) +sum(log(gpmf.B));
end
end
function [lpg_b, lpg_B] = gpmf_lpg(gpmf)
%GPMF_LPG Evaluate gradient of the log prior with respect
% to the parameters.
%
% Description
% LPG = GPMF_LPG(GPMF) takes a mean function structure GPMF
% and returns LPG = d log (p(th))/dth, where th is the vector
% of parameters.
%
% See also
% GPMF_PAK, GPMF_UNPAK, GPMF_LP, GP_G
lpg_b=[]; lpg_B=[];
gpp=gpmf.p;
if ~isempty(gpmf.p.b)
lll = length(gpmf.b);
lpgs = gpp.b.fh.lpg(gpmf.b, gpp.b);
lpg_b = [lpgs(1:lll) lpgs(lll+1:end)]; %.*gpmf.b+1
end
if ~isempty(gpmf.p.B)
lll = length(gpmf.B);
lpgs = gpp.B.fh.lpg(gpmf.B, gpp.B);
lpg_B = [lpgs(1:lll).*gpmf.B+1 lpgs(lll+1:end)];
end
end
function recmf = gpmf_recappend(recmf, ri, gpmf)
%RECAPPEND Record append
%
% Description
%
% See also
% GP_MC and GP_MC -> RECAPPEND
% Initialize record
if nargin == 2
recmf.type = 'gpmf_linear';
% Initialize parameters
recmf.b= [];
recmf.B = [];
% Set the function handles
recmf.fh.geth = @gpmf_geth;
recmf.fh.pak = @gpmf_pak;
recmf.fh.unpak = @gpmf_unpak;
recmf.fh.lp = @gpmf_lp;
recmf.fh.lpg = @gpmf_lpg;
recmf.fh.recappend = @gpmf_recappend;
recmf.p=[];
recmf.p.b=[];
recmf.p.B=[];
if isfield(ri.p,'b') && ~isempty(ri.p.b)
recmf.p.b = ri.p.b;
end
if ~isempty(ri.p.B)
recmf.p.B = ri.p.B;
end
return
end
gpp = gpmf.p;
% record magnSigma2
if ~isempty(gpmf.b)
recmf.b(ri,:)=gpmf.b;
if ~isempty(recmf.p.b)
recmf.p.b = gpp.b.fh.recappend(recmf.p.b, ri, gpmf.p.b);
end
elseif ri==1
recmf.b=[];
end
if ~isempty(gpmf.B)
recmf.B(ri,:)=gpmf.B;
if ~isempty(recmf.p.B)
recmf.p.B = gpp.B.fh.recappend(recmf.p.B, ri, gpmf.p.B);
end
elseif ri==1
recmf.B=[];
end
end
|
github
|
lcnbeapp/beapp-master
|
gpcf_matern52.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gpcf_matern52.m
| 28,809 |
utf_8
|
1086d8b7cac3a06976c064521dc35ad8
|
function gpcf = gpcf_matern52(varargin)
%GPCF_MATERN52 Create a Matern nu=5/2 covariance function
%
% Description
% GPCF = GPCF_MATERN52('PARAM1',VALUE1,'PARAM2,VALUE2,...)
% creates Matern nu=5/2 covariance function structure in which
% the named parameters have the specified values. Any
% unspecified parameters are set to default values.
%
% GPCF = GPCF_MATERN52(GPCF,'PARAM1',VALUE1,'PARAM2,VALUE2,...)
% modify a covariance function structure with the named
% parameters altered with the specified values.
%
% Parameters for Matern nu=5/2 covariance function [default]
% magnSigma2 - magnitude (squared) [0.1]
% lengthScale - length scale for each input. [1]
% This can be either scalar corresponding
% to an isotropic function or vector
% defining own length-scale for each input
% direction.
% magnSigma2_prior - prior for magnSigma2 [prior_logunif]
% lengthScale_prior - prior for lengthScale [prior_t]
% metric - metric structure used by the covariance function []
% selectedVariables - vector defining which inputs are used [all]
% selectedVariables is shorthand for using
% metric_euclidean with corresponding components
%
% Note! If the prior is 'prior_fixed' then the parameter in
% question is considered fixed and it is not handled in
% optimization, grid integration, MCMC etc.
%
% See also
% GP_SET, GPCF_*, PRIOR_*, METRIC_*
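%
% Example (illustrative sketch; the parameter values are arbitrary):
%
% gpcf = gpcf_matern52('lengthScale', [1 2], 'magnSigma2', 0.5);
% gp = gp_set('cf', {gpcf});
%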
% Copyright (c) 2007-2010 Jarno Vanhatalo
% Copyright (c) 2010 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
if nargin>0 && ischar(varargin{1}) && ismember(varargin{1},{'init' 'set'})
% remove init and set
varargin(1)=[];
end
ip=inputParser;
ip.FunctionName = 'GPCF_MATERN52';
ip.addOptional('gpcf', [], @isstruct);
ip.addParamValue('magnSigma2',0.1, @(x) isscalar(x) && x>0);
ip.addParamValue('lengthScale',1, @(x) isvector(x) && all(x>0));
ip.addParamValue('metric',[], @isstruct);
ip.addParamValue('magnSigma2_prior', prior_logunif(), ...
@(x) isstruct(x) || isempty(x));
ip.addParamValue('lengthScale_prior',prior_t(), ...
@(x) isstruct(x) || isempty(x));
ip.addParamValue('selectedVariables',[], @(x) isempty(x) || ...
(isvector(x) && all(x>0)));
ip.parse(varargin{:});
gpcf=ip.Results.gpcf;
if isempty(gpcf)
init=true;
gpcf.type = 'gpcf_matern52';
else
if ~isfield(gpcf,'type') || ~isequal(gpcf.type,'gpcf_matern52')
error('First argument does not seem to be a valid covariance function structure')
end
init=false;
end
if init
% Set the function handles to the subfunctions
gpcf.fh.pak = @gpcf_matern52_pak;
gpcf.fh.unpak = @gpcf_matern52_unpak;
gpcf.fh.lp = @gpcf_matern52_lp;
gpcf.fh.lpg = @gpcf_matern52_lpg;
gpcf.fh.cfg = @gpcf_matern52_cfg;
gpcf.fh.ginput = @gpcf_matern52_ginput;
gpcf.fh.cov = @gpcf_matern52_cov;
gpcf.fh.trcov = @gpcf_matern52_trcov;
gpcf.fh.trvar = @gpcf_matern52_trvar;
gpcf.fh.recappend = @gpcf_matern52_recappend;
end
% Initialize parameters
if init || ~ismember('lengthScale',ip.UsingDefaults)
gpcf.lengthScale = ip.Results.lengthScale;
end
if init || ~ismember('magnSigma2',ip.UsingDefaults)
gpcf.magnSigma2 = ip.Results.magnSigma2;
end
% Initialize prior structure
if init
gpcf.p=[];
end
if init || ~ismember('lengthScale_prior',ip.UsingDefaults)
gpcf.p.lengthScale=ip.Results.lengthScale_prior;
end
if init || ~ismember('magnSigma2_prior',ip.UsingDefaults)
gpcf.p.magnSigma2=ip.Results.magnSigma2_prior;
end
%Initialize metric
if ~ismember('metric',ip.UsingDefaults)
if ~isempty(ip.Results.metric)
gpcf.metric = ip.Results.metric;
gpcf = rmfield(gpcf, 'lengthScale');
gpcf.p = rmfield(gpcf.p, 'lengthScale');
elseif isfield(gpcf,'metric')
if ~isfield(gpcf,'lengthScale')
gpcf.lengthScale = gpcf.metric.lengthScale;
end
if ~isfield(gpcf.p,'lengthScale')
gpcf.p.lengthScale = gpcf.metric.p.lengthScale;
end
gpcf = rmfield(gpcf, 'metric');
end
end
% selectedVariables options implemented using metric_euclidean
if ~ismember('selectedVariables',ip.UsingDefaults)
if ~isfield(gpcf,'metric')
if ~isempty(ip.Results.selectedVariables)
gpcf.selectedVariables = ip.Results.selectedVariables;
% gpcf.metric=metric_euclidean('components',...
% num2cell(ip.Results.selectedVariables),...
% 'lengthScale',gpcf.lengthScale,...
% 'lengthScale_prior',gpcf.p.lengthScale);
% gpcf = rmfield(gpcf, 'lengthScale');
% gpcf.p = rmfield(gpcf.p, 'lengthScale');
end
elseif isfield(gpcf,'metric')
if ~isempty(ip.Results.selectedVariables)
gpcf.metric=metric_euclidean(gpcf.metric,...
'components',...
num2cell(ip.Results.selectedVariables));
if ~ismember('lengthScale',ip.UsingDefaults)
gpcf.metric.lengthScale=ip.Results.lengthScale;
gpcf = rmfield(gpcf, 'lengthScale');
end
if ~ismember('lengthScale_prior',ip.UsingDefaults)
gpcf.metric.p.lengthScale=ip.Results.lengthScale_prior;
gpcf.p = rmfield(gpcf.p, 'lengthScale');
end
else
if ~isfield(gpcf,'lengthScale')
gpcf.lengthScale = gpcf.metric.lengthScale;
end
if ~isfield(gpcf.p,'lengthScale')
gpcf.p.lengthScale = gpcf.metric.p.lengthScale;
end
gpcf = rmfield(gpcf, 'metric');
end
end
end
end
function [w,s] = gpcf_matern52_pak(gpcf, w)
%GPCF_MATERN52_PAK Combine GP covariance function hyper-parameters
% into one vector.
%
% Description
% W = GPCF_MATERN52_PAK(GPCF) takes a covariance function
% structure GPCF and combines the covariance function
% parameters and their hyperparameters into a single row
% vector W. This is a mandatory subfunction used for example
% in energy and gradient computations.
%
% w = [ log(gpcf.magnSigma2)
% (hyperparameters of gpcf.magnSigma2)
% log(gpcf.lengthScale(:))
% (hyperparameters of gpcf.lengthScale)]'
%
% See also
% GPCF_MATERN52_UNPAK
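%
% Example (illustrative; assumes the default priors, which add no
% hyperparameters of their own to the vector):
%
% gpcf = gpcf_matern52('magnSigma2', 2, 'lengthScale', [1 3]);
% w = gpcf.fh.pak(gpcf)
% % w = [log(2) log(1) log(3)] = [0.6931 0 1.0986]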
w = []; s = {};
if ~isempty(gpcf.p.magnSigma2)
w = [w log(gpcf.magnSigma2)];
s = [s; 'log(matern52.magnSigma2)'];
% Hyperparameters of magnSigma2
[wh sh] = gpcf.p.magnSigma2.fh.pak(gpcf.p.magnSigma2);
w = [w wh];
s = [s; sh];
end
if isfield(gpcf,'metric')
[wm sm] = gpcf.metric.fh.pak(gpcf.metric);
w = [w wm];
s = [s; sm];
else
if ~isempty(gpcf.p.lengthScale)
w = [w log(gpcf.lengthScale)];
if numel(gpcf.lengthScale)>1
s = [s; sprintf('log(matern52.lengthScale x %d)',numel(gpcf.lengthScale))];
else
s = [s; 'log(matern52.lengthScale)'];
end
% Hyperparameters of lengthScale
[wh sh] = gpcf.p.lengthScale.fh.pak(gpcf.p.lengthScale);
w = [w wh];
s = [s; sh];
end
end
end
function [gpcf, w] = gpcf_matern52_unpak(gpcf, w)
%GPCF_MATERN52_UNPAK Sets the covariance function parameters
% into the structure
%
% Description
% [GPCF, W] = GPCF_MATERN52_UNPAK(GPCF, W) takes a covariance
% function structure GPCF and a hyper-parameter vector W,
% and returns a covariance function structure identical to
% the input, except that the covariance hyper-parameters have
% been set to the values in W. Deletes the values set to GPCF
% from W and returns the modified W. This is a mandatory
% subfunction used for example in energy and gradient computations.
%
% Assignment is inverse of
% w = [ log(gpcf.magnSigma2)
% (hyperparameters of gpcf.magnSigma2)
% log(gpcf.lengthScale(:))
% (hyperparameters of gpcf.lengthScale)]'
%
% See also
% GPCF_MATERN52_PAK
gpp=gpcf.p;
if ~isempty(gpp.magnSigma2)
gpcf.magnSigma2 = exp(w(1));
w = w(2:end);
% Hyperparameters of magnSigma2
[p, w] = gpcf.p.magnSigma2.fh.unpak(gpcf.p.magnSigma2, w);
gpcf.p.magnSigma2 = p;
end
if isfield(gpcf,'metric')
[metric, w] = gpcf.metric.fh.unpak(gpcf.metric, w);
gpcf.metric = metric;
else
if ~isempty(gpp.lengthScale)
i1=1;
i2=length(gpcf.lengthScale);
gpcf.lengthScale = exp(w(i1:i2));
w = w(i2+1:end);
% Hyperparameters of lengthScale
[p, w] = gpcf.p.lengthScale.fh.unpak(gpcf.p.lengthScale, w);
gpcf.p.lengthScale = p;
end
end
end
function lp = gpcf_matern52_lp(gpcf)
%GPCF_MATERN52_LP Evaluate the log prior of covariance function parameters
%
% Description
% LP = GPCF_MATERN52_LP(GPCF) takes a covariance function
% structure GPCF and evaluates log p(th) x J, where th is a
% vector of MATERN52 parameters and J is the Jacobian of the
% transformation exp(w) = th. (Note that the parameters are
% log transformed when packed.) This is a mandatory
% subfunction used for example in energy computations.
%
% See also
% GPCF_MATERN52_PAK, GPCF_MATERN52_UNPAK, GPCF_MATERN52_LPG, GP_E
%
% Evaluate the prior contribution to the error. The parameters that
% are sampled are transformed, e.g., W = log(w) where w is all
% the "real" samples. On the other hand errors are evaluated in
% the W-space so we need take into account also the Jacobian of
% transformation, e.g., W -> w = exp(W). See Gelman et.al., 2004,
% Bayesian data Analysis, second edition, p24.
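% For example, with the default prior_logunif the prior density of
% magnSigma2 is proportional to 1/magnSigma2, so its lp term is
% -log(magnSigma2); the Jacobian term log(magnSigma2) added below
% cancels it, i.e. the prior is flat on the log scale.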
lp = 0;
gpp=gpcf.p;
if ~isempty(gpcf.p.magnSigma2)
lp = lp +gpp.magnSigma2.fh.lp(gpcf.magnSigma2, ...
gpp.magnSigma2) +log(gpcf.magnSigma2);
end
if isfield(gpcf,'metric')
lp = lp +gpcf.metric.fh.lp(gpcf.metric);
elseif ~isempty(gpp.lengthScale)
lp = lp +gpp.lengthScale.fh.lp(gpcf.lengthScale, ...
gpp.lengthScale) +sum(log(gpcf.lengthScale));
end
end
function lpg = gpcf_matern52_lpg(gpcf)
%GPCF_MATERN52_LPG Evaluate gradient of the log prior with respect
% to the parameters.
%
% Description
% LPG = GPCF_MATERN52_LPG(GPCF) takes a covariance function
% structure GPCF and returns LPG = d log (p(th))/dth, where th
% is the vector of parameters. This is a mandatory subfunction
% used for example in gradient computations.
%
% See also
% GPCF_MATERN52_PAK, GPCF_MATERN52_UNPAK, GPCF_MATERN52_LP, GP_G
lpg = [];
gpp=gpcf.p;
if ~isempty(gpcf.p.magnSigma2)
lpgs = gpp.magnSigma2.fh.lpg(gpcf.magnSigma2, gpp.magnSigma2);
lpg = [lpg lpgs(1).*gpcf.magnSigma2+1 lpgs(2:end)];
end
if isfield(gpcf,'metric')
lpg_dist = gpcf.metric.fh.lpg(gpcf.metric);
lpg=[lpg lpg_dist];
else
if ~isempty(gpcf.p.lengthScale)
lll = length(gpcf.lengthScale);
lpgs = gpp.lengthScale.fh.lpg(gpcf.lengthScale, gpp.lengthScale);
lpg = [lpg lpgs(1:lll).*gpcf.lengthScale+1 lpgs(lll+1:end)];
end
end
end
function DKff = gpcf_matern52_cfg(gpcf, x, x2, mask, i1)
%GPCF_MATERN52_CFG Evaluate gradient of covariance function
% with respect to the parameters
%
% Description
% DKff = GPCF_MATERN52_CFG(GPCF, X) takes a
% covariance function structure GPCF, a matrix X of input
% vectors and returns DKff, the gradients of covariance matrix
% Kff = k(X,X) with respect to th (cell array with matrix
% elements). This is a mandatory subfunction used for example
% in gradient computations.
%
% DKff = GPCF_MATERN52_CFG(GPCF, X, X2) takes a
% covariance function structure GPCF, a matrix X of input
% vectors and returns DKff, the gradients of covariance matrix
% Kff = k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using sparse
% approximations (e.g. FIC).
%
% DKff = GPCF_MATERN52_CFG(GPCF, X, [], MASK)
% takes a covariance function structure GPCF, a matrix X
% of input vectors and returns DKff, the diagonal of gradients
% of covariance matrix Kff = k(X,X2) with respect to th (cell
% array with matrix elements). This subfunction is needed when
% using sparse approximations (e.g. FIC).
%
% DKff = GPCF_MATERN52_CFG(GPCF, X, X2, [], i) takes a
% covariance function structure GPCF, a matrix X of input
% vectors and returns DKff, the gradients of covariance matrix
% Kff = k(X,X2), or k(X,X) if X2 is empty, with respect to ith
% hyperparameter. This subfunction is needed when using memory
% save option in gp_set.
%
% See also
% GPCF_MATERN52_PAK, GPCF_MATERN52_UNPAK, GPCF_MATERN52_LP, GP_G
gpp=gpcf.p;
i2=1;
DKff = {};
gprior = [];
if nargin==5
% Use memory save option
savememory=1;
if i1==0
% Return number of hyperparameters
i=0;
if ~isempty(gpcf.p.magnSigma2)
i=1;
end
if ~isempty(gpcf.p.lengthScale)
i=i+length(gpcf.lengthScale);
end
DKff=i;
return
end
else
savememory=0;
end
% Evaluate: DKff{1} = d Kff / d magnSigma2
% DKff{2} = d Kff / d lengthScale
% NOTE! Here we have already taken into account that the parameters
% are transformed through log() and thus dK/dlog(p) = p * dK/dp
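% For example, d Kff / d log(magnSigma2) = magnSigma2 * dKff/dmagnSigma2
% = magnSigma2 * (Kff/magnSigma2) = Kff, which is why DKff{1} below is
% simply the training covariance Cdm.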
% evaluate the gradient for training covariance
if nargin == 2 || (isempty(x2) && isempty(mask))
Cdm = gpcf_matern52_trcov(gpcf, x);
ii1=0;
if ~isempty(gpcf.p.magnSigma2)
ii1 = ii1 +1;
DKff{ii1} = Cdm;
end
if isfield(gpcf,'metric')
dist = gpcf.metric.fh.dist(gpcf.metric, x);
distg = gpcf.metric.fh.distg(gpcf.metric, x);
gprior_dist = gpcf.metric.fh.lpg(gpcf.metric);
ma2 = gpcf.magnSigma2;
for i=1:length(distg)
ii1 = ii1+1;
DKff{ii1} = ma2.*(sqrt(5) + 10.*dist./3).*distg{i}.*exp(-sqrt(5).*dist);
DKff{ii1} = DKff{ii1} - ma2.*(1+sqrt(5).*dist+5.*dist.^2./3).*exp(-sqrt(5).*dist).*sqrt(5).*distg{i};
end
else
if isfield(gpcf, 'selectedVariables')
x = x(:,gpcf.selectedVariables);
end
[n, m] =size(x);
if ~savememory
i1=1:m;
else
if i1==1
DKff=DKff{1};
return
end
ii1=ii1-1;
i1=i1-1;
end
if ~isempty(gpcf.p.lengthScale)
ma2 = gpcf.magnSigma2;
% loop over all the lengthScales
if length(gpcf.lengthScale) == 1
% In the case of isotropic MATERN52
s = 1./gpcf.lengthScale;
dist = 0;
for i=1:m
dist = dist + bsxfun(@minus,x(:,i),x(:,i)').^2;
end
D = ma2./3.*(5.*dist.*s^2 + 5.*sqrt(5.*dist).*dist.*s.^3).*exp(-sqrt(5.*dist).*s);
ii1 = ii1+1;
DKff{ii1} = D;
else
% In the case ARD is used
s = 1./gpcf.lengthScale.^2;
dist = 0;
for i=1:m
dist = dist + s(i).*(bsxfun(@minus,x(:,i),x(:,i)')).^2;
end
dist=sqrt(dist);
for i=i1
D = ma2.*s(i).*((5+5.*sqrt(5).*dist)/3).*(bsxfun(@minus,x(:,i),x(:,i)')).^2.*exp(-sqrt(5).*dist);
ii1 = ii1+1;
DKff{ii1} = D;
end
end
end
end
% Evaluate the gradient of non-symmetric covariance (e.g. K_fu)
elseif nargin == 3 || isempty(mask)
if size(x,2) ~= size(x2,2)
error('gpcf_matern52 -> _ghyper: The number of columns in x and x2 has to be the same. ')
end
ii1=0;
K = gpcf.fh.cov(gpcf, x, x2);
if ~isempty(gpcf.p.magnSigma2)
ii1 = ii1 +1;
DKff{ii1} = K;
end
if isfield(gpcf,'metric')
dist = gpcf.metric.fh.dist(gpcf.metric, x, x2);
distg = gpcf.metric.fh.distg(gpcf.metric, x, x2);
gprior_dist = gpcf.metric.fh.lpg(gpcf.metric);
for i=1:length(distg)
ii1 = ii1+1;
ma2 = gpcf.magnSigma2;
DKff{ii1} = ma2.*(sqrt(5) + 10.*dist./3).*distg{i}.*exp(-sqrt(5).*dist);
DKff{ii1} = DKff{ii1} - ma2.*(1+sqrt(5).*dist+5.*dist.^2./3).*exp(-sqrt(5).*dist).*sqrt(5).*distg{i};
end
else
if isfield(gpcf, 'selectedVariables')
x = x(:,gpcf.selectedVariables);
x2 = x2(:,gpcf.selectedVariables);
end
[n, m] =size(x);
if ~savememory
i1=1:m;
else
if i1==1
DKff=DKff{1};
return
end
ii1=ii1-1;
i1=i1-1;
end
if ~isempty(gpcf.p.lengthScale)
% Evaluate help matrix for calculations of derivatives with respect
% to the lengthScale
if length(gpcf.lengthScale) == 1
% In the case of an isotropic matern52
s = 1./gpcf.lengthScale;
ma2 = gpcf.magnSigma2;
dist = 0;
for i=1:m
dist = dist + bsxfun(@minus,x(:,i),x2(:,i)').^2;
end
DK = ma2./3.*(5.*dist.*s^2 + 5.*sqrt(5.*dist).*dist.*s.^3).*exp(-sqrt(5.*dist).*s);
ii1 = ii1+1;
DKff{ii1} = DK;
else
% In the case ARD is used
s = 1./gpcf.lengthScale.^2;
ma2 = gpcf.magnSigma2;
dist = 0;
for i=1:m
dist = dist + s(i).*(bsxfun(@minus,x(:,i),x2(:,i)')).^2;
end
for i=i1
D1 = ma2.*exp(-sqrt(5.*dist)).*s(i).*(bsxfun(@minus,x(:,i),x2(:,i)')).^2;
DK = (5./3 + 5.*sqrt(5.*dist)/3).*D1;
ii1=ii1+1;
DKff{ii1} = DK;
end
end
end
end
% Evaluate: DKff{1} = d mask(Kff,I) / d magnSigma2
% DKff{2...} = d mask(Kff,I) / d lengthScale
elseif nargin == 4 || nargin == 5
ii1=0;
if ~isempty(gpcf.p.magnSigma2) && (~savememory || all(i1==1))
ii1 = ii1+1;
DKff{ii1} = gpcf.fh.trvar(gpcf, x); % d mask(Kff,I) / d magnSigma2
end
if isfield(gpcf,'metric')
dist = 0;
distg = gpcf.metric.fh.distg(gpcf.metric, x, [], 1);
gprior_dist = gpcf.metric.fh.lpg(gpcf.metric);
for i=1:length(distg)
ii1 = ii1+1;
DKff{ii1} = 0;
end
else
if ~isempty(gpcf.p.lengthScale)
for i2=1:length(gpcf.lengthScale)
ii1 = ii1+1;
DKff{ii1} = 0; % d mask(Kff,I) / d lengthScale
end
end
end
end
if savememory
DKff=DKff{1};
end
end
function DKff = gpcf_matern52_ginput(gpcf, x, x2, i1)
%GPCF_MATERN52_GINPUT Evaluate gradient of covariance function with
% respect to x.
%
% Description
% DKff = GPCF_MATERN52_GINPUT(GPCF, X) takes a covariance
% function structure GPCF, a matrix X of input vectors
% and returns DKff, the gradients of covariance matrix Kff =
% k(X,X) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% DKff = GPCF_MATERN52_GINPUT(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors
% and returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% DKff = GPCF_MATERN52_GINPUT(GPCF, X, X2, [], i) takes a covariance
% function structure GPCF, a matrix X of input vectors
% and returns DKff, the gradients of covariance matrix Kff =
% k(X,X2), or k(X,X) if X2 is empty, with respect to ith covariate
% in X. This subfunction is needed when using memory save option
% in gp_set.
%
% See also
% GPCF_MATERN52_PAK, GPCF_MATERN52_UNPAK, GPCF_MATERN52_LP, GP_G
[n, m] =size(x);
ma2 = gpcf.magnSigma2;
ii1 = 0;
if nargin==4
% Use memory save option
savememory=1;
if i1==0
% Return number of covariates
if isfield(gpcf,'selectedVariables')
DKff=length(gpcf.selectedVariables);
else
DKff=m;
end
return
end
else
savememory=0;
end
if nargin == 2 || isempty(x2)
if isfield(gpcf,'metric')
K = gpcf.fh.trcov(gpcf, x);
dist = gpcf.metric.fh.dist(gpcf.metric, x);
gdist = gpcf.metric.fh.ginput(gpcf.metric, x);
for i=1:length(gdist)
ii1 = ii1+1;
ma2 = gpcf.magnSigma2;
DKff{ii1} = ma2.*(sqrt(5) + 10.*dist./3).*gdist{i}.*exp(-sqrt(5).*dist);
DKff{ii1} = DKff{ii1} - ma2.*(1+sqrt(5).*dist+5.*dist.^2./3).*exp(-sqrt(5).*dist).*sqrt(5).*gdist{i};
end
else
if length(gpcf.lengthScale) == 1
% In the case of an isotropic
s = repmat(1./gpcf.lengthScale.^2, 1, m);
else
s = 1./gpcf.lengthScale.^2;
end
dist=0;
for i2=1:m
dist = dist + s(i2).*(bsxfun(@minus,x(:,i2),x(:,i2)')).^2;
end
dist=sqrt(dist);
if ~savememory
i1=1:m;
end
for i=i1
for j = 1:n
D1 = zeros(n,n);
D1(j,:) = sqrt(s(i)).*bsxfun(@minus,x(j,i),x(:,i)');
D1 = D1 + D1';
DK = ma2.*(10/3 - 5 - 5.*sqrt(5).*dist./3).*exp(-sqrt(5).*dist).*D1;
ii1 = ii1 + 1;
DKff{ii1} = DK;
end
end
end
elseif nargin == 3 || nargin == 4
if isfield(gpcf,'metric')
K = gpcf.fh.cov(gpcf, x, x2);
dist = gpcf.metric.fh.dist(gpcf.metric, x, x2);
gdist = gpcf.metric.fh.ginput(gpcf.metric, x, x2);
ma2 = gpcf.magnSigma2;
for i=1:length(gdist)
ii1 = ii1+1;
DKff{ii1} = ma2.*(sqrt(5) + 10.*dist./3).*gdist{i}.*exp(-sqrt(5).*dist);
DKff{ii1} = DKff{ii1} - ma2.*(1+sqrt(5).*dist+5.*dist.^2./3).*exp(-sqrt(5).*dist).*sqrt(5).*gdist{i};
end
else
[n2, m2] =size(x2);
if length(gpcf.lengthScale) == 1
s = repmat(1./gpcf.lengthScale.^2, 1, m);
else
s = 1./gpcf.lengthScale.^2;
end
dist=0;
for i2=1:m
dist = dist + s(i2).*(bsxfun(@minus,x(:,i2),x2(:,i2)')).^2;
end
dist=sqrt(dist);
ii1 = 0;
if ~savememory
i1=1:m;
end
for i=i1
for j = 1:n
D1 = zeros(n,n2);
D1(j,:) = sqrt(s(i)).*bsxfun(@minus,x(j,i),x2(:,i)');
DK = ma2.*(10/3 - 5 - 5.*sqrt(5).*dist./3).*exp(-sqrt(5).*dist).*D1;
ii1 = ii1 + 1;
DKff{ii1} = DK;
end
end
end
end
end
function C = gpcf_matern52_cov(gpcf, x1, x2)
%GP_MATERN52_COV Evaluate covariance matrix between two input vectors
%
% Description
% C = GP_MATERN52_COV(GP, TX, X) takes in covariance function
% of a Gaussian process GP and two matrixes TX and X that
% contain input vectors to GP. Returns covariance matrix C.
% Every element ij of C contains covariance between inputs i
% in TX and j in X. This is a mandatory subfunction used for
% example in prediction and energy computations.
%
%
% See also
% GPCF_MATERN52_TRCOV, GPCF_MATERN52_TRVAR, GP_COV, GP_TRCOV
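%
% With the scaled distance d = sqrt(5*sum_k (x1_k-x2_k)^2/l_k^2) every
% element is k(d) = magnSigma2*(1 + d + d^2/3)*exp(-d), so in
% particular k(0) = magnSigma2.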
if isempty(x2)
x2=x1;
end
if size(x1,2)~=size(x2,2)
error('the number of columns of X1 and X2 has to be same')
end
if isfield(gpcf,'metric')
ma2 = gpcf.magnSigma2;
dist = sqrt(5).*gpcf.metric.fh.dist(gpcf.metric, x1, x2);
dist(dist<eps) = 0;
C = ma2.*(1 + dist + dist.^2./3).*exp(-dist);
C(C<eps)=0;
else
if isfield(gpcf, 'selectedVariables')
x1 = x1(:,gpcf.selectedVariables);
x2 = x2(:,gpcf.selectedVariables);
end
[n1,m1]=size(x1);
[n2,m2]=size(x2);
C=zeros(n1,n2);
ma2 = gpcf.magnSigma2;
% Evaluate the covariance
if ~isempty(gpcf.lengthScale)
s2 = 1./gpcf.lengthScale.^2;
% If ARD is not used make s a vector of
% equal elements
if size(s2)==1
s2 = repmat(s2,1,m1);
end
dist2=zeros(n1,n2);
for j=1:m1
dist2 = dist2 + s2(:,j).*(bsxfun(@minus,x1(:,j),x2(:,j)')).^2;
end
dist = sqrt(5.*dist2);
C = ma2.*(1 + dist + 5.*dist2./3).*exp(-dist);
end
C(C<eps)=0;
end
end
function C = gpcf_matern52_trcov(gpcf, x)
%GP_MATERN52_TRCOV Evaluate training covariance matrix of inputs
%
% Description
% C = GP_MATERN52_TRCOV(GP, TX) takes in covariance function
% of a Gaussian process GP and matrix TX that contains
% training input vectors. Returns covariance matrix C. Every
% element ij of C contains covariance between inputs i and j
% in TX. This is a mandatory subfunction used for example in
% prediction and energy computations.
%
% See also
% GPCF_MATERN52_COV, GPCF_MATERN52_TRVAR, GP_COV, GP_TRCOV
if isfield(gpcf,'metric')
ma2 = gpcf.magnSigma2;
dist = sqrt(5).*gpcf.metric.fh.dist(gpcf.metric, x);
C = ma2.*(1 + dist + dist.^2./3).*exp(-dist);
else
% Try to use the C-implementation
C = trcov(gpcf,x);
if isnan(C)
% If there wasn't C-implementation do here
if isfield(gpcf, 'selectedVariables')
x = x(:,gpcf.selectedVariables);
end
[n, m] =size(x);
s2 = 1./(gpcf.lengthScale).^2;
if size(s2)==1
s2 = repmat(s2,1,m);
end
ma2 = gpcf.magnSigma2;
% Here we take advantage of the
% symmetry of covariance matrix
C=zeros(n,n);
for i1=2:n
i1n=(i1-1)*n;
for i2=1:i1-1
ii=i1+(i2-1)*n;
for i3=1:m
C(ii)=C(ii)+s2(i3).*(x(i1,i3)-x(i2,i3)).^2; % the covariance function
end
C(i1n+i2)=C(ii);
end
end
dist = sqrt(5.*C);
C = ma2.*(1 + dist + 5.*C./3).*exp(-dist);
C(C<eps)=0;
end
end
end
function C = gpcf_matern52_trvar(gpcf, x)
%GP_MATERN52_TRVAR Evaluate training variance vector
%
% Description
% C = GP_MATERN52_TRVAR(GPCF, TX) takes in covariance function
% of a Gaussian process GPCF and matrix TX that contains
% training inputs. Returns variance vector C. Every element i
% of C contains variance of input i in TX. This is a mandatory
% subfunction used for example in prediction and energy computations.
%
%
% See also
% GPCF_MATERN52_COV, GP_COV, GP_TRCOV
[n, m] =size(x);
C = ones(n,1).*gpcf.magnSigma2;
C(C<eps)=0;
end
function reccf = gpcf_matern52_recappend(reccf, ri, gpcf)
%RECAPPEND Record append
%
% Description
% RECCF = GPCF_MATERN52_RECAPPEND(RECCF, RI, GPCF) takes a
% covariance function record structure RECCF, record index RI
% and covariance function structure GPCF with the current MCMC
% samples of the parameters. Returns RECCF which contains
% all the old samples and the current samples from GPCF .
% This subfunction is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC and GP_MC -> RECAPPEND
if nargin == 2
% Initialize the record
reccf.type = 'gpcf_matern52';
% Initialize parameters
reccf.lengthScale= [];
reccf.magnSigma2 = [];
% Set the function handles
reccf.fh.pak = @gpcf_matern52_pak;
reccf.fh.unpak = @gpcf_matern52_unpak;
reccf.fh.e = @gpcf_matern52_lp;
reccf.fh.lpg = @gpcf_matern52_lpg;
reccf.fh.cfg = @gpcf_matern52_cfg;
reccf.fh.cov = @gpcf_matern52_cov;
reccf.fh.trcov = @gpcf_matern52_trcov;
reccf.fh.trvar = @gpcf_matern52_trvar;
reccf.fh.recappend = @gpcf_matern52_recappend;
reccf.p=[];
reccf.p.lengthScale=[];
reccf.p.magnSigma2=[];
if isfield(ri.p,'lengthScale') && ~isempty(ri.p.lengthScale)
reccf.p.lengthScale = ri.p.lengthScale;
end
if ~isempty(ri.p.magnSigma2)
reccf.p.magnSigma2 = ri.p.magnSigma2;
end
if isfield(ri, 'selectedVariables')
reccf.selectedVariables = ri.selectedVariables;
end
else
% Append to the record
gpp = gpcf.p;
if ~isfield(gpcf,'metric')
% record lengthScale
reccf.lengthScale(ri,:)=gpcf.lengthScale;
if isfield(gpp,'lengthScale') && ~isempty(gpp.lengthScale)
reccf.p.lengthScale = gpp.lengthScale.fh.recappend(reccf.p.lengthScale, ri, gpcf.p.lengthScale);
end
end
% record magnSigma2
reccf.magnSigma2(ri,:)=gpcf.magnSigma2;
if isfield(gpp,'magnSigma2') && ~isempty(gpp.magnSigma2)
reccf.p.magnSigma2 = gpp.magnSigma2.fh.recappend(reccf.p.magnSigma2, ri, gpcf.p.magnSigma2);
end
end
end
|
github
|
lcnbeapp/beapp-master
|
gpcf_exp.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gpcf_exp.m
| 27,016 |
utf_8
|
e1b8f5e6efd82d342321a33398df19b1
|
function gpcf = gpcf_exp(varargin)
%GPCF_EXP Create an exponential covariance function
%
% Description
% GPCF = GPCF_EXP('PARAM1',VALUE1,'PARAM2,VALUE2,...) creates a
% exponential covariance function structure in which the named
% parameters have the specified values. Any unspecified
% parameters are set to default values.
%
% GPCF = GPCF_EXP(GPCF,'PARAM1',VALUE1,'PARAM2,VALUE2,...)
% modify a covariance function structure with the named
% parameters altered with the specified values.
%
% Parameters for exponential covariance function [default]
% magnSigma2 - magnitude (squared) [0.1]
% lengthScale - length scale for each input. [1]
% This can be either scalar corresponding
% to an isotropic function or vector
% defining own length-scale for each input
% direction.
% magnSigma2_prior - prior for magnSigma2 [prior_logunif]
% lengthScale_prior - prior for lengthScale [prior_t]
% metric - metric structure used by the covariance function []
% selectedVariables - vector defining which inputs are used [all]
% selectedVariables is shorthand for using
% metric_euclidean with corresponding components
%
% Note! If the prior is 'prior_fixed' then the parameter in
% question is considered fixed and it is not handled in
% optimization, grid integration, MCMC etc.
%
% See also
% GP_SET, GPCF_*, PRIOR_*, METRIC_*
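%
% Example (illustrative sketch; the parameter values are arbitrary):
%
% gpcf = gpcf_exp('lengthScale', 0.5, 'magnSigma2', 1, ...
% 'selectedVariables', [1 3]);
% gp = gp_set('cf', {gpcf});
%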
% Copyright (c) 2007-2010 Jarno Vanhatalo
% Copyright (c) 2010 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
if nargin>0 && ischar(varargin{1}) && ismember(varargin{1},{'init' 'set'})
% remove init and set
varargin(1)=[];
end
ip=inputParser;
ip.FunctionName = 'GPCF_EXP';
ip.addOptional('gpcf', [], @isstruct);
ip.addParamValue('magnSigma2',0.1, @(x) isscalar(x) && x>0);
ip.addParamValue('lengthScale',1, @(x) isvector(x) && all(x>0));
ip.addParamValue('metric',[], @isstruct);
ip.addParamValue('magnSigma2_prior', prior_logunif(), ...
@(x) isstruct(x) || isempty(x));
ip.addParamValue('lengthScale_prior',prior_t(), ...
@(x) isstruct(x) || isempty(x));
ip.addParamValue('selectedVariables',[], @(x) isempty(x) || ...
(isvector(x) && all(x>0)));
ip.parse(varargin{:});
gpcf=ip.Results.gpcf;
if isempty(gpcf)
init=true;
gpcf.type = 'gpcf_exp';
else
if ~isfield(gpcf,'type') || ~isequal(gpcf.type,'gpcf_exp')
error('First argument does not seem to be a valid covariance function structure')
end
init=false;
end
if init
% Set the function handles to the subfunctions
gpcf.fh.pak = @gpcf_exp_pak;
gpcf.fh.unpak = @gpcf_exp_unpak;
gpcf.fh.lp = @gpcf_exp_lp;
gpcf.fh.lpg = @gpcf_exp_lpg;
gpcf.fh.cfg = @gpcf_exp_cfg;
gpcf.fh.ginput = @gpcf_exp_ginput;
gpcf.fh.cov = @gpcf_exp_cov;
gpcf.fh.trcov = @gpcf_exp_trcov;
gpcf.fh.trvar = @gpcf_exp_trvar;
gpcf.fh.recappend = @gpcf_exp_recappend;
end
% Initialize parameters
if init || ~ismember('lengthScale',ip.UsingDefaults)
gpcf.lengthScale = ip.Results.lengthScale;
end
if init || ~ismember('magnSigma2',ip.UsingDefaults)
gpcf.magnSigma2 = ip.Results.magnSigma2;
end
% Initialize prior structure
if init
gpcf.p=[];
end
if init || ~ismember('lengthScale_prior',ip.UsingDefaults)
gpcf.p.lengthScale=ip.Results.lengthScale_prior;
end
if init || ~ismember('magnSigma2_prior',ip.UsingDefaults)
gpcf.p.magnSigma2=ip.Results.magnSigma2_prior;
end
%Initialize metric
if ~ismember('metric',ip.UsingDefaults)
if ~isempty(ip.Results.metric)
gpcf.metric = ip.Results.metric;
gpcf = rmfield(gpcf, 'lengthScale');
gpcf.p = rmfield(gpcf.p, 'lengthScale');
elseif isfield(gpcf,'metric')
if ~isfield(gpcf,'lengthScale')
gpcf.lengthScale = gpcf.metric.lengthScale;
end
if ~isfield(gpcf.p,'lengthScale')
gpcf.p.lengthScale = gpcf.metric.p.lengthScale;
end
gpcf = rmfield(gpcf, 'metric');
end
end
% selectedVariables options implemented using metric_euclidean
if ~ismember('selectedVariables',ip.UsingDefaults)
if ~isfield(gpcf,'metric')
if ~isempty(ip.Results.selectedVariables)
gpcf.selectedVariables = ip.Results.selectedVariables;
% gpcf.metric=metric_euclidean('components',...
% num2cell(ip.Results.selectedVariables),...
% 'lengthScale',gpcf.lengthScale,...
% 'lengthScale_prior',gpcf.p.lengthScale);
% gpcf = rmfield(gpcf, 'lengthScale');
% gpcf.p = rmfield(gpcf.p, 'lengthScale');
end
elseif isfield(gpcf,'metric')
if ~isempty(ip.Results.selectedVariables)
gpcf.metric=metric_euclidean(gpcf.metric,...
'components',...
num2cell(ip.Results.selectedVariables));
if ~ismember('lengthScale',ip.UsingDefaults)
gpcf.metric.lengthScale=ip.Results.lengthScale;
gpcf = rmfield(gpcf, 'lengthScale');
end
if ~ismember('lengthScale_prior',ip.UsingDefaults)
gpcf.metric.p.lengthScale=ip.Results.lengthScale_prior;
gpcf.p = rmfield(gpcf.p, 'lengthScale');
end
else
if ~isfield(gpcf,'lengthScale')
gpcf.lengthScale = gpcf.metric.lengthScale;
end
if ~isfield(gpcf.p,'lengthScale')
gpcf.p.lengthScale = gpcf.metric.p.lengthScale;
end
gpcf = rmfield(gpcf, 'metric');
end
end
end
end
function [w,s] = gpcf_exp_pak(gpcf)
%GPCF_EXP_PAK Combine GP covariance function parameters into
% one vector
%
% Description
% W = GPCF_EXP_PAK(GPCF) takes a covariance function
% structure GPCF and combines the covariance function
% parameters and their hyperparameters into a single row
% vector W. This is a mandatory subfunction used for
% example in energy and gradient computations.
%
% w = [ log(gpcf.magnSigma2)
% (hyperparameters of gpcf.magnSigma2)
% log(gpcf.lengthScale(:))
% (hyperparameters of gpcf.lengthScale)]'
%
% See also
% GPCF_EXP_UNPAK
w = []; s = {};
if ~isempty(gpcf.p.magnSigma2)
w = [w log(gpcf.magnSigma2)];
s = [s; 'log(exp.magnSigma2)'];
% Hyperparameters of magnSigma2
[wh sh] = gpcf.p.magnSigma2.fh.pak(gpcf.p.magnSigma2);
w = [w wh];
s = [s; sh];
end
if isfield(gpcf,'metric')
[wm sm] = gpcf.metric.fh.pak(gpcf.metric);
w = [w wm];
s = [s; sm];
else
if ~isempty(gpcf.p.lengthScale)
w = [w log(gpcf.lengthScale)];
if numel(gpcf.lengthScale)>1
s = [s; sprintf('log(exp.lengthScale x %d)',numel(gpcf.lengthScale))];
else
s = [s; 'log(exp.lengthScale)'];
end
% Hyperparameters of lengthScale
[wh sh] = gpcf.p.lengthScale.fh.pak(gpcf.p.lengthScale);
w = [w wh];
s = [s; sh];
end
end
end
function [gpcf, w] = gpcf_exp_unpak(gpcf, w)
%GPCF_EXP_UNPAK Sets the covariance function parameters into
% the structure
%
% Description
% [GPCF, W] = GPCF_EXP_UNPAK(GPCF, W) takes a covariance
% function structure GPCF and a hyper-parameter vector W,
% and returns a covariance function structure identical
% to the input, except that the covariance hyper-parameters
% have been set to the values in W. Deletes the values set to
% GPCF from W and returns the modified W. This is a mandatory
% subfunction used for example in energy and gradient computations.
%
% Assignment is inverse of
% w = [ log(gpcf.magnSigma2)
% (hyperparameters of gpcf.magnSigma2)
% log(gpcf.lengthScale(:))
% (hyperparameters of gpcf.lengthScale)]'
%
% See also
% GPCF_EXP_PAK
gpp=gpcf.p;
if ~isempty(gpp.magnSigma2)
gpcf.magnSigma2 = exp(w(1));
w = w(2:end);
% Hyperparameters of magnSigma2
[p, w] = gpcf.p.magnSigma2.fh.unpak(gpcf.p.magnSigma2, w);
gpcf.p.magnSigma2 = p;
end
if isfield(gpcf,'metric')
[metric, w] = gpcf.metric.fh.unpak(gpcf.metric, w);
gpcf.metric = metric;
else
if ~isempty(gpp.lengthScale)
i1=1;
i2=length(gpcf.lengthScale);
gpcf.lengthScale = exp(w(i1:i2));
w = w(i2+1:end);
% Hyperparameters of lengthScale
[p, w] = gpcf.p.lengthScale.fh.unpak(gpcf.p.lengthScale, w);
gpcf.p.lengthScale = p;
end
end
end
function lp = gpcf_exp_lp(gpcf)
%GPCF_EXP_LP Evaluate the log prior of covariance function parameters
%
% Description
% LP = GPCF_EXP_LP(GPCF) takes a covariance function
% structure GPCF and returns log(p(th)), where th collects the
% parameters. This is a mandatory subfunction used for example
% in energy computations.
%
% See also
% GPCF_EXP_PAK, GPCF_EXP_UNPAK, GPCF_EXP_LPG, GP_E
% Evaluate the prior contribution to the error. The parameters that
% are sampled are transformed, e.g., W = log(w) where w is all
% the "real" samples. On the other hand errors are evaluated in
% the W-space so we need take into account also the Jacobian of
% transformation, e.g., W -> w = exp(W). See Gelman et.al., 2004,
% Bayesian data Analysis, second edition, p24.
lp = 0;
gpp=gpcf.p;
if ~isempty(gpcf.p.magnSigma2)
lp = lp +gpp.magnSigma2.fh.lp(gpcf.magnSigma2, ...
gpp.magnSigma2) +log(gpcf.magnSigma2);
end
if isfield(gpcf,'metric')
lp = lp +gpcf.metric.fh.lp(gpcf.metric);
elseif ~isempty(gpp.lengthScale)
lp = lp +gpp.lengthScale.fh.lp(gpcf.lengthScale, ...
gpp.lengthScale) +sum(log(gpcf.lengthScale));
end
end
function lpg = gpcf_exp_lpg(gpcf)
%GPCF_EXP_LPG Evaluate gradient of the log prior with respect
% to the parameters.
%
% Description
% LPG = GPCF_EXP_LPG(GPCF) takes a covariance function
% structure GPCF and returns LPG = d log (p(th))/dth, where th
% is the vector of parameters. This is a mandatory subfunction
% used for example in gradient computations.
%
% See also
% GPCF_EXP_PAK, GPCF_EXP_UNPAK, GPCF_EXP_LP, GP_G
lpg = [];
gpp=gpcf.p;
if ~isempty(gpcf.p.magnSigma2)
lpgs = gpp.magnSigma2.fh.lpg(gpcf.magnSigma2, gpp.magnSigma2);
lpg = [lpg lpgs(1).*gpcf.magnSigma2+1 lpgs(2:end)];
end
if isfield(gpcf,'metric')
lpg_dist = gpcf.metric.fh.lpg(gpcf.metric);
lpg=[lpg lpg_dist];
else
if ~isempty(gpcf.p.lengthScale)
lll = length(gpcf.lengthScale);
lpgs = gpp.lengthScale.fh.lpg(gpcf.lengthScale, gpp.lengthScale);
lpg = [lpg lpgs(1:lll).*gpcf.lengthScale+1 lpgs(lll+1:end)];
end
end
end
function DKff = gpcf_exp_cfg(gpcf, x, x2, mask, i1)
%GPCF_EXP_CFG Evaluate gradient of covariance function
% with respect to the parameters
%
% Description
% DKff = GPCF_EXP_CFG(GPCF, X) takes a covariance function
% structure GPCF, a matrix X of input vectors and returns
% DKff, the gradients of covariance matrix Kff = k(X,X) with
% respect to th (cell array with matrix elements). This is a
% mandatory subfunction used for example in gradient computations.
%
% DKff = GPCF_EXP_CFG(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using sparse
% approximations (e.g. FIC).
%
% DKff = GPCF_EXP_CFG(GPCF, X, [], MASK) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the diagonal of gradients of covariance matrix
% Kff = k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using sparse
% approximations (e.g. FIC).
%
% DKff = GPCF_EXP_CFG(GPCF, X, X2, [], i) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradient of covariance matrix Kff =
% k(X,X2), or k(X,X) if X2 is empty, with respect to ith
% hyperparameter. This subfunction is needed when using memory
% save option in gp_set.
%
% See also
% GPCF_EXP_PAK, GPCF_EXP_UNPAK, GPCF_EXP_LP, GP_G
gpp=gpcf.p;
i2=1;
DKff = {};
if nargin==5
% Use memory save option
savememory=1;
if i1==0
% Return number of hyperparameters
i=0;
if ~isempty(gpcf.p.magnSigma2)
i=1;
end
if ~isempty(gpcf.p.lengthScale)
i=i+length(gpcf.lengthScale);
end
DKff=i;
return
end
else
savememory=0;
end
% Evaluate: DKff{1} = d Kff / d magnSigma2
% DKff{2} = d Kff / d lengthScale
% NOTE! Here we have already taken into account that the parameters
% are transformed through log() and thus dK/dlog(p) = p * dK/dp
% evaluate the gradient for training covariance
if nargin == 2 || (isempty(x2) && isempty(mask))
Cdm = gpcf_exp_trcov(gpcf, x);
ii1=0;
if ~isempty(gpcf.p.magnSigma2)
ii1 = ii1 +1;
DKff{ii1} = Cdm;
end
if isfield(gpcf,'metric')
dist = gpcf.metric.fh.dist(gpcf.metric, x);
distg = gpcf.metric.fh.distg(gpcf.metric, x);
gprior_dist = gpcf.metric.fh.lpg(gpcf.metric);
for i=1:length(distg)
ii1 = ii1+1;
DKff{ii1} = -Cdm.*distg{i};
end
else
if isfield(gpcf,'selectedVariables')
x = x(:,gpcf.selectedVariables);
end
[n, m] =size(x);
if ~savememory
i1=1:m;
else
if i1==1
DKff=DKff{1};
return
end
ii1=ii1-1;
i1=i1-1;
end
if ~isempty(gpcf.p.lengthScale)
% loop over all the lengthScales
if length(gpcf.lengthScale) == 1
% In the case of isotropic EXP (no ARD)
s = 1./gpcf.lengthScale;
dist = 0;
for i=1:m
dist = dist + (bsxfun(@minus,x(:,i),x(:,i)')).^2;
end
D = Cdm.*s.*sqrt(dist);
ii1 = ii1+1;
DKff{ii1} = D;
else
% In the case ARD is used
s = 1./gpcf.lengthScale.^2;
dist = 0;
dist2 = 0;
for i=1:m
dist = dist + s(i).*(bsxfun(@minus,x(:,i),x(:,i)')).^2;
end
dist = sqrt(dist);
for i=i1
D = s(i).*Cdm.*(bsxfun(@minus,x(:,i),x(:,i)')).^2;
D(dist~=0) = D(dist~=0)./dist(dist~=0);
ii1 = ii1+1;
DKff{ii1} = D;
end
end
end
end
% Evaluate the gradient of non-symmetric covariance (e.g. K_fu)
elseif nargin == 3 || isempty(mask)
if size(x,2) ~= size(x2,2)
error('gpcf_exp -> _ghyper: The number of columns in x and x2 has to be the same. ')
end
ii1=0;
K = gpcf.fh.cov(gpcf, x, x2);
if ~isempty(gpcf.p.magnSigma2)
ii1 = ii1 +1;
DKff{ii1} = K;
end
if isfield(gpcf,'metric')
dist = gpcf.metric.fh.dist(gpcf.metric, x, x2);
distg = gpcf.metric.fh.distg(gpcf.metric, x, x2);
gprior_dist = gpcf.metric.fh.lpg(gpcf.metric);
for i=1:length(distg)
ii1 = ii1+1;
DKff{ii1} = -K.*distg{i};
end
else
if isfield(gpcf,'selectedVariables')
x = x(:,gpcf.selectedVariables);
x2 = x2(:,gpcf.selectedVariables);
end
[n, m] =size(x);
if ~savememory
i1=1:m;
else
if i1==1
DKff=DKff{1};
return
end
ii1=ii1-1;
i1=i1-1;
end
if ~isempty(gpcf.p.lengthScale)
% Evaluate help matrix for calculations of derivatives with respect
% to the lengthScale
if length(gpcf.lengthScale) == 1
% In the case of an isotropic EXP
s = 1./gpcf.lengthScale;
dist = 0;
for i=1:m
dist = dist + (bsxfun(@minus,x(:,i),x2(:,i)')).^2;
end
DK_l = s.*K.*sqrt(dist);
ii1=ii1+1;
DKff{ii1} = DK_l;
else
% In the case ARD is used
s = 1./gpcf.lengthScale.^2; % set the length
dist = 0;
for i=1:m
dist = dist + s(i).*(bsxfun(@minus,x(:,i),x2(:,i)')).^2;
end
dist = sqrt(dist);
for i=i1
D1 = s(i).*K.* bsxfun(@minus,x(:,i),x2(:,i)').^2;
D1(dist~=0) = D1(dist~=0)./dist(dist~=0);
ii1=ii1+1;
DKff{ii1} = D1;
end
end
end
end
% Evaluate: DKff{1} = d mask(Kff,I) / d magnSigma2
% DKff{2...} = d mask(Kff,I) / d lengthScale
elseif nargin == 4 || nargin == 5
ii1=0;
if ~isempty(gpcf.p.magnSigma2) && (~savememory || all(i1==1))
ii1 = ii1+1;
DKff{ii1} = gpcf.fh.trvar(gpcf, x); % d mask(Kff,I) / d magnSigma2
end
if isfield(gpcf,'metric')
dist = 0;
distg = gpcf.metric.fh.distg(gpcf.metric, x, [], 1);
gprior_dist = gpcf.metric.fh.lpg(gpcf.metric);
for i=1:length(distg)
ii1 = ii1+1;
DKff{ii1} = 0;
end
else
if ~isempty(gpcf.p.lengthScale)
for i2=1:length(gpcf.lengthScale)
ii1 = ii1+1;
DKff{ii1} = 0; % d mask(Kff,I) / d lengthScale
end
end
end
end
if savememory
DKff=DKff{1};
end
end
function DKff = gpcf_exp_ginput(gpcf, x, x2, i1)
%GPCF_EXP_GINPUT Evaluate gradient of covariance function with
% respect to x.
%
% Description
% DKff = GPCF_EXP_GINPUT(GPCF, X) takes a covariance function
% structure GPCF, a matrix X of input vectors and returns
% DKff, the gradients of covariance matrix Kff = k(X,X) with
% respect to X (cell array with matrix elements). This subfunction
% is needed when computing gradients with respect to inducing
% inputs in sparse approximations.
%
% DKff = GPCF_EXP_GINPUT(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors
% and returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% DKff = GPCF_EXP_GINPUT(GPCF, X, X2, i) takes a covariance
% function structure GPCF, a matrix X of input vectors
% and returns DKff, the gradients of covariance matrix Kff =
% k(X,X2), or k(X,X) if X2 is empty, with respect to ith
% covariate in X. This subfunction is needed when using
% memory save option in gp_set.
%
% See also
% GPCF_EXP_PAK, GPCF_EXP_UNPAK, GPCF_EXP_LP, GP_G
[n, m] =size(x);
ii1 = 0;
if nargin==4
% Use memory save option
if i1==0
% Return number of covariates
if isfield(gpcf,'selectedVariables')
DKff=length(gpcf.selectedVariables);
else
DKff=m;
end
return
end
else
i1=1:m;
end
if nargin == 2 || isempty(x2)
K = gpcf.fh.trcov(gpcf, x);
if isfield(gpcf,'metric')
dist = gpcf.metric.fh.dist(gpcf.metric, x);
gdist = gpcf.metric.fh.ginput(gpcf.metric, x);
for i=1:length(gdist)
ii1 = ii1+1;
DKff{ii1} = -K.*gdist{ii1};
end
else
if length(gpcf.lengthScale) == 1
% In the case of an isotropic EXP
s = repmat(1./gpcf.lengthScale.^2, 1, m);
else
s = 1./gpcf.lengthScale.^2;
end
dist=0;
for i2=1:m
dist = dist + s(i2).*(bsxfun(@minus,x(:,i2),x(:,i2)')).^2;
end
dist = sqrt(dist);
for i=i1
for j = 1:n
D1 = zeros(n,n);
D1(j,:) = -s(i).*bsxfun(@minus,x(j,i),x(:,i)');
D1 = D1 + D1';
D1(dist~=0) = D1(dist~=0)./dist(dist~=0);
DK = D1.*K;
ii1 = ii1 + 1;
DKff{ii1} = DK;
end
end
end
elseif nargin == 3 || nargin == 4
[n2, m2] =size(x2);
K = gpcf.fh.cov(gpcf, x, x2);
if isfield(gpcf,'metric')
dist = gpcf.metric.fh.dist(gpcf.metric, x, x2);
gdist = gpcf.metric.fh.ginput(gpcf.metric, x, x2);
for i=1:length(gdist)
ii1 = ii1+1;
DKff{ii1} = -K.*gdist{ii1};
end
else
if length(gpcf.lengthScale) == 1
% In the case of an isotropic EXP
s = repmat(1./gpcf.lengthScale.^2, 1, m);
else
s = 1./gpcf.lengthScale.^2;
end
dist=0;
for i2=1:m
dist = dist + s(i2).*(bsxfun(@minus,x(:,i2),x2(:,i2)')).^2;
end
dist = sqrt(dist);
for i=i1
for j = 1:n
D1 = zeros(n,n2);
D1(j,:) = -s(i).*bsxfun(@minus,x(j,i),x2(:,i)');
D1(dist~=0) = D1(dist~=0)./dist(dist~=0);
DK = D1.*K;
ii1 = ii1 + 1;
DKff{ii1} = DK;
end
end
end
end
end
function C = gpcf_exp_cov(gpcf, x1, x2)
%GP_EXP_COV Evaluate covariance matrix between two input vectors
%
% Description
% C = GP_EXP_COV(GP, TX, X) takes in covariance function of a
% Gaussian process GP and two matrixes TX and X that contain
% input vectors to GP. Returns covariance matrix C. Every
% element ij of C contains covariance between inputs i in TX
% and j in X. This is a mandatory subfunction used for example in
% prediction and energy computations.
%
% See also
% GPCF_EXP_TRCOV, GPCF_EXP_TRVAR, GP_COV, GP_TRCOV
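%
% With the scaled distance d = sqrt(sum_k (x1_k-x2_k)^2/l_k^2) every
% element is k(d) = magnSigma2*exp(-d), so k(0) = magnSigma2 and the
% covariance decays by a factor exp(-1) per unit scaled distance.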
if isempty(x2)
x2=x1;
end
if size(x1,2)~=size(x2,2)
error('the number of columns of X1 and X2 has to be same')
end
if isfield(gpcf,'metric')
dist = gpcf.metric.fh.dist(gpcf.metric, x1, x2);
dist(dist<eps) = 0;
C = gpcf.magnSigma2.*exp(-dist);
else
if isfield(gpcf,'selectedVariables')
x1 = x1(:,gpcf.selectedVariables);
x2 = x2(:,gpcf.selectedVariables);
end
[n1,m1]=size(x1);
[n2,m2]=size(x2);
C=zeros(n1,n2);
ma2 = gpcf.magnSigma2;
% Evaluate the covariance
if ~isempty(gpcf.lengthScale)
s2 = 1./gpcf.lengthScale.^2;
% If ARD is not used make s a vector of
% equal elements
if size(s2)==1
s2 = repmat(s2,1,m1);
end
dist=zeros(n1,n2);
for j=1:m1
dist = dist + s2(j).*(bsxfun(@minus,x1(:,j),x2(:,j)')).^2;
end
C = ma2.*exp(-sqrt(dist));
end
C(C<eps)=0;
end
end
function C = gpcf_exp_trcov(gpcf, x)
%GP_EXP_TRCOV Evaluate training covariance matrix of inputs
%
% Description
% C = GP_EXP_TRCOV(GP, TX) takes in covariance function of a
% Gaussian process GP and matrix TX that contains training
% input vectors. Returns covariance matrix C. Every element ij
% of C contains covariance between inputs i and j in TX.
% This is a mandatory subfunction used for example in prediction
% and energy computations.
%
%
% See also
% GPCF_EXP_COV, GPCF_EXP_TRVAR, GP_COV, GP_TRCOV
if isfield(gpcf,'metric')
% If other than scaled euclidean metric
dist = gpcf.metric.fh.dist(gpcf.metric, x);
dist(dist<eps) = 0;
C = gpcf.magnSigma2.*exp(-dist);
else
% If scaled euclidean metric
% Try to use the C-implementation
C = trcov(gpcf, x);
% C = NaN;
if isnan(C)
% If there wasn't C-implementation do here
if isfield(gpcf, 'selectedVariables')
x = x(:,gpcf.selectedVariables);
end
[n, m] =size(x);
s = 1./(gpcf.lengthScale);
s2 = s.^2;
if size(s)==1
s2 = repmat(s2,1,m);
end
ma2 = gpcf.magnSigma2;
% Here we take advantage of the
% symmetry of covariance matrix
C=zeros(n,n);
for i1=2:n
i1n=(i1-1)*n;
for i2=1:i1-1
ii=i1+(i2-1)*n;
for i3=1:m
C(ii)=C(ii)+s2(i3).*(x(i1,i3)-x(i2,i3)).^2; % the covariance function
end
C(i1n+i2)=C(ii);
end
end
C = ma2.*exp(-sqrt(C));
C(C<eps)=0;
end
end
end
function C = gpcf_exp_trvar(gpcf, x)
%GP_EXP_TRVAR Evaluate training variance vector
%
% Description
% C = GP_EXP_TRVAR(GPCF, TX) takes in covariance function of a
% Gaussian process GPCF and matrix TX that contains training
% inputs. Returns variance vector C. Every element i of C
% contains variance of input i in TX. This is a mandatory
% subfunction used for example in prediction and energy computations.
%
% See also
% GPCF_EXP_COV, GP_COV, GP_TRCOV
[n, m] =size(x);
C = ones(n,1).*gpcf.magnSigma2;
C(C<eps)=0;
end
function reccf = gpcf_exp_recappend(reccf, ri, gpcf)
%RECAPPEND Record append
%
% Description
% RECCF = GPCF_EXP_RECAPPEND(RECCF, RI, GPCF) takes a
% covariance function record structure RECCF, record index RI
% and covariance function structure GPCF with the current MCMC
% samples of the parameters. Returns RECCF which contains
% all the old samples and the current samples from GPCF.
% This subfunction is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC and GP_MC -> RECAPPEND
if nargin == 2
% Initialize the record
reccf.type = 'gpcf_exp';
% Initialize parameters
reccf.lengthScale= [];
reccf.magnSigma2 = [];
% Set the function handles
reccf.fh.pak = @gpcf_exp_pak;
reccf.fh.unpak = @gpcf_exp_unpak;
reccf.fh.e = @gpcf_exp_lp;
reccf.fh.lpg = @gpcf_exp_lpg;
reccf.fh.cfg = @gpcf_exp_cfg;
reccf.fh.cov = @gpcf_exp_cov;
reccf.fh.trcov = @gpcf_exp_trcov;
reccf.fh.trvar = @gpcf_exp_trvar;
reccf.fh.recappend = @gpcf_exp_recappend;
reccf.p=[];
reccf.p.lengthScale=[];
reccf.p.magnSigma2=[];
if isfield(ri.p,'lengthScale') && ~isempty(ri.p.lengthScale)
reccf.p.lengthScale = ri.p.lengthScale;
end
if ~isempty(ri.p.magnSigma2)
reccf.p.magnSigma2 = ri.p.magnSigma2;
end
if isfield(ri, 'selectedVariables')
reccf.selectedVariables = ri.selectedVariables;
end
else
% Append to the record
gpp = gpcf.p;
if ~isfield(gpcf,'metric')
% record lengthScale
reccf.lengthScale(ri,:)=gpcf.lengthScale;
if isfield(gpp,'lengthScale') && ~isempty(gpp.lengthScale)
reccf.p.lengthScale = gpp.lengthScale.fh.recappend(reccf.p.lengthScale, ri, gpcf.p.lengthScale);
end
end
% record magnSigma2
reccf.magnSigma2(ri,:)=gpcf.magnSigma2;
if isfield(gpp,'magnSigma2') && ~isempty(gpp.magnSigma2)
reccf.p.magnSigma2 = gpp.magnSigma2.fh.recappend(reccf.p.magnSigma2, ri, gpcf.p.magnSigma2);
end
end
end
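% A small sketch of the two call patterns handled above (assumption: in
% normal use gp_mc drives these calls; shown only to illustrate the
% nargin==2 initialization branch versus the append branch):
%   reccf = gpcf_exp_recappend ([], gpcf) ;         % initialize the record
%   reccf = gpcf_exp_recappend (reccf, 1, gpcf) ;   % append sample 1
%   reccf = gpcf_exp_recappend (reccf, 2, gpcf) ;   % append sample 2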
|
github
|
lcnbeapp/beapp-master
|
SuiteSparse_install.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/SuiteSparse/SuiteSparse_install.m
| 11,112 |
utf_8
|
fb32defab8ad43d072b65d41d20fb41f
|
function paths = SuiteSparse_install (do_demo)
%SuiteSparse_install: compiles and installs all of SuiteSparse
% A Suite of Sparse matrix packages, authored or co-authored by Tim Davis, Univ.
% Florida. You must be in the same directory as SuiteSparse_install to use this.
%
% Packages in SuiteSparse:
%
% UMFPACK sparse LU factorization (multifrontal)
% CHOLMOD sparse Cholesky factorization, and many other operations
% AMD sparse symmetric approximate minimum degree ordering
% COLAMD sparse column approximate minimum degree ordering
% CAMD constrained AMD
% CCOLAMD constrained COLAMD
% CSparse a Concise Sparse matrix package (32-bit/real only)
% CXSparse extended version of CSparse (32-bit/64-bit/real/complex)
% UFget interface to UF Sparse Matrix Collection (MATLAB 7.0 or later)
% KLU sparse LU factorization (left-looking)
% BTF permutation to block triangular form (like dmperm)
% LDL sparse LDL' factorization
% UFcollection tools for managing the UF Sparse Matrix Collection
% RBio read/write Rutherford/Boeing files (requires Fortran compiler)
% SSMULT sparse matrix times sparse matrix
% MESHND 2D and 3D regular mesh generation and nested dissection
% LINFACTOR illustrates the use of LU and CHOL (MATLAB 7.3 or later)
% MATLAB_Tools various simple m-files and demos
% SuiteSparseQR sparse QR factorization
%
% CXSparse is installed in place of CSparse; cd to CSparse/MATLAB and type
% cs_install if you wish to use the latter. Since Microsoft Windows does not
% support ANSI C99, CXSparse does not support complex matrices on Windows.
%
% Except where noted, all packages work on MATLAB 6.1 or later. They have not
% been tested on earlier versions, but they might work there. Please let me
% know if you try SuiteSparse on MATLAB 6.0 or earlier, whether it works or not.
%
% Example:
% SuiteSparse_install
% help SuiteSparse % for more details
%
% See also AMD, COLAMD, CAMD, CCOLAMD, CHOLMOD, UMFPACK, CSPARSE, CXSPARSE,
% UFget, RBio, UFcollection, KLU, BTF, MESHND, SSMULT, LINFACTOR,
% SuiteSparse, SPQR, PATHTOOL, PATH.
% Copyright 1990-2008, Timothy A. Davis.
% http://www.cise.ufl.edu/research/sparse
% In collaboration with Patrick Amestoy, Yanqing Chen, Iain Duff, John Gilbert,
% Steve Hadfield, Bill Hager, Stefan Larimore, Esmond Ng, Eka Palamadai, and
% Siva Rajamanickam.
paths = { } ;
SuiteSparse = pwd ;
% add MATLAB_Tools to the path (for getversion)
cd ([SuiteSparse '/MATLAB_Tools']) ;
paths = add_to_path (paths, pwd) ;
cd (SuiteSparse) ;
% determine the MATLAB version (6.1, 6.5, 7.0, ...)
v = getversion ;
pc = ispc ;
% check if METIS 4.0.1 is present where it's supposed to be
have_metis = exist ('metis-4.0', 'dir') ;
if (~have_metis)
fprintf ('SPQR, CHOLMOD, and KLU optionally use METIS 4.0.1. Download\n') ;
fprintf ('it from http://glaros.dtc.umn.edu/gkhome/views/metis\n');
fprintf ('and place the metis-4.0 directory in this directory.\n') ;
input ('or hit enter to continue without METIS: ', 's') ;
fprintf ('Now compiling without METIS...\n\n') ;
end
% print the introduction
help SuiteSparse_install
fprintf ('MATLAB version %g (%s)\n', v, version) ;
% add SuiteSparse to the path
fprintf ('\nPlease wait while SuiteSparse is compiled and installed...\n') ;
paths = add_to_path (paths, SuiteSparse) ;
% compile and install UMFPACK
try
cd ([SuiteSparse '/UMFPACK/MATLAB']) ;
paths = add_to_path (paths, pwd) ;
umfpack_make
catch %#ok
disp (lasterr) ;
try
fprintf ('Trying to install with lcc_lib/libmwlapack.lib instead\n') ;
umfpack_make ('lcc_lib/libmwlapack.lib') ;
catch %#ok
disp (lasterr) ;
fprintf ('UMFPACK not installed\n') ;
end
end
% compile and install CHOLMOD
try
% determine whether or not to compile CHOLMOD
cd ([SuiteSparse '/CHOLMOD/MATLAB']) ;
paths = add_to_path (paths, pwd) ;
if (have_metis)
cholmod_make
else
cholmod_make ('no metis') ;
end
catch %#ok
disp (lasterr) ;
fprintf ('CHOLMOD not installed\n') ;
end
% compile and install AMD
try
cd ([SuiteSparse '/AMD/MATLAB']) ;
paths = add_to_path (paths, pwd) ;
amd_make
catch %#ok
disp (lasterr) ;
fprintf ('AMD not installed\n') ;
end
% compile and install COLAMD
try
cd ([SuiteSparse '/COLAMD/MATLAB']) ;
paths = add_to_path (paths, pwd) ;
colamd_make
catch %#ok
disp (lasterr) ;
fprintf ('COLAMD not installed\n') ;
end
% compile and install CCOLAMD
try
cd ([SuiteSparse '/CCOLAMD/MATLAB']) ;
paths = add_to_path (paths, pwd) ;
ccolamd_make
catch %#ok
disp (lasterr) ;
fprintf ('CCOLAMD not installed\n') ;
end
% compile and install CAMD
try
cd ([SuiteSparse '/CAMD/MATLAB']) ;
paths = add_to_path (paths, pwd) ;
camd_make
catch %#ok
disp (lasterr) ;
fprintf ('CAMD not installed\n') ;
end
% compile and install CXSparse and UFget
try
cd ([SuiteSparse '/CXSparse/MATLAB/CSparse']) ;
paths = add_to_path (paths, [SuiteSparse '/CXSparse/MATLAB/CSparse']) ;
paths = add_to_path (paths, [SuiteSparse '/CXSparse/MATLAB/Demo']) ;
if (v >= 7.0)
paths = add_to_path (paths, [SuiteSparse '/CXSparse/MATLAB/UFget']) ;
fprintf ('UFget installed successfully\n') ;
else
fprintf ('UFget skipped; requires MATLAB 7.0 or later\n') ;
end
if (pc)
% Windows does not support ANSI C99 complex, which CXSparse requires
fprintf ('Compiling CXSparse without complex support\n') ;
cs_make (1, 0) ;
else
cs_make (1) ;
end
catch %#ok
disp (lasterr) ;
fprintf ('CXSparse not installed\n') ;
end
% compile and install LDL
try
cd ([SuiteSparse '/LDL/MATLAB']) ;
paths = add_to_path (paths, pwd) ;
ldl_make
catch %#ok
disp (lasterr) ;
fprintf ('LDL not installed\n') ;
end
% compile and install BTF
try
cd ([SuiteSparse '/BTF/MATLAB']) ;
paths = add_to_path (paths, pwd) ;
btf_make
catch %#ok
disp (lasterr) ;
fprintf ('BTF not installed\n') ;
end
% compile and install KLU
try
cd ([SuiteSparse '/KLU/MATLAB']) ;
paths = add_to_path (paths, pwd) ;
klu_make (have_metis) ;
catch %#ok
disp (lasterr) ;
fprintf ('KLU not installed\n') ;
end
% compile and install SSMULT
try
cd ([SuiteSparse '/SSMULT']) ;
paths = add_to_path (paths, pwd) ;
ssmult_install (0) ;
catch %#ok
disp (lasterr) ;
fprintf ('SSMULT not installed\n') ;
end
% compile and install UFcollection
try
% do not try to compile with large-file I/O for MATLAB 6.5 or earlier
cd ([SuiteSparse '/UFcollection']) ;
paths = add_to_path (paths, pwd) ;
UFcollection_install (v < 7.0) ;
catch %#ok
disp (lasterr) ;
fprintf ('UFcollection not installed\n') ;
end
% install LINFACTOR, MESHND, MATLAB_Tools/*
try
cd ([SuiteSparse '/MATLAB_Tools/Factorize']) ;
paths = add_to_path (paths, pwd) ;
cd ([SuiteSparse '/MESHND']) ;
paths = add_to_path (paths, pwd) ;
if (v > 7.2)
% LINFACTOR requires MATLAB 7.3 or later
cd ([SuiteSparse '/LINFACTOR']) ;
paths = add_to_path (paths, pwd) ;
fprintf ('LINFACTOR installed\n') ;
end
cd ([SuiteSparse '/MATLAB_Tools/find_components']) ;
paths = add_to_path (paths, pwd) ;
cd ([SuiteSparse '/MATLAB_Tools/GEE']) ;
paths = add_to_path (paths, pwd) ;
cd ([SuiteSparse '/MATLAB_Tools/shellgui']) ;
paths = add_to_path (paths, pwd) ;
cd ([SuiteSparse '/MATLAB_Tools/waitmex']) ;
paths = add_to_path (paths, pwd) ;
cd ([SuiteSparse '/MATLAB_Tools/spok']) ;
paths = add_to_path (paths, pwd) ;
mex spok.c spok_mex.c
fprintf ('LINFACTOR, MESHND, MATLAB_Tools installed\n') ;
catch %#ok
disp (lasterr) ;
fprintf ('LINFACTOR, MESHND, and/or MATLAB_Tools not installed\n') ;
end
% compile and install SuiteSparseQR
try
if (pc)
fprintf ('Note that SuiteSparseQR will not compile with the lcc\n') ;
fprintf ('compiler provided with MATLAB on Windows\n') ;
end
cd ([SuiteSparse '/SPQR/MATLAB']) ;
paths = add_to_path (paths, pwd) ;
if (have_metis)
spqr_make
else
spqr_make ('no metis') ;
end
catch %#ok
disp (lasterr) ; %#ok
fprintf ('SuiteSparseQR not installed\n') ;
end
% compile and install RBio (not on Windows ... no default Fortran compiler)
if (~pc)
try
cd ([SuiteSparse '/RBio']) ;
RBmake
paths = add_to_path (paths, pwd) ;
catch %#ok
disp (lasterr) ; %#ok
fprintf ('RBio not installed (Fortran compiler required).\n') ;
end
end
% post-install wrapup
cd (SuiteSparse)
fprintf ('SuiteSparse is now installed.\n') ;
if (nargin < 1)
% ask if demo should be run
y = input ('Hit enter to run the SuiteSparse demo (or "n" to quit): ', 's') ;
if (isempty (y))
y = 'y' ;
end
do_demo = (y (1) ~= 'n') ;
end
if (do_demo)
try
SuiteSparse_demo ;
catch %#ok
disp (lasterr) ;
fprintf ('SuiteSparse demo failed\n') ;
end
end
fprintf ('\nSuiteSparse installation is complete. The following paths\n') ;
fprintf ('have been added for this session. Use pathtool to add them\n') ;
fprintf ('permanently. If you cannot save the new path because of file\n');
fprintf ('permissions, then add these commands to your startup.m file.\n') ;
fprintf ('Type "doc startup" and "doc pathtool" for more information.\n\n') ;
for k = 1:length (paths)
fprintf ('addpath %s\n', paths {k}) ;
end
cd (SuiteSparse)
fprintf ('\nSuiteSparse for MATLAB %g installation complete\n', getversion) ;
%-------------------------------------------------------------------------------
function paths = add_to_path (paths, newpath)
% add a path
addpath (newpath) ;
paths = [paths { newpath } ] ; %#ok
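% A small sketch of making the added paths permanent, as the message printed
% above suggests (assumption: a writable startup.m on your MATLAB path):
%   paths = SuiteSparse_install (0) ;       % install, skip the demo
%   fid = fopen ('startup.m', 'a') ;
%   for k = 1:length (paths)
%       fprintf (fid, 'addpath %s\n', paths {k}) ;
%   end
%   fclose (fid) ;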
|
github
|
lcnbeapp/beapp-master
|
colamd_test.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/SuiteSparse/COLAMD/MATLAB/colamd_test.m
| 11,715 |
utf_8
|
67855282bea237b3f5e5aaead650f90e
|
function colamd_test
%COLAMD_TEST test colamd2 and symamd2
% Example:
% colamd_test
%
% COLAMD and SYMAMD testing function. Here we try to give colamd2 and symamd2
% every possible type of matrix and erroneous input that they may encounter.
% We want either a valid permutation returned or we want them to fail
% gracefully.
%
% You are prompted as to whether or not the colamd2 and symamd2 routines and
% the test mexFunctions are to be compiled.
%
% See also colamd2, symamd2
% Copyright 1998-2007, Timothy A. Davis, and Stefan Larimore
% Developed in collaboration with J. Gilbert and E. Ng.
help colamd_test
fprintf ('Compiling colamd2, symamd2, and test mexFunctions.\n') ;
colamd_make ;
d = '' ;
if (~isempty (strfind (computer, '64')))
d = '-largeArrayDims' ;
end
cmd = sprintf ('mex -DDLONG -O %s -I../../UFconfig -I../Include ', d) ;
src = '../Source/colamd.c ../Source/colamd_global.c' ;
eval ([cmd 'colamdtestmex.c ' src]) ;
eval ([cmd 'symamdtestmex.c ' src]) ;
fprintf ('Done compiling.\n') ;
fprintf ('\nThe following codes will be tested:\n') ;
which colamd2
which symamd2
which colamd2mex
which symamd2mex
which colamdtestmex
which symamdtestmex
fprintf ('\nStarting the tests. Please be patient.\n') ;
h = waitbar (0, 'COLAMD test') ;
rand ('state', 0) ;
randn ('state', 0) ;
A = sprandn (500,500,0.4) ;
p = colamd2 (A, [10 10 1]) ; check_perm (p, A) ;
p = colamd2 (A, [2 7 1]) ; check_perm (p, A) ;
p = symamd2 (A, [10 1]) ; check_perm (p, A) ;
p = symamd2 (A, [7 1]) ; check_perm (p, A) ;
p = symamd2 (A, [4 1]) ; check_perm (p, A) ;
fprintf ('Null matrices') ;
A = zeros (0,0) ;
A = sparse (A) ;
[p, stats] = colamd2 (A, [10 10 0]) ; %#ok
check_perm (p, A) ;
[p, stats] = symamd2 (A, [10 0]) ; %#ok
check_perm (p, A) ;
A = zeros (0, 100) ;
A = sparse (A) ;
[p, stats] = colamd2 (A, [10 10 0]) ; %#ok
check_perm (p, A) ;
A = zeros (100, 0) ;
A = sparse (A) ;
[p, stats] = colamd2 (A, [10 10 0]) ;
check_perm (p, A) ;
fprintf (' OK\n') ;
fprintf ('Matrices with a few dense rows/cols\n') ;
for trial = 1:20
waitbar (trial/20, h, 'COLAMD: with dense rows/cols') ;
% random square unsymmetric matrix
A = rand_matrix (1000, 1000, 1, 10, 20) ;
for tol = [0:.1:2 3:20 1e6]
[p, stats] = colamd2 (A, [tol tol 0]) ; %#ok
check_perm (p, A) ;
B = A + A' ;
[p, stats] = symamd2 (B, [tol 0]) ; %#ok
check_perm (p, A) ;
[p, stats] = colamd2 (A, [tol 1 0]) ; %#ok
check_perm (p, A) ;
[p, stats] = colamd2 (A, [1 tol 0]) ; %#ok
check_perm (p, A) ;
end
end
fprintf (' OK\n') ;
fprintf ('General matrices\n') ;
for trial = 1:400
waitbar (trial/400, h, 'COLAMD: general') ;
% matrix of random mtype
mtype = irand (3) ;
A = rand_matrix (2000, 2000, mtype, 0, 0) ;
p = colamd2 (A) ;
check_perm (p, A) ;
if (mtype == 3)
p = symamd2 (A) ;
check_perm (p, A) ;
end
end
fprintf (' OK\n') ;
fprintf ('Test error handling with invalid inputs\n') ;
% Check different erroneous input.
for trial = 1:30
waitbar (trial/30, h, 'COLAMD: error handling') ;
A = rand_matrix (1000, 1000, 2, 0, 0) ;
[m n] = size (A) ;
for err = 1:13
p = Tcolamd (A, [n n 0 0 err]) ;
if (p ~= -1) %#ok
check_perm (p, A) ;
end
if (err == 1)
% check different (valid) input args to colamd2
p = Acolamd (A) ;
p2 = Acolamd (A, [10 10 0 0 0]) ;
if (any (p ~= p2))
error ('colamd2: mismatch 1!') ;
end
[p2 stats] = Acolamd (A) ; %#ok
if (any (p ~= p2))
error ('colamd2: mismatch 2!') ;
end
[p2 stats] = Acolamd (A, [10 10 0 0 0]) ;
if (any (p ~= p2))
error ('colamd2: mismatch 3!') ;
end
end
B = A'*A ;
p = Tsymamd (B, [n 0 err]) ;
if (p ~= -1) %#ok
check_perm (p, A) ;
end
if (err == 1)
% check different (valid) input args to symamd2
p = Asymamd (B) ;
check_perm (p, A) ;
p2 = Asymamd (B, [10 0 0]) ;
if (any (p ~= p2))
error ('symamd2: mismatch 1!') ;
end
[p2 stats] = Asymamd (B) ; %#ok
if (any (p ~= p2))
error ('symamd2: mismatch 2!') ;
end
[p2 stats] = Asymamd (B, [10 0 0]) ; %#ok
if (any (p ~= p2))
error ('symamd2: mismatch 3!') ;
end
end
end
end
fprintf (' OK\n') ;
fprintf ('Matrices with a few empty columns\n') ;
for trial = 1:400
% some are square, some are rectangular
n = 0 ;
while (n < 5)
A = rand_matrix (1000, 1000, irand (2), 0, 0) ;
[m n] = size (A) ;
end
% Add 5 null columns at random locations.
null_col = randperm (n) ;
null_col = sort (null_col (1:5)) ;
A (:, null_col) = 0 ;
% Order the matrix and make sure that the null columns are ordered last.
[p, stats] = colamd2 (A, [1e6 1e6 0]) ;
check_perm (p, A) ;
% if (stats (2) ~= 5)
% stats (2)
% error ('colamd2: wrong number of null columns') ;
% end
% find all null columns in A
null_col = find (sum (spones (A), 1) == 0) ;
nnull = length (null_col) ; %#ok
if (any (null_col ~= p ((n-4):n)))
error ('colamd2: Null cols are not ordered last in natural order') ;
end
end
fprintf (' OK\n') ;
fprintf ('Matrices with a few empty rows and columns\n') ;
for trial = 1:400
waitbar (trial/400, h, 'COLAMD: with empty rows/cols') ;
% symmetric matrices
n = 0 ;
while (n < 5)
A = rand_matrix (1000, 1000, 3, 0, 0) ;
[m n] = size (A) ;
end
% Add 5 null columns and rows at random locations.
null_col = randperm (n) ;
null_col = sort (null_col (1:5)) ;
A (:, null_col) = 0 ;
A (null_col, :) = 0 ;
% Order the matrix and make sure that the null rows/cols are ordered last.
[p,stats] = symamd2 (A, [10 0]) ;
check_perm (p, A) ;
% find actual number of null rows and columns
Alo = tril (A, -1) ;
nnull = length (find (sum (Alo') == 0 & sum (Alo) == 0)) ; %#ok
if (stats (2) ~= nnull | nnull < 5) %#ok
error ('symamd2: wrong number of null columns') ;
end
if (any (null_col ~= p ((n-4):n)))
error ('symamd2: Null cols are not ordered last in natural order') ;
end
end
fprintf (' OK\n') ;
fprintf ('Matrices with a few empty rows\n') ;
% Test matrices with null rows inserted.
for trial = 1:400
waitbar (trial/400, h, 'COLAMD: with null rows') ;
m = 0 ;
while (m < 5)
A = rand_matrix (1000, 1000, 2, 0, 0) ;
[m n] = size (A) ; %#ok
end
% Add 5 null rows at random locations.
null_row = randperm (m) ;
null_row = sort (null_row (1:5)) ;
A (null_row, :) = 0 ;
[p, stats] = colamd2 (A, [10 10 0]) ;
check_perm (p, A) ;
if (stats (1) ~= 5)
error ('colamd2: wrong number of null rows') ;
end
end
fprintf (' OK\n') ;
fprintf ('\ncolamd2 and symamd2: all tests passed\n\n') ;
close (h) ;
%-------------------------------------------------------------------------------
function [p,stats] = Acolamd (S, knobs)
% Acolamd: compare colamd2 and Tcolamd results
if (nargin < 3)
if (nargout == 1)
[p] = colamd2 (S) ;
[p1] = Tcolamd (S, [10 10 0 0 0]) ;
else
[p, stats] = colamd2 (S) ;
[p1, stats1] = Tcolamd (S, [10 10 0 0 0]) ; %#ok
end
else
if (nargout == 1)
[p] = colamd2 (S, knobs (1:3)) ;
[p1] = Tcolamd (S, knobs) ;
else
[p, stats] = colamd2 (S, knobs (1:3)) ;
[p1, stats1] = Tcolamd (S, knobs) ; %#ok
end
end
check_perm (p, S) ;
check_perm (p1, S) ;
if (any (p1 ~= p))
error ('Acolamd mismatch!') ;
end
%-------------------------------------------------------------------------------
function [p,stats] = Asymamd (S, knobs)
% Asymamd: compare symamd2 and Tsymamd results
if (nargin < 3)
if (nargout == 1)
[p] = symamd2 (S) ;
[p1] = Tsymamd (S, [10 0 0]) ;
else
[p, stats] = symamd2 (S) ;
[p1, stats1] = Tsymamd (S, [10 0 0]) ; %#ok
end
else
if (nargout == 1)
[p] = symamd2 (S, knobs (1:2)) ;
[p1] = Tsymamd (S, knobs) ;
else
[p, stats] = symamd2 (S, knobs (1:2)) ;
[p1, stats1] = Tsymamd (S, knobs) ; %#ok
end
end
if (any (p1 ~= p))
error ('Asymamd mismatch!') ;
end
%-------------------------------------------------------------------------------
function check_perm (p, A)
% check_perm: check for a valid permutation vector
if (isempty (A) & isempty (p)) %#ok
% empty permutation vectors of empty matrices are OK
return
end
if (isempty (p))
error ('bad permutation: cannot be empty') ;
end
[m n] = size (A) ;
[pm pn] = size (p) ;
if (pn == 1)
% force p to be a row vector
p = p' ;
[pm pn] = size (p) ;
end
if (n ~= pn)
error ('bad permutation: wrong size') ;
end
if (pm ~= 1)
% p must be a vector
error ('bad permutation: not a vector') ;
else
if (any (sort (p) - (1:pn)))
error ('bad permutation') ;
end
end
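% A tiny illustration of what check_perm accepts (assumption: standalone use
% outside the test driver):
%   A = sparse (magic (4)) ;
%   check_perm ([2 4 1 3], A) ;             % valid: a permutation of 1:4
%   % check_perm ([1 1 2 3], A) would error: not a permutation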
%-------------------------------------------------------------------------------
function i = irand (n)
% irand: return a random integer between 1 and n
i = min (n, 1 + floor (rand * n)) ;
%-------------------------------------------------------------------------------
function A = rand_matrix (nmax, mmax, mtype, drows, dcols)
% rand_matrix: return a random sparse matrix
%
% A = rand_matrix (nmax, mmax, mtype, drows, dcols)
%
% A binary matrix of random size, at most nmax-by-mmax, with drows dense rows
% and dcols dense columns.
%
% mtype 1: square unsymmetric (mmax is ignored)
% mtype 2: rectangular
% mtype 3: symmetric (mmax is ignored)
n = irand (nmax) ;
if (mtype ~= 2)
% square
m = n ;
else
m = irand (mmax) ;
end
A = sprand (m, n, 10 / max (m,n)) ;
if (drows > 0)
% add dense rows
for k = 1:drows
i = irand (m) ;
nz = irand (n) ;
p = randperm (n) ;
p = p (1:nz) ;
A (i,p) = 1 ;
end
end
if (dcols > 0)
% add dense cols
for k = 1:dcols
j = irand (n) ;
nz = irand (m) ;
p = randperm (m) ;
p = p (1:nz) ;
A (p,j) = 1 ;
end
end
A = spones (A) ;
% ensure that there are no empty columns
d = find (full (sum (A)) == 0) ; %#ok
A (m,d) = 1 ; %#ok
% ensure that there are no empty rows
d = find (full (sum (A,2)) == 0) ; %#ok
A (d,n) = 1 ; %#ok
if (mtype == 3)
% symmetric
A = A + A' + speye (n) ;
end
A = spones (A) ;
%-------------------------------------------------------------------------------
function [p,stats] = Tcolamd (S, knobs)
% Tcolamd: run colamd2 in a testing mode
if (nargout <= 1 & nargin == 1) %#ok
p = colamdtestmex (S) ;
elseif (nargout <= 1 & nargin == 2) %#ok
p = colamdtestmex (S, knobs) ;
elseif (nargout == 2 & nargin == 1) %#ok
[p, stats] = colamdtestmex (S) ;
elseif (nargout == 2 & nargin == 2) %#ok
[p, stats] = colamdtestmex (S, knobs) ;
else
error ('colamd2: incorrect number of input and/or output arguments') ;
end
if (p (1) ~= -1)
[ignore, q] = etree (S (:,p), 'col') ;
p = p (q) ;
check_perm (p, S) ;
end
%-------------------------------------------------------------------------------
function [p, stats] = Tsymamd (S, knobs)
% Tsymamd: run symamd2 in a testing mode
if (nargout <= 1 & nargin == 1) %#ok
p = symamdtestmex (S) ;
elseif (nargout <= 1 & nargin == 2) %#ok
p = symamdtestmex (S, knobs) ;
elseif (nargout == 2 & nargin == 1) %#ok
[p, stats] = symamdtestmex (S) ;
elseif (nargout == 2 & nargin == 2) %#ok
[p, stats] = symamdtestmex (S, knobs) ;
else
error ('symamd2: incorrect number of input and/or output arguments') ;
end
if (p (1) ~= -1)
[ignore, q] = etree (S (p,p)) ;
p = p (q) ;
check_perm (p, S) ;
end
|
github
|
lcnbeapp/beapp-master
|
ccolamd_test.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/SuiteSparse/CCOLAMD/MATLAB/ccolamd_test.m
| 11,803 |
utf_8
|
2cf7c488966fe7d5987b4062e6f6a1ca
|
function ccolamd_test
%CCOLAMD_TEST extensive test of ccolamd and csymamd
%
% Example:
% ccolamd_test
%
% See also csymamd, ccolamd, ccolamd_make.
% Copyright 1998-2007, Timothy A. Davis, Stefan Larimore, and Siva Rajamanickam
% Developed in collaboration with J. Gilbert and E. Ng.
help ccolamd_test
global ccolamd_default_knobs csymamd_default_knobs
ccolamd_default_knobs = [0 10 10 1 0] ;
csymamd_default_knobs = [10 1 0] ;
fprintf ('Compiling ccolamd, csymamd, and test mexFunctions.\n') ;
ccolamd_make ;
d = '' ;
if (~isempty (strfind (computer, '64')))
d = '-largeArrayDims' ;
end
src = '../Source/ccolamd.c ../Source/ccolamd_global.c' ;
cmd = sprintf ('mex -DDLONG -O %s -I../../UFconfig -I../Include ', d) ;
eval ([cmd 'ccolamdtestmex.c ' src]) ;
eval ([cmd 'csymamdtestmex.c ' src]) ;
fprintf ('Done compiling.\n') ;
fprintf ('\nThe following codes will be tested:\n') ;
which ccolamd
which csymamd
which ccolamdtestmex
which csymamdtestmex
fprintf ('\nStarting the tests. Please be patient.\n') ;
h = waitbar (0, 'CCOLAMD test') ;
rand ('state', 0) ;
randn ('state', 0) ;
A = sprandn (500,500,0.4) ;
p = ccolamd (A, [0 10 10 1 1]) ; check_perm (p, A) ;
p = ccolamd (A, [1 2 7 1 1]) ; check_perm (p, A) ;
p = ccolamd (A, [1 2 10 0 1]) ; check_perm (p, A) ;
p = ccolamd (A, [9 2 3 1 1]) ; check_perm (p, A) ;
p = csymamd (A, [10 1 1]) ; check_perm (p, A) ;
p = csymamd (A, [4 1 1]) ; check_perm (p, A) ;
p = csymamd (A, [9 0 1]) ; check_perm (p, A) ;
fprintf ('Null matrices') ;
A = zeros (0,0) ;
A = sparse (A) ;
p = ccolamd (A) ;
check_perm (p, A) ;
p = csymamd (A) ;
check_perm (p, A) ;
A = zeros (0, 100) ;
A = sparse (A) ;
p = ccolamd (A) ;
check_perm (p, A) ;
A = zeros (100, 0) ;
A = sparse (A) ;
p = ccolamd (A) ;
check_perm (p, A) ;
fprintf (' OK\n') ;
fprintf ('Matrices with a few dense rows/cols\n') ;
for trial = 1:20
waitbar (trial/20, h, 'CCOLAMD: dense rows/cols') ;
% random square unsymmetric matrix
A = rand_matrix (1000, 1000, 1, 10, 20) ;
[m n] = size (A) ;
cmember = irand (min (trial,n), n) ;
for tol = [0:.1:2 3:20 1e6]
B = A + A' ;
p = ccolamd (A, [ ]) ; check_perm (p, A) ;
p = ccolamd (A, [1 tol tol 1]) ; check_perm (p, A) ;
p = ccolamd (A, [0 tol tol 1]) ; check_perm (p, A) ;
p = ccolamd (A, [1 tol tol 0]) ; check_perm (p, A) ;
p = ccolamd (A, [0 tol tol 1]) ; check_perm (p, A) ;
p = csymamd (A, [tol 1]) ; check_perm (p, A) ;
p = csymamd (A, tol) ; check_perm (p, A) ;
p = csymamd (A, [ ]) ; check_perm (p, A) ;
p = csymamd (B, [tol 0]) ; check_perm (p, A) ;
p = ccolamd (A, [0 tol -1 1]) ; check_perm (p, A) ;
p = ccolamd (A, [0 -1 tol 1]) ; check_perm (p, A) ;
% check with non-null cmember
p = ccolamd (A, [ ], cmember) ; check_perm (p, A) ;
p = ccolamd (A, [1 tol tol 1], cmember) ; check_perm (p, A) ;
p = ccolamd (A, [0 tol tol 1], cmember) ; check_perm (p, A) ;
p = ccolamd (A, [1 tol tol 0], cmember) ; check_perm (p, A) ;
p = ccolamd (A, [0 tol tol 1], cmember) ; check_perm (p, A) ;
p = csymamd (A, [tol 1], cmember) ; check_perm (p, A) ;
p = csymamd (A, tol, cmember) ; check_perm (p, A) ;
p = csymamd (A, [ ], cmember) ; check_perm (p, A) ;
p = csymamd (B, [tol 0], cmember) ; check_perm (p, A) ;
p = ccolamd (A, [0 tol -1 1], cmember) ; check_perm (p, A) ;
p = ccolamd (A, [0 -1 tol 1], cmember) ; check_perm (p, A) ;
p = ccolamd (A, [ ], [ ]) ; check_perm (p, A) ;
p = ccolamd (A, [1 tol tol 1], [ ]) ; check_perm (p, A) ;
p = ccolamd (A, [0 tol tol 1], [ ]) ; check_perm (p, A) ;
p = ccolamd (A, [1 tol tol 0], [ ]) ; check_perm (p, A) ;
p = ccolamd (A, [0 tol tol 1], [ ]) ; check_perm (p, A) ;
p = csymamd (A, [tol 1], [ ]) ; check_perm (p, A) ;
p = csymamd (A, tol, [ ]) ; check_perm (p, A) ;
p = csymamd (A, [ ], [ ]) ; check_perm (p, A) ;
p = csymamd (B, [tol 0], [ ]) ; check_perm (p, A) ;
p = ccolamd (A, [0 tol -1 1], [ ]) ; check_perm (p, A) ;
p = ccolamd (A, [0 -1 tol 1], [ ]) ; check_perm (p, A) ;
end
end
fprintf (' OK\n') ;
fprintf ('General matrices\n') ;
for trial = 1:400
waitbar (trial/400, h, 'CCOLAMD: with dense rows/cols') ;
% matrix of random mtype
mtype = irand (3) ;
A = rand_matrix (2000, 2000, mtype, 0, 0) ;
p = ccolamd (A) ;
check_perm (p, A) ;
if (mtype == 3)
p = csymamd (A) ;
check_perm (p, A) ;
end
end
fprintf (' OK\n') ;
fprintf ('Test error handling with invalid inputs\n') ;
% Check different erroneous input.
for trial = 1:30
waitbar (trial/30, h, 'CCOLAMD: error handling') ;
A = rand_matrix (1000, 1000, 2, 0, 0) ;
for err = 1:13
p = Tcolamd (A, [ccolamd_default_knobs 1 err], [ ]) ;
if (p(1) ~= -1) %#ok
check_perm (p, A) ;
end
if (err == 1)
% check different (valid) input args to ccolamd
p = Acolamd (A) ;
p2 = Acolamd (A, [ccolamd_default_knobs 0 0]) ;
if (any (p ~= p2))
error ('ccolamd: mismatch 1!') ;
end
end
B = A'*A ;
p = Tsymamd (B, [-1 1 0 err], [ ]) ;
if (p(1) ~= -1) %#ok
check_perm (p, A) ;
end
if (err == 1)
% check different (valid) input args to csymamd
p = Asymamd (B) ;
check_perm (p, A) ;
p2 = Asymamd (B, [csymamd_default_knobs 0]) ;
if (any (p ~= p2))
error ('symamd: mismatch 1!') ;
end
end
end
end
fprintf (' OK\n') ;
fprintf ('Matrices with a few empty columns\n') ;
for trial = 1:400
waitbar (trial/400, h, 'CCOLAMD: with empty rows/cols') ;
% some are square, some are rectangular
n = 0 ;
while (n < 5)
A = rand_matrix (1000, 1000, irand (2), 0, 0) ;
[m n] = size (A) ;
end
% Add 5 null columns at random locations.
null_col = randperm (n) ;
null_col = sort (null_col (1:5)) ;
A (:, null_col) = 0 ;
% Order the matrix and make sure that the null columns are ordered last.
p = ccolamd (A, [1 1e6 1e6 0]) ;
check_perm (p, A) ;
% find all null columns in A
null_col = find (sum (spones (A), 1) == 0) ;
nnull = length (null_col) ;
if (any (null_col ~= p ((n-nnull+1):n)))
error ('ccolamd: Null cols are not ordered last in natural order') ;
end
end
fprintf (' OK\n') ;
fprintf ('Matrices with a few empty rows and columns\n') ;
for trial = 1:400
waitbar (trial/400, h, 'CCOLAMD: with empty rows/cols') ;
% symmetric matrices
n = 0 ;
while (n < 5)
A = rand_matrix (1000, 1000, 3, 0, 0) ;
[m n] = size (A) ;
end
% Add 5 null columns and rows at random locations.
null_col = randperm (n) ;
null_col = sort (null_col (1:5)) ;
A (:, null_col) = 0 ;
A (null_col, :) = 0 ;
% Order the matrix and make sure that the null rows/cols are ordered last.
p = csymamd (A, -1) ;
check_perm (p, A) ;
% find all null rows/columns in A
Alo = tril (A, -1) ;
null_col = ...
find ((sum (spones (Alo), 1) == 0) & (sum (spones (Alo), 2) == 0)') ;
nnull = length (null_col) ;
if (any (null_col ~= p ((n-nnull+1):n)))
error ('csymamd: Null cols are not ordered last in natural order') ;
end
end
fprintf (' OK\n') ;
fprintf ('Matrices with a few empty rows\n') ;
% Test matrices with null rows inserted.
for trial = 1:400
waitbar (trial/400, h, 'CCOLAMD: with null rows') ;
m = 0 ;
while (m < 5)
A = rand_matrix (1000, 1000, 2, 0, 0) ;
m = size (A,1) ;
end
% Add 5 null rows at random locations.
null_row = randperm (m) ;
null_row = sort (null_row (1:5)) ;
A (null_row, :) = 0 ;
p = ccolamd (A) ;
check_perm (p, A) ;
end
fprintf (' OK\n') ;
fprintf ('\nccolamd and csymamd: all tests passed\n\n') ;
close (h) ;
%-------------------------------------------------------------------------------
function p = Acolamd (S, knobs)
% Acolamd: compare ccolamd and Tcolamd results
global ccolamd_default_knobs
if (nargin < 2)
p = ccolamd (S) ;
p1 = Tcolamd (S, [ccolamd_default_knobs 0 0], [ ]) ;
else
p = ccolamd (S, knobs) ;
p1 = Tcolamd (S, knobs, [ ]) ;
end
check_perm (p, S) ;
check_perm (p1, S) ;
if (any (p1 ~= p))
narg = nargin ;
if (nargin == 2)
save bad S narg knobs
else
save bad S narg
end
error ('Acolamd mismatch!') ;
end
%-------------------------------------------------------------------------------
function p = Asymamd (S, knobs)
% Asymamd: compare csymamd and Tsymamd results
global csymamd_default_knobs
if (nargin < 2)
p = csymamd (S) ;
p1 = Tsymamd (S, [csymamd_default_knobs 0], [ ]) ;
else
p = csymamd (S, knobs) ;
p1 = Tsymamd (S, knobs, [ ]) ;
end
if (any (p1 ~= p))
error ('Asymamd mismatch!') ;
end
%-------------------------------------------------------------------------------
function check_perm (p, A, cmember)
% check_perm: check for a valid permutation vector
if (isempty (A) & isempty (p)) %#ok
% empty permutation vectors of empty matrices are OK
return
end
if (isempty (p))
error ('Bad permutation: cannot be empty') ;
end
[m n] = size (A) ;
[p_m p_n] = size (p) ;
if (p_n == 1)
% force p to be a row vector
p = p' ;
[p_m p_n] = size (p) ;
end
if (n ~= p_n)
error ('Bad permutation: wrong size') ;
end
if (p_m ~= 1)
% p must be a vector
error ('Bad permutation: not a vector') ;
else
if (any (sort (p) - (1:p_n)))
error ('Bad permutation') ;
end
end
if (nargin > 2)
% check cmember
c = cmember (p) ;
% c must be monotonically non-decreasing
c = diff (c) ;
if (any (c < 0))
error ('permutation breaks the cmember constraints') ;
end
end
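% A small sketch of the cmember constraint checked above (assumption: columns
% in smaller constraint sets are ordered before those in larger sets):
%   A = sprand (100, 100, 0.05) ;
%   cmember = ones (1, 100) ;
%   cmember (1:10) = 2 ;                    % place 10 columns in a later set
%   p = ccolamd (A, [ ], cmember) ;
%   check_perm (p, A, cmember) ;            % cmember(p) must be non-decreasing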
%-------------------------------------------------------------------------------
function i = irand (n,s)
% irand: return a random vector of size s, with values between 1 and n
if (nargin == 1)
s = 1 ;
end
i = min (n, 1 + floor (rand (1,s) * n)) ;
%-------------------------------------------------------------------------------
function A = rand_matrix (n_max, m_max, mtype, d_rows, d_cols)
% rand_matrix: return a random sparse matrix
%
% A = rand_matrix (n_max, m_max, mtype, d_rows, d_cols)
%
% A binary matrix of random size, at most n_max-by-m_max, with d_rows dense rows
% and d_cols dense columns.
%
% mtype 1: square unsymmetric (m_max is ignored)
% mtype 2: rectangular
% mtype 3: symmetric (m_max is ignored)
n = irand (n_max) ;
if (mtype ~= 2)
% square
m = n ;
else
m = irand (m_max) ;
end
A = sprand (m, n, 10 / max (m,n)) ;
if (d_rows > 0)
% add dense rows
for k = 1:d_rows
i = irand (m) ;
nz = irand (n) ;
p = randperm (n) ;
p = p (1:nz) ;
A (i,p) = 1 ;
end
end
if (d_cols > 0)
% add dense cols
for k = 1:d_cols
j = irand (n) ;
nz = irand (m) ;
p = randperm (m) ;
p = p (1:nz) ;
A (p,j) = 1 ;
end
end
A = spones (A) ;
% ensure that there are no empty columns
d = find (full (sum (A,1)) == 0) ; %#ok
A (m,d) = 1 ; %#ok
% ensure that there are no empty rows
d = find (full (sum (A,2)) == 0) ; %#ok
A (d,n) = 1 ; %#ok
if (mtype == 3)
% symmetric
A = A + A' + speye (n) ;
end
A = spones (A) ;
%-------------------------------------------------------------------------------
% Tcolamd: run ccolamd in a testing mode
%-------------------------------------------------------------------------------
function p = Tcolamd (S, knobs, cmember)
% knobs (5) = 1 ;
p = ccolamdtestmex (S, knobs, cmember) ;
if (p (1) ~= -1)
check_perm (p, S) ;
end
%-------------------------------------------------------------------------------
% Tsymamd: run csymamd in a testing mode
%-------------------------------------------------------------------------------
function p = Tsymamd (S, knobs, cmember)
% knobs (2) = 1 ;
p = csymamdtestmex (S, knobs, cmember) ;
if (p (1) ~= -1)
check_perm (p, S) ;
end
|
github
|
lcnbeapp/beapp-master
|
klu_make.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/SuiteSparse/KLU/MATLAB/klu_make.m
| 10,922 |
utf_8
|
e85d62511e714298f5a2601c105ef2fe
|
function klu_make (with_cholmod)
%KLU_MAKE compiles the KLU mexFunctions
%
% Example:
% klu_make % compiles KLU without CHOLMOD
% klu_make (1) % with CHOLMOD, CAMD, CCOLAMD, and METIS
%
% KLU relies on AMD, COLAMD, and BTF for its ordering options, and can
% optionally use CHOLMOD, CCOLAMD, CAMD, and METIS as well. By default,
% CHOLMOD, CCOLAMD, CAMD, and METIS are not used.
%
% See http://www-users.cs.umn.edu/~karypis/metis for a copy of METIS 4.0.1.
%
% You must type the klu_make command while in the KLU/MATLAB directory.
%
% See also klu
% Copyright 2004-2009 Timothy A. Davis, Univ. of Florida
% http://www.cise.ufl.edu/research/sparse
if (nargin < 1)
with_cholmod = 0 ;
end
details = 0 ; % if 1, print details of each command
% modify this if your copy of METIS is not in SuiteSparse/metis-4.0:
metis_path = '../../metis-4.0' ;
d = '' ;
if (~isempty (strfind (computer, '64')))
% 64-bit MATLAB
d = '-largeArrayDims' ;
end
fprintf ('Compiling KLU ') ;
kk = 0 ;
include = '-I. -I../../AMD/Include -I../../COLAMD/Include -I../Include -I../../UFconfig -I../../BTF/Include' ;
if (with_cholmod)
include = [include ' -I../../CCOLAMD/Include -I../../CAMD/Include -I../../CHOLMOD/Include -I../../UFconfig -I' metis_path '/Lib -I../User'] ;
end
% do not attempt to compile CHOLMOD with large file support (not needed)
include = [include ' -DNLARGEFILE'] ;
% fix the METIS 4.0.1 rename.h file
if (with_cholmod)
fprintf ('with CHOLMOD, CAMD, CCOLAMD, and METIS\n') ;
f = fopen ('rename.h', 'w') ;
if (f == -1)
error ('unable to create rename.h in current directory') ;
end
fprintf (f, '/* do not edit this file; generated by klu_make */\n') ;
fprintf (f, '#undef log2\n') ;
fprintf (f, '#include "%s/Lib/rename.h"\n', metis_path) ;
fprintf (f, '#undef log2\n') ;
fprintf (f, '#define log2 METIS__log2\n') ;
fprintf (f, '#include "mex.h"\n') ;
fprintf (f, '#define malloc mxMalloc\n') ;
fprintf (f, '#define free mxFree\n') ;
fprintf (f, '#define calloc mxCalloc\n') ;
fprintf (f, '#define realloc mxRealloc\n') ;
fclose (f) ;
include = ['-DNSUPERNODAL -DNMODIFY -DNMATRIXOPS -DNCHECK ' include] ;
else
fprintf ('without CHOLMOD, CAMD, CCOLAMD, and METIS\n') ;
include = ['-DNCHOLMOD ' include] ;
end
include = strrep (include, '/', filesep) ;
amd_src = { ...
'../../AMD/Source/amd_1', ...
'../../AMD/Source/amd_2', ...
'../../AMD/Source/amd_aat', ...
'../../AMD/Source/amd_control', ...
'../../AMD/Source/amd_defaults', ...
'../../AMD/Source/amd_dump', ...
'../../AMD/Source/amd_global', ...
'../../AMD/Source/amd_info', ...
'../../AMD/Source/amd_order', ...
'../../AMD/Source/amd_postorder', ...
'../../AMD/Source/amd_post_tree', ...
'../../AMD/Source/amd_preprocess', ...
'../../AMD/Source/amd_valid' } ;
camd_src = { ...
'../../CAMD/Source/camd_1', ...
'../../CAMD/Source/camd_2', ...
'../../CAMD/Source/camd_aat', ...
'../../CAMD/Source/camd_control', ...
'../../CAMD/Source/camd_defaults', ...
'../../CAMD/Source/camd_dump', ...
'../../CAMD/Source/camd_global', ...
'../../CAMD/Source/camd_info', ...
'../../CAMD/Source/camd_order', ...
'../../CAMD/Source/camd_postorder', ...
'../../CAMD/Source/camd_preprocess', ...
'../../CAMD/Source/camd_valid' } ;
colamd_src = {
'../../COLAMD/Source/colamd', ...
'../../COLAMD/Source/colamd_global' } ;
ccolamd_src = {
'../../CCOLAMD/Source/ccolamd', ...
'../../CCOLAMD/Source/ccolamd_global' } ;
metis_src = {
'Lib/balance', ...
'Lib/bucketsort', ...
'Lib/ccgraph', ...
'Lib/coarsen', ...
'Lib/compress', ...
'Lib/debug', ...
'Lib/estmem', ...
'Lib/fm', ...
'Lib/fortran', ...
'Lib/frename', ...
'Lib/graph', ...
'Lib/initpart', ...
'Lib/kmetis', ...
'Lib/kvmetis', ...
'Lib/kwayfm', ...
'Lib/kwayrefine', ...
'Lib/kwayvolfm', ...
'Lib/kwayvolrefine', ...
'Lib/match', ...
'Lib/mbalance2', ...
'Lib/mbalance', ...
'Lib/mcoarsen', ...
'Lib/memory', ...
'Lib/mesh', ...
'Lib/meshpart', ...
'Lib/mfm2', ...
'Lib/mfm', ...
'Lib/mincover', ...
'Lib/minitpart2', ...
'Lib/minitpart', ...
'Lib/mkmetis', ...
'Lib/mkwayfmh', ...
'Lib/mkwayrefine', ...
'Lib/mmatch', ...
'Lib/mmd', ...
'Lib/mpmetis', ...
'Lib/mrefine2', ...
'Lib/mrefine', ...
'Lib/mutil', ...
'Lib/myqsort', ...
'Lib/ometis', ...
'Lib/parmetis', ...
'Lib/pmetis', ...
'Lib/pqueue', ...
'Lib/refine', ...
'Lib/separator', ...
'Lib/sfm', ...
'Lib/srefine', ...
'Lib/stat', ...
'Lib/subdomains', ...
'Lib/timing', ...
'Lib/util' } ;
for i = 1:length (metis_src)
metis_src {i} = [metis_path '/' metis_src{i}] ;
end
cholmod_src = {
'../../CHOLMOD/Core/cholmod_aat', ...
'../../CHOLMOD/Core/cholmod_add', ...
'../../CHOLMOD/Core/cholmod_band', ...
'../../CHOLMOD/Core/cholmod_change_factor', ...
'../../CHOLMOD/Core/cholmod_common', ...
'../../CHOLMOD/Core/cholmod_complex', ...
'../../CHOLMOD/Core/cholmod_copy', ...
'../../CHOLMOD/Core/cholmod_dense', ...
'../../CHOLMOD/Core/cholmod_error', ...
'../../CHOLMOD/Core/cholmod_factor', ...
'../../CHOLMOD/Core/cholmod_memory', ...
'../../CHOLMOD/Core/cholmod_sparse', ...
'../../CHOLMOD/Core/cholmod_transpose', ...
'../../CHOLMOD/Core/cholmod_triplet', ...
'../../CHOLMOD/Cholesky/cholmod_amd', ...
'../../CHOLMOD/Cholesky/cholmod_analyze', ...
'../../CHOLMOD/Cholesky/cholmod_colamd', ...
'../../CHOLMOD/Cholesky/cholmod_etree', ...
'../../CHOLMOD/Cholesky/cholmod_postorder', ...
'../../CHOLMOD/Cholesky/cholmod_rowcolcounts', ...
'../../CHOLMOD/Partition/cholmod_ccolamd', ...
'../../CHOLMOD/Partition/cholmod_csymamd', ...
'../../CHOLMOD/Partition/cholmod_camd', ...
'../../CHOLMOD/Partition/cholmod_metis', ...
'../../CHOLMOD/Partition/cholmod_nesdis' } ;
btf_src = {
'../../BTF/Source/btf_maxtrans', ...
'../../BTF/Source/btf_order', ...
'../../BTF/Source/btf_strongcomp' } ;
klu_src = {
'../Source/klu_free_symbolic', ...
'../Source/klu_defaults', ...
'../Source/klu_analyze_given', ...
'../Source/klu_analyze', ...
'../Source/klu_memory' } ;
if (with_cholmod)
klu_src = [klu_src { '../User/klu_l_cholmod' }] ; %#ok
end
klu_zlsrc = {
'../Source/klu', ...
'../Source/klu_kernel', ...
'../Source/klu_dump', ...
'../Source/klu_factor', ...
'../Source/klu_free_numeric', ...
'../Source/klu_solve', ...
'../Source/klu_scale', ...
'../Source/klu_refactor', ...
'../Source/klu_tsolve', ...
'../Source/klu_diagnostics', ...
'../Source/klu_sort', ...
'../Source/klu_extract', ...
} ;
klu_lobj = {
'klu_l', ...
'klu_l_kernel', ...
'klu_l_dump', ...
'klu_l_factor', ...
'klu_l_free_numeric', ...
'klu_l_solve', ...
'klu_l_scale', ...
'klu_l_refactor', ...
'klu_l_tsolve', ...
'klu_l_diagnostics', ...
'klu_l_sort', ...
'klu_l_extract', ...
} ;
klu_zlobj = {
'klu_zl', ...
'klu_zl_kernel', ...
'klu_zl_dump', ...
'klu_zl_factor', ...
'klu_zl_free_numeric', ...
'klu_zl_solve', ...
'klu_zl_scale', ...
'klu_zl_refactor', ...
'klu_zl_tsolve', ...
'klu_zl_diagnostics', ...
'klu_zl_sort', ...
'klu_zl_extract', ...
} ;
try
% ispc does not appear in MATLAB 5.3
pc = ispc ;
catch
% if ispc fails, assume we are on a Windows PC if it's not unix
pc = ~isunix ;
end
if (pc)
% Windows does not have drand48 and srand48, required by METIS. Use
% drand48 and srand48 in CHOLMOD/MATLAB/Windows/rand48.c instead.
obj_extension = '.obj' ;
cholmod_src = [cholmod_src {'../../CHOLMOD/MATLAB/Windows/rand48'}] ;
include = [include ' -I../../CHOLMOD/MATLAB/Windows'] ;
else
obj_extension = '.o' ;
end
% compile each library source file
obj = ' ' ;
source = [amd_src btf_src klu_src colamd_src] ;
if (with_cholmod)
source = [metis_src ccolamd_src camd_src cholmod_src source] ;
end
for f = source
fs = strrep (f {1}, '/', filesep) ;
slash = strfind (fs, filesep) ;
if (isempty (slash))
slash = 1 ;
else
slash = slash (end) + 1 ;
end
o = fs (slash:end) ;
obj = [obj ' ' o obj_extension] ; %#ok
s = sprintf ('mex %s -DDLONG -O %s -c %s.c', d, include, fs) ;
kk = do_cmd (s, kk, details) ;
end
for k = 1:length(klu_zlsrc)
ff = strrep (klu_zlsrc {k}, '/', filesep) ;
slash = strfind (ff, filesep) ;
if (isempty (slash))
slash = 1 ;
else
slash = slash (end) + 1 ;
end
o = ff (slash:end) ;
s = sprintf ('mex %s -DDLONG -O %s -c %s.c', d, include, ff) ;
kk = do_cmd (s, kk, details) ;
lobj = klu_lobj {k} ;
obj = [obj ' ' lobj obj_extension] ; %#ok
mvfile ([o obj_extension], [lobj obj_extension]) ;
s = sprintf ('mex %s -DDLONG -DCOMPLEX -O %s -c %s.c', d, include, ff) ;
kk = do_cmd (s, kk, details) ;
zlobj = klu_zlobj {k} ;
obj = [obj ' ' zlobj obj_extension] ; %#ok
mvfile ([o obj_extension], [zlobj obj_extension]) ;
end
% compile the KLU mexFunction
s = sprintf ('mex %s -DDLONG -O %s -output klu klu_mex.c', d, include) ;
s = [s obj] ; %#ok
kk = do_cmd (s, kk, details) ;
% clean up
s = ['delete ' obj] ;
do_cmd (s, kk, details) ;
fprintf ('\nKLU successfully compiled\n') ;
%-------------------------------------------------------------------------------
function rmfile (file)
% rmfile: delete a file, but only if it exists
if (length (dir (file)) > 0) %#ok
delete (file) ;
end
%-------------------------------------------------------------------------------
function cpfile (src, dst)
% cpfile: copy the src file to the filename dst, overwriting dst if it exists
rmfile (dst)
if (length (dir (src)) == 0) %#ok
fprintf ('File does not exist: %s\n', src) ;
error ('File does not exist') ;
end
copyfile (src, dst) ;
%-------------------------------------------------------------------------------
function mvfile (src, dst)
% mvfile: move the src file to the filename dst, overwriting dst if it exists
cpfile (src, dst) ;
rmfile (src) ;
%-------------------------------------------------------------------------------
function kk = do_cmd (s, kk, details)
%DO_CMD: evaluate a command, and either print it or print a "."
if (details)
fprintf ('%s\n', s) ;
else
if (mod (kk, 60) == 0)
fprintf ('\n') ;
end
kk = kk + 1 ;
fprintf ('.') ;
end
eval (s) ;
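% A quick post-compile check might look like this (assumption: the klu
% mexFunction is now on the path; see 'help klu' for the full interface):
%   A = sparse (rand (5) + 5*eye (5)) ;
%   b = rand (5, 1) ;
%   x = klu (A, '\', b) ;                   % factor and solve A*x = b
%   norm (A*x - b)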
|
github
|
lcnbeapp/beapp-master
|
cs_install.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/SuiteSparse/CSparse/MATLAB/cs_install.m
| 1,729 |
utf_8
|
698e3a7be8710df4bca038a373f4850f
|
function cs_install (do_pause)
%CS_INSTALL: compile and install CSparse for use in MATLAB.
% Your current working directory must be CSparse/MATLAB in order to use this
% function.
%
% The directories
%
% CSparse/MATLAB/CSparse
% CSparse/MATLAB/Demo
% CSparse/MATLAB/UFget
%
% are added to your MATLAB path (see the "pathtool" command to add these to
% your path permanently, for future MATLAB sessions).
%
% Next, the MATLAB CSparse demo program, CSparse/MATLAB/cs_demo is executed.
% To run the demo with pauses so you can see the results, use cs_install(1).
% To run the full MATLAB test programs for CSparse, run testall in the
% Test directory.
%
% Example:
% cs_install % install and run demo with no pauses
% cs_install(1) % install and run demo with pauses
%
% See also: cs_demo
%
% Copyright 2006-2007, Timothy A. Davis.
% http://www.cise.ufl.edu/research/sparse
fprintf ('Compiling and installing CSparse\n') ;
if (nargin < 1)
do_pause = 0 ;
end
if (~isempty (strfind (computer, '64')))
error ('64-bit version not supported; use CXSparse instead') ;
end
if (do_pause)
input ('Hit enter to continue: ') ;
end
addpath ([pwd filesep 'CSparse']) ;
addpath ([pwd filesep 'Demo']) ;
v = getversion ;
if (v >= 7.0)
addpath ([pwd filesep 'UFget']) ;
else
fprintf ('UFget not installed (MATLAB 7.0 or later required)\n') ;
end
cd ('CSparse') ;
cs_make (1) ;
cd ('../Demo') ;
cs_demo (do_pause)
%-------------------------------------------------------------------------------
function v = getversion
% determine the MATLAB version, and return it as a double.
v = sscanf (version, '%d.%d.%d') ;
v = 10.^(0:-1:-(length(v)-1)) * v ;
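% A worked example of the conversion above (assumption: version returns a
% string such as '7.4.0'):
%   v = sscanf ('7.4.0', '%d.%d.%d') ;      % v = [7 4 0]'
%   v = 10.^(0:-1:-(length(v)-1)) * v ;     % [1 0.1 0.01] * [7;4;0] = 7.4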
|
github
|
lcnbeapp/beapp-master
|
cs_make.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/SuiteSparse/CSparse/MATLAB/CSparse/cs_make.m
| 6,443 |
utf_8
|
db813bf30398d17dfe66ed1f938ee591
|
function [objfiles, timestamp_out] = cs_make (f)
%CS_MAKE compiles CSparse for use in MATLAB.
% Usage:
% cs_make
% [objfiles, timestamp] = cs_make (f)
%
% With no input arguments, or with f=0, only those files needing to be
% compiled are compiled (like the Unix/Linux/GNU "make" command, but not
% requiring "make"). If f is a nonzero number, all files are compiled.
% If f is a string, only that mexFunction is compiled. For example,
% cs_make ('cs_add') just compiles the cs_add mexFunction. This option is
% useful when developing a single new mexFunction. This function can only be
% used if the current directory is CSparse/MATLAB/CSparse. Returns a list of
% the object files in CSparse, and the latest modification time of any source
% codes.
%
% To add a new function and its MATLAB mexFunction to CSparse:
%
% (1) Create a source code file CSparse/Source/cs_mynewfunc.c.
% (2) Create a help file, CSparse/MATLAB/CSparse/cs_mynewfunc.m.
% This is very useful, but not strictly required.
% (3) Add the prototype of cs_mynewfunc to CSparse/Include/cs.h.
% (4) Create its MATLAB mexFunction, CSparse/MATLAB/cs_mynewfunc_mex.c.
% (5) Edit cs_make.m, and add 'cs_mynewfunc' to the 'cs' and 'csm' lists.
% (6) Type 'cs_make' in the CSparse/MATLAB/CSparse directory.
% If all goes well, your new function is ready for use in MATLAB.
%
% (7) Optionally add 'cs_mynewfunc' to CSparse/Source/Makefile
% and CSparse/MATLAB/CSparse/Makefile, if you want to use the
% Unix/Linux/GNU make command instead of cs_make.m. See where
% 'cs_add' and 'cs_add_mex' appear in those files, and add
% 'cs_mynewfunc' accordingly.
% (8) Optionally add 'cs_mynewfunc' to Tcov/Makefile, and add additional
% test code to cs_test.c, and add MATLAB test code to MATLAB/Test/*.
%
% Example:
% cs_make % compile everything
% cs_make ('cs_chol') ; % just compile cs_chol mexFunction
%
% See also MEX.
% Copyright 2006-2007, Timothy A. Davis.
% http://www.cise.ufl.edu/research/sparse
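% For reference, the two mex commands generated below look roughly like this
% for a single function (cs_mynewfunc is the hypothetical name used in the
% steps above; on Windows the object suffix is .obj):
%   mex -O -c -I../../Include ../../Source/cs_mynewfunc.c
%   mex -O -I../../Include cs_mynewfunc_mex.c cs_mex.o cs_add.o ... -output cs_mynewfunc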
fprintf ('Compiling CSparse\n') ;
% CSparse source files, in ../../Source, such as ../../Source/cs_add.c.
% Note that not all CSparse source files have their own mexFunction.
cs = { 'cs_add', 'cs_amd', 'cs_chol', 'cs_cholsol', 'cs_counts', ...
'cs_cumsum', 'cs_dfs', 'cs_dmperm', 'cs_droptol', 'cs_dropzeros', ...
'cs_dupl', 'cs_entry', 'cs_etree', 'cs_fkeep', 'cs_gaxpy', 'cs_happly', ...
'cs_house', 'cs_ipvec', 'cs_load', 'cs_lsolve', 'cs_ltsolve', 'cs_lu', ...
'cs_lusol', 'cs_malloc', 'cs_maxtrans', 'cs_multiply', 'cs_norm', ...
'cs_permute', 'cs_pinv', 'cs_post', 'cs_print', 'cs_pvec', 'cs_qr', ...
'cs_qrsol', 'cs_scatter', 'cs_scc', 'cs_schol', 'cs_sqr', 'cs_symperm', ...
'cs_tdfs', 'cs_transpose', 'cs_compress', 'cs_updown', 'cs_usolve', ...
'cs_utsolve', 'cs_util', 'cs_reach', 'cs_spsolve', 'cs_ereach', ...
'cs_leaf', 'cs_randperm' } ;
% add cs_mynewfunc to the above list
details = 1 ;
kk = 0 ;
csm = { } ;
if (nargin == 0)
force = 0 ;
elseif (ischar (f))
fprintf ('cs_make: compiling ../../Source files and %s_mex.c\n', f) ;
force = 0 ;
csm = {f} ;
else
force = f ;
details = details | (force > 1) ; %#ok
if (force & details) %#ok
fprintf ('cs_make: re-compiling everything\n') ;
end
end
if (force)
fprintf ('Compiling CSparse\n') ;
end
if (isempty (csm))
% mexFunctions, of the form cs_add_mex.c, etc, in this directory
csm = { 'cs_add', 'cs_amd', 'cs_chol', 'cs_cholsol', 'cs_counts', ...
'cs_dmperm', 'cs_droptol', 'cs_etree', 'cs_gaxpy', 'cs_lsolve', ...
'cs_ltsolve', 'cs_lu', 'cs_lusol', 'cs_multiply', 'cs_permute', ...
'cs_print', 'cs_qr', 'cs_qrsol', 'cs_scc', 'cs_symperm', 'cs_thumb', ...
'cs_transpose', 'cs_sparse', 'cs_updown', 'cs_usolve', ...
'cs_utsolve', 'cs_randperm', 'cs_sqr' } ;
% add cs_mynewfunc to the above list
end
try
% ispc does not appear in MATLAB 5.3
pc = ispc ;
catch
% if ispc fails, assume we are on a Windows PC if it's not unix
pc = ~isunix ;
end
if (pc)
obj = '.obj' ;
else
obj = '.o' ;
end
srcdir = '../../Source/' ;
hfile = '../../Include/cs.h' ;
% compile each CSparse source file
[anysrc timestamp kk] = compile_source ('', 'cs_mex', obj, hfile, force, ...
kk, details) ;
CS = ['cs_mex' obj] ;
if (nargout > 0)
objfiles = ['..' filesep 'CSparse' filesep 'cs_mex' obj] ;
end
for i = 1:length (cs)
[s t kk] = compile_source (srcdir, cs {i}, obj, hfile, force, kk, details) ;
timestamp = max (timestamp, t) ;
anysrc = anysrc | s ; %#ok
CS = [CS ' ' cs{i} obj] ; %#ok
if (nargout > 0)
objfiles = [objfiles ' ..' filesep 'CSparse' filesep cs{i} obj] ; %#ok
end
end
% compile each CSparse mexFunction
obj = ['.' mexext] ;
for i = 1:length (csm)
[s t] = cs_must_compile ('', csm{i}, '_mex', obj, hfile, force) ;
timestamp = max (timestamp, t) ;
if (anysrc | s) %#ok
cmd = sprintf ('mex -O -I../../Include %s_mex.c %s -output %s', ...
csm{i}, CS, csm{i}) ;
kk = do_cmd (cmd, kk, details) ;
end
end
fprintf ('\n') ;
if (nargout > 1)
timestamp_out = timestamp ;
end
if (force)
fprintf ('CSparse successfully compiled.\n') ;
end
%-------------------------------------------------------------------------------
function [s,t,kk] = compile_source (srcdir, f, obj, hfile, force, kk, details)
% compile a source code file in ../../Source, leaving object file in
% this directory.
[s t] = cs_must_compile (srcdir, f, '', obj, hfile, force) ;
if (s)
cmd = sprintf ('mex -O -c -I../../Include %s%s.c', srcdir, f) ;
kk = do_cmd (cmd, kk, details) ;
end
%-------------------------------------------------------------------------------
function kk = do_cmd (s, kk, details)
%DO_CMD: evaluate a command, and either print it or print a "."
s = strrep (s, '/', filesep) ;
if (details)
fprintf ('%s\n', s) ;
else
if (mod (kk, 60) == 0)
fprintf ('\n') ;
end
kk = kk + 1 ;
fprintf ('.') ;
end
eval (s) ;
|
github
|
lcnbeapp/beapp-master
|
mynormest1.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/SuiteSparse/CSparse/MATLAB/Test/mynormest1.m
| 1,843 |
utf_8
|
0f6e7b3dd575c4dbfa809e784838035e
|
function est = mynormest1 (L, U, P, Q)
%MYNORMEST1 estimate norm(A,1), using LU factorization (L*U = P*A*Q).
%
% Example:
% est = mynormest1 (L, U, P, Q)
% See also: testall
% Copyright 2006-2007, Timothy A. Davis.
% http://www.cise.ufl.edu/research/sparse
n = size (L,1) ;
est = 0 ;
S = zeros (n,1) ;
for k = 1:5
if k == 1
x = ones (n,1) / n ;
else
j = find (abs (x) == max (abs (x))) ;
j = j (1) ;
x = zeros (n,1) ;
x (j) = 1 ;
% fprintf ('eka: k %d j %d est %g\n', k, j, est) ;
end
% x=A\x, but use the existing P*A*Q=L*U factorization
x = Q * (U \ (L \ (P*x))) ;
est_old = est ;
est = sum (abs (x)) ;
unchanged = 1 ;
for i = 1:n
if (x (i) >= 0)
s = 1 ;
else
s = -1 ;
end
if (s ~= S (i))
S (i) = s ;
unchanged = 0 ;
end
end
if (any (S ~= signum (x)))
S' %#ok
signum(x)' %#ok
error ('Hey!') ;
end
if k > 1 & (est <= est_old | unchanged) %#ok
break ;
end
x = S ;
% x=A'\x, but use the existing P*A*Q=L*U factorization
x = P' * (L' \ (U' \ (Q'*x))) ;
if k > 1
jnew = find (abs (x) == max (abs (x))) ;
if (jnew == j)
break ;
end
end
end
for k = 1:n
x (k) = power (-1, k+1) * (1 + ((k-1)/(n-1))) ;
end
% x=A\x, but use the existing P*A*Q=L*U factorization
x = Q * (U \ (L \ (P*x))) ;
est_new = 2 * sum (abs (x)) / (3 * n) ;
if (est_new > est)
est = est_new ;
end
function s = signum (x)
%SIGNUM compute sign of x
s = ones (length (x),1) ;
s (find (x < 0)) = -1 ; %#ok
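% A minimal usage sketch (assumption: A is square and well-conditioned enough
% for the sparse LU below to succeed):
%   A = sprandn (100, 100, 0.1) + 10*speye (100) ;
%   [L, U, P, Q] = lu (A) ;                 % sparse LU with L*U = P*A*Q
%   est = mynormest1 (L, U, P, Q) ;         % estimate of norm(A,1)
%   relerr = abs (est - norm (A, 1)) / norm (A, 1)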
|
github
|
lcnbeapp/beapp-master
|
testall.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/SuiteSparse/CSparse/MATLAB/Test/testall.m
| 1,517 |
utf_8
|
221311b233cabd5b041462eeed288254
|
function testall
%TESTALL test all CSparse functions (run tests 1 to 28 below)
%
% Example:
% testall
% See also: cs_demo
% Copyright 2006-2007, Timothy A. Davis.
% http://www.cise.ufl.edu/research/sparse
h = waitbar (0, 'CSparse') ;
cs_test_make % compile all CSparse, Demo, Text, and Test mexFunctions
ntests = 28 ;
testwait (1, ntests, h) ; test1 ;
testwait (2, ntests, h) ; test2 ;
testwait (3, ntests, h) ; test3 ;
testwait (4, ntests, h) ; test4 ;
testwait (5, ntests, h) ; test5 ;
testwait (6, ntests, h) ; test6 ;
testwait (7, ntests, h) ; test7 ;
testwait (8, ntests, h) ; test8 ;
testwait (9, ntests, h) ; test9 ;
testwait (10, ntests, h) ; test10 ;
testwait (11, ntests, h) ; test11 ;
testwait (12, ntests, h) ; test12 ;
testwait (13, ntests, h) ; test13 ;
testwait (14, ntests, h) ; test14 ;
testwait (15, ntests, h) ; test15 ;
testwait (16, ntests, h) ; test16 ;
testwait (17, ntests, h) ; test17 ;
testwait (18, ntests, h) ; test18 ;
testwait (19, ntests, h) ; test19 ;
testwait (20, ntests, h) ; test20 ;
testwait (21, ntests, h) ; test21 ;
testwait (22, ntests, h) ; test22 ;
testwait (23, ntests, h) ; test23 ;
testwait (24, ntests, h) ; test24 ;
testwait (25, ntests, h) ; test25 ;
testwait (26, ntests, h) ; test26 ;
testwait (27, ntests, h) ; test27 ;
testwait (28, ntests, h) ; test28 ;
close (h)
function testwait (n,ntests,h)
fprintf ('\n------------------------ test%d\n', n) ;
waitbar (n/(ntests+1), h, sprintf ('CSparse test %d of %d\n', n, ntests)) ;
|
github
|
lcnbeapp/beapp-master
|
umfpack_report.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/SuiteSparse/UMFPACK/MATLAB/umfpack_report.m
| 16,015 |
utf_8
|
ab2ab9204411376267d5931f57b6b59b
|
function umfpack_report (Control, Info)
%UMFPACK_REPORT prints optional control settings and statistics
%
% Example:
% umfpack_report (Control, Info) ;
%
% Prints the current Control settings for umfpack2, and the statistical
% information returned by umfpack2 in the Info array. If Control is
% an empty matrix, then the default control settings are printed.
%
% Control is 20-by-1, and Info is 90-by-1. Not all entries are used.
%
% Alternative usages:
%
% umfpack_report ([ ], Info) ; print the default control parameters
% and the Info array.
% umfpack_report (Control) ; print the control parameters only.
% umfpack_report ; print the default control parameters
% and an empty Info array.
%
% See also umfpack, umfpack2, umfpack_make, umfpack_details,
% umfpack_demo, and umfpack_simple.
% Copyright 1995-2007 by Timothy A. Davis.
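% A small usage sketch (assumption: umfpack2 is compiled and accepts the
% calls below; see 'help umfpack2' for the authoritative interface):
%   Control = umfpack2 ;                            % default control settings
%   [x, Info] = umfpack2 (A, '\', b, Control) ;     % solve A*x = b
%   umfpack_report (Control, Info) ;                % print settings and stats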
%-------------------------------------------------------------------------------
% get inputs, use defaults if input arguments not present
%-------------------------------------------------------------------------------
% The contents of Control and Info are defined in umfpack.h
if (nargin < 1)
Control = [] ;
end
if (nargin < 2)
Info = [] ;
end
if (isempty (Control))
Control = umfpack2 ;
end
if (isempty (Info))
Info = [ 0 (-ones (1, 89)) ] ;
end
%-------------------------------------------------------------------------------
% control settings
%-------------------------------------------------------------------------------
fprintf ('\nUMFPACK: Control settings:\n\n') ;
fprintf (' Control (1): print level: %d\n', Control (1)) ;
fprintf (' Control (2): dense row parameter: %g\n', Control (2)) ;
fprintf (' "dense" rows have > max (16, (%g)*16*sqrt(n_col)) entries\n', Control (2)) ;
fprintf (' Control (3): dense column parameter: %g\n', Control (3)) ;
fprintf (' "dense" columns have > max (16, (%g)*16*sqrt(n_row)) entries\n', Control (3)) ;
fprintf (' Control (4): pivot tolerance: %g\n', Control (4)) ;
fprintf (' Control (5): max block size for dense matrix kernels: %d\n', Control (5)) ;
prstrat (' Control (6): strategy: %g ', Control (6)) ;
fprintf (' Control (7): initial allocation ratio: %g\n', Control (7)) ;
fprintf (' Control (8): max iterative refinement steps: %d\n', Control (8)) ;
fprintf (' Control (13): 2-by-2 pivot tolerance: %g\n', Control (13)) ;
fprintf (' Control (14): Q fixed during numeric factorization: %g ', Control (14)) ;
if (Control (14) > 0)
fprintf ('(yes)\n') ;
elseif (Control (14) < 0)
fprintf ('(no)\n') ;
else
fprintf ('(auto)\n') ;
end
fprintf (' Control (15): AMD dense row/column parameter: %g\n', Control (15)) ;
fprintf (' "dense" rows/columns in A+A'' have > max (16, (%g)*sqrt(n)) entries.\n', Control (15)) ;
fprintf (' Only used if the AMD ordering is used.\n') ;
fprintf (' Control (16): diagonal pivot tolerance: %g\n', Control (16)) ;
fprintf (' Only used if diagonal pivoting is attempted.\n') ;
fprintf (' Control (17): scaling option: %g ', Control (17)) ;
if (Control (17) == 0)
fprintf ('(none)\n') ;
elseif (Control (17) == 2)
fprintf ('(scale the matrix by\n') ;
fprintf (' dividing each row by max. abs. value in each row)\n') ;
else
fprintf ('(scale the matrix by\n') ;
fprintf (' dividing each row by sum of abs. values in each row)\n') ;
end
fprintf (' Control (18): frontal matrix allocation ratio: %g\n', Control (18)) ;
fprintf (' Control (19): drop tolerance: %g\n', Control (19)) ;
fprintf (' Control (20): AMD and COLAMD aggressive absorption: %g ', Control (20)) ;
yes_no (Control (20)) ;
% compile-time options:
fprintf ('\n The following options can only be changed at compile-time:\n') ;
if (Control (9) == 1)
fprintf (' Control (9): compiled to use the BLAS\n') ;
else
fprintf (' Control (9): compiled without the BLAS\n') ;
fprintf (' (you will not get the best possible performance)\n') ;
end
if (Control (10) == 1)
fprintf (' Control (10): compiled for MATLAB\n') ;
elseif (Control (10) == 2)
fprintf (' Control (10): compiled for MATLAB\n') ;
else
fprintf (' Control (10): not compiled for MATLAB\n') ;
fprintf (' Printing will be in terms of 0-based matrix indexing,\n') ;
fprintf (' not 1-based as is expected in MATLAB. Diary output may\n') ;
fprintf (' not be properly recorded.\n') ;
end
if (Control (11) == 2)
fprintf (' Control (11): uses POSIX times ( ) to get CPU time and wallclock time.\n') ;
elseif (Control (11) == 1)
fprintf (' Control (11): uses getrusage to get CPU time.\n') ;
else
fprintf (' Control (11): uses ANSI C clock to get CPU time.\n') ;
fprintf (' The CPU time may wrap around, type "help cputime".\n') ;
end
if (Control (12) == 1)
fprintf (' Control (12): compiled with debugging enabled\n') ;
fprintf (' ###########################################\n') ;
fprintf (' ### This will be exceedingly slow! ########\n') ;
fprintf (' ###########################################\n') ;
else
fprintf (' Control (12): compiled for normal operation (no debugging)\n') ;
end
%-------------------------------------------------------------------------------
% Info:
%-------------------------------------------------------------------------------
if (nargin == 1)
return
end
status = Info (1) ;
fprintf ('\nUMFPACK status: Info (1): %d, ', status) ;
if (status == 0)
fprintf ('OK\n') ;
elseif (status == 1)
fprintf ('WARNING matrix is singular\n') ;
elseif (status == -1)
fprintf ('ERROR out of memory\n') ;
elseif (status == -3)
fprintf ('ERROR numeric LU factorization is invalid\n') ;
elseif (status == -4)
fprintf ('ERROR symbolic LU factorization is invalid\n') ;
elseif (status == -5)
fprintf ('ERROR required argument is missing\n') ;
elseif (status == -6)
fprintf ('ERROR n <= 0\n') ;
elseif (status <= -7 & status >= -12 | status == -14) %#ok
fprintf ('ERROR matrix A is corrupted\n') ;
elseif (status == -13)
fprintf ('ERROR invalid system\n') ;
elseif (status == -15)
fprintf ('ERROR invalid permutation\n') ;
elseif (status == -911)
fprintf ('ERROR internal error!\n') ;
fprintf ('Please report this error to Tim Davis ([email protected])\n') ;
else
fprintf ('ERROR unrecognized error. Info array corrupted\n') ;
end
fprintf (' (a -1 means the entry has not been computed):\n') ;
fprintf ('\n Basic statistics:\n') ;
fprintf (' Info (2): %d, # of rows of A\n', Info (2)) ;
fprintf (' Info (17): %d, # of columns of A\n', Info (17)) ;
fprintf (' Info (3): %d, nnz (A)\n', Info (3)) ;
fprintf (' Info (4): %d, Unit size, in bytes, for memory usage reported below\n', Info (4)) ;
fprintf (' Info (5): %d, size of int (in bytes)\n', Info (5)) ;
fprintf (' Info (6): %d, size of UF_long (in bytes)\n', Info (6)) ;
fprintf (' Info (7): %d, size of pointer (in bytes)\n', Info (7)) ;
fprintf (' Info (8): %d, size of numerical entry (in bytes)\n', Info (8)) ;
fprintf ('\n Pivots with zero Markowitz cost removed to obtain submatrix S:\n') ;
fprintf (' Info (57): %d, # of pivots with one entry in pivot column\n', Info (57)) ;
fprintf (' Info (58): %d, # of pivots with one entry in pivot row\n', Info (58)) ;
fprintf (' Info (59): %d, # of rows/columns in submatrix S (if square)\n', Info (59)) ;
fprintf (' Info (60): ') ;
if (Info (60) > 0)
fprintf ('submatrix S square and diagonal preserved\n') ;
elseif (Info (60) == 0)
fprintf ('submatrix S not square or diagonal not preserved\n') ;
else
fprintf ('\n') ;
end
fprintf (' Info (9): %d, # of "dense" rows in S\n', Info (9)) ;
fprintf (' Info (10): %d, # of empty rows in S\n', Info (10)) ;
fprintf (' Info (11): %d, # of "dense" columns in S\n', Info (11)) ;
fprintf (' Info (12): %d, # of empty columns in S\n', Info (12)) ;
fprintf (' Info (34): %g, symmetry of pattern of S\n', Info (34)) ;
fprintf (' Info (35): %d, # of off-diagonal nonzeros in S+S''\n', Info (35)) ;
fprintf (' Info (36): %d, nnz (diag (S))\n', Info (36)) ;
fprintf ('\n 2-by-2 pivoting to place large entries on diagonal:\n') ;
fprintf (' Info (52): %d, # of small diagonal entries of S\n', Info (52)) ;
fprintf (' Info (53): %d, # of unmatched small diagonal entries\n', Info (53)) ;
fprintf (' Info (54): %g, symmetry of P2*S\n', Info (54)) ;
fprintf (' Info (55): %d, # of off-diagonal entries in (P2*S)+(P2*S)''\n', Info (55)) ;
fprintf (' Info (56): %d, nnz (diag (P2*S))\n', Info (56)) ;
fprintf ('\n AMD results, for strict diagonal pivoting:\n') ;
fprintf (' Info (37): %d, est. nz in L and U\n', Info (37)) ;
fprintf (' Info (38): %g, est. flop count\n', Info (38)) ;
fprintf (' Info (39): %g, # of "dense" rows in S+S''\n', Info (39)) ;
fprintf (' Info (40): %g, est. max. nz in any column of L\n', Info (40)) ;
fprintf ('\n Final strategy selection, based on the analysis above:\n') ;
prstrat (' Info (19): %d, strategy used ', Info (19)) ;
fprintf (' Info (20): %d, ordering used ', Info (20)) ;
if (Info (20) == 0)
fprintf ('(COLAMD on A)\n') ;
elseif (Info (20) == 1)
fprintf ('(AMD on A+A'')\n') ;
elseif (Info (20) == 2)
fprintf ('(provided by user)\n') ;
else
fprintf ('(undefined ordering option)\n') ;
end
fprintf (' Info (32): %d, Q fixed during numeric factorization: ', Info (32)) ;
yes_no (Info (32)) ;
fprintf (' Info (33): %d, prefer diagonal pivoting: ', Info (33)) ;
yes_no (Info (33)) ;
fprintf ('\n symbolic analysis time and memory usage:\n') ;
fprintf (' Info (13): %d, defragmentations during symbolic analysis\n', Info (13)) ;
fprintf (' Info (14): %d, memory used during symbolic analysis (Units)\n', Info (14)) ;
fprintf (' Info (15): %d, final size of symbolic factors (Units)\n', Info (15)) ;
fprintf (' Info (16): %.2f, symbolic analysis CPU time (seconds)\n', Info (16)) ;
fprintf (' Info (18): %.2f, symbolic analysis wall clock time (seconds)\n', Info (18)) ;
fprintf ('\n Estimates computed in the symbolic analysis:\n') ;
fprintf (' Info (21): %d, est. size of LU factors (Units)\n', Info (21)) ;
fprintf (' Info (22): %d, est. total peak memory usage (Units)\n', Info (22)) ;
fprintf (' Info (23): %d, est. factorization flop count\n', Info (23)) ;
fprintf (' Info (24): %d, est. nnz (L)\n', Info (24)) ;
fprintf (' Info (25): %d, est. nnz (U)\n', Info (25)) ;
fprintf (' Info (26): %d, est. initial size, variable-part of LU (Units)\n', Info (26)) ;
fprintf (' Info (27): %d, est. peak size, of variable-part of LU (Units)\n', Info (27)) ;
fprintf (' Info (28): %d, est. final size, of variable-part of LU (Units)\n', Info (28)) ;
fprintf (' Info (29): %d, est. max frontal matrix size (# of entries)\n', Info (29)) ;
fprintf (' Info (30): %d, est. max # of rows in frontal matrix\n', Info (30)) ;
fprintf (' Info (31): %d, est. max # of columns in frontal matrix\n', Info (31)) ;
fprintf ('\n Computed in the numeric factorization (estimates shown above):\n') ;
fprintf (' Info (41): %d, size of LU factors (Units)\n', Info (41)) ;
fprintf (' Info (42): %d, total peak memory usage (Units)\n', Info (42)) ;
fprintf (' Info (43): %d, factorization flop count\n', Info (43)) ;
fprintf (' Info (44): %d, nnz (L)\n', Info (44)) ;
fprintf (' Info (45): %d, nnz (U)\n', Info (45)) ;
fprintf (' Info (46): %d, initial size of variable-part of LU (Units)\n', Info (46)) ;
fprintf (' Info (47): %d, peak size of variable-part of LU (Units)\n', Info (47)) ;
fprintf (' Info (48): %d, final size of variable-part of LU (Units)\n', Info (48)) ;
fprintf (' Info (49): %d, max frontal matrix size (# of numerical entries)\n', Info (49)) ;
fprintf (' Info (50): %d, max # of rows in frontal matrix\n', Info (50)) ;
fprintf (' Info (51): %d, max # of columns in frontal matrix\n', Info (51)) ;
fprintf ('\n Computed in the numeric factorization (no estimates computed a priori):\n') ;
fprintf (' Info (61): %d, defragmentations during numeric factorization\n', Info (61)) ;
fprintf (' Info (62): %d, reallocations during numeric factorization\n', Info (62)) ;
fprintf (' Info (63): %d, costly reallocations during numeric factorization\n', Info (63)) ;
fprintf (' Info (64): %d, integer indices in compressed pattern of L and U\n', Info (64)) ;
fprintf (' Info (65): %d, numerical values stored in L and U\n', Info (65)) ;
fprintf (' Info (66): %.2f, numeric factorization CPU time (seconds)\n', Info (66)) ;
fprintf (' Info (76): %.2f, numeric factorization wall clock time (seconds)\n', Info (76)) ;
if (Info (66) > 0.05 & Info (43) > 0) %#ok
fprintf (' mflops in numeric factorization phase: %.2f\n', 1e-6 * Info (43) / Info (66)) ;
end
fprintf (' Info (67): %d, nnz (diag (U))\n', Info (67)) ;
fprintf (' Info (68): %g, reciprocal condition number estimate\n', Info (68)) ;
fprintf (' Info (69): %g, matrix was ', Info (69)) ;
if (Info (69) == 0)
fprintf ('not scaled\n') ;
elseif (Info (69) == 2)
fprintf ('scaled (row max)\n') ;
else
fprintf ('scaled (row sum)\n') ;
end
fprintf (' Info (70): %g, min. scale factor of rows of A\n', Info (70)) ;
fprintf (' Info (71): %g, max. scale factor of rows of A\n', Info (71)) ;
fprintf (' Info (72): %g, min. abs. on diagonal of U\n', Info (72)) ;
fprintf (' Info (73): %g, max. abs. on diagonal of U\n', Info (73)) ;
fprintf (' Info (74): %g, initial allocation parameter used\n', Info (74)) ;
fprintf (' Info (75): %g, # of forced updates due to frontal growth\n', Info (75)) ;
fprintf (' Info (77): %d, # of off-diagonal pivots\n', Info (77)) ;
fprintf (' Info (78): %d, nnz (L), if no small entries dropped\n', Info (78)) ;
fprintf (' Info (79): %d, nnz (U), if no small entries dropped\n', Info (79)) ;
fprintf (' Info (80): %d, # of small entries dropped\n', Info (80)) ;
fprintf ('\n Computed in the solve step:\n') ;
fprintf (' Info (81): %d, iterative refinement steps taken\n', Info (81)) ;
fprintf (' Info (82): %d, iterative refinement steps attempted\n', Info (82)) ;
fprintf (' Info (83): %g, omega(1), sparse-backward error estimate\n', Info (83)) ;
fprintf (' Info (84): %g, omega(2), sparse-backward error estimate\n', Info (84)) ;
fprintf (' Info (85): %d, solve flop count\n', Info (85)) ;
fprintf (' Info (86): %.2f, solve CPU time (seconds)\n', Info (86)) ;
fprintf (' Info (87): %.2f, solve wall clock time (seconds)\n', Info (87)) ;
fprintf ('\n Info (88:90): unused\n\n') ;
%-------------------------------------------------------------------------------
function prstrat (fmt, strategy)
% prstrat print the ordering strategy
fprintf (fmt, strategy) ;
if (strategy == 1)
fprintf ('(unsymmetric)\n') ;
fprintf (' Q = COLAMD (A), Q refined during numerical\n') ;
fprintf (' factorization, and no attempt at diagonal pivoting.\n') ;
elseif (strategy == 2)
fprintf ('(symmetric, with 2-by-2 pivoting)\n') ;
fprintf (' P2 = row permutation to place large values on the diagonal\n') ;
fprintf (' Q = AMD (P2*A+(P2*A)''), Q not refined during numeric factorization,\n') ;
fprintf (' and diagonal pivoting attempted.\n') ;
elseif (strategy == 3)
fprintf ('(symmetric)\n') ;
fprintf (' Q = AMD (A+A''), Q not refined during numeric factorization,\n') ;
fprintf (' and diagonal pivoting (P=Q'') attempted.\n') ;
else
% strategy = 0 ;
fprintf ('(auto)\n') ;
end
%-------------------------------------------------------------------------------
function yes_no (s)
% yes_no print yes or no
if (s == 0)
fprintf ('(no)\n') ;
else
fprintf ('(yes)\n') ;
end
|
github | lcnbeapp/beapp-master | umfpack_make.m | .m | beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/SuiteSparse/UMFPACK/MATLAB/umfpack_make.m | 12,201 | utf_8 | c47a8208e044186c2dd6795d63bbe158 |
function umfpack_make (lapack)
%UMFPACK_MAKE to compile umfpack2 for use in MATLAB
%
% Compiles the umfpack2 and luflop mexFunctions.
%
% Example:
% umfpack_make % use default LAPACK and BLAS
% umfpack_make ('lcc_lib/libmwlapack.lib') % for Windows
% umfpack_make ('-lmwlapack -lmwblas') % for Linux, Unix, Mac
%
% the string gives the locations of the LAPACK and BLAS libraries.
%
% See also: umfpack, umfpack2, umfpack_details, umfpack_report, umfpack_demo,
% and umfpack_simple.
% Copyright 1995-2007 by Timothy A. Davis.
details = 0 ; % set to 1 to print out each mex command as it's executed
d = '' ;
% if (~isempty (strfind (computer, '64')))
% d = ' -largeArrayDims' ;
% end
v = getversion ; % Added by [email protected] (-09).
if (~isempty (strfind (computer, '64')))
d = ' -largeArrayDims' ;
% The next three lines are added by [email protected] (-09).
% These options are needed for some reason in Matlab 7.8 or newer.
if v >= 7.8
d = [d ' -DLONG -D''LONGBLAS=UF_long'''];
end
end
try
% ispc does not appear in MATLAB 5.3
pc = ispc ;
catch
% if ispc fails, assume we are on a Windows PC if it's not unix
pc = ~isunix ;
end
fprintf ('Compiling UMFPACK for MATLAB Version %g\n', v) ;
if (pc)
obj = 'obj' ;
else
obj = 'o' ;
end
kk = 0 ;
%-------------------------------------------------------------------------------
% BLAS option
%-------------------------------------------------------------------------------
% This is exceedingly ugly. The MATLAB mex command needs to be told where to
% find the LAPACK and BLAS libraries, which is a real portability nightmare.
if (nargin < 1)
if (pc)
if (v < 6.5)
% MATLAB 6.1 and earlier: use the version supplied here
lapack = 'lcc_lib/libmwlapack.lib' ;
fprintf ('Using %s. If this fails with dgemm and others\n',lapack);
fprintf ('undefined, then edit umfpack_make.m and modify the') ;
fprintf (' statement:\nlapack = ''%s'' ;\n', lapack) ;
elseif (v < 7.5)
lapack = 'libmwlapack.lib' ;
else
% MATLAB R2007b (7.5) made the problem worse
lapack = 'libmwlapack.lib libmwblas.lib' ;
end
else
% For other systems, mex should find lapack on its own, but this has
% been broken in MATLAB R2007a; the following is now required.
if (v < 7.5)
lapack = '-lmwlapack' ;
else
% MATLAB R2007b (7.5) made the problem worse
lapack = '-lmwlapack -lmwblas' ;
end
end
end
%-------------------------------------------------------------------------------
% -DNPOSIX option (for sysconf and times timer routines)
%-------------------------------------------------------------------------------
posix = ' ' ;
if (~pc)
% added for timing routine:
lapack = [lapack ' -lrt'] ;
posix = ' -DLIBRT' ;
end
% if (~pc)
% msg = [ ...
% '--------------------------------------------------------------\n', ...
% '\nUMFPACK can use the POSIX routines sysconf () and times ()\n', ...
% 'to provide CPU time and wallclock time statistics. If you do not\n', ...
% 'have a POSIX-compliant operating system, then UMFPACK won''t\n', ...
% 'compile. If you don''t know which option to pick, try the\n', ...
% 'default. If you get an error saying that sysconf and/or times\n', ...
% 'are not defined, then recompile with the non-POSIX option.\n', ...
% '\nPlease select one of the following options:\n', ...
% ' 1: use POSIX sysconf and times routines (default)\n', ...
% ' 2: do not use POSIX routines\n'] ;
% fprintf (msg) ;
% posix = str2num (input (': ', 's')) ;
% if (isempty (posix))
% posix = 1 ;
% end
% if (posix == 2)
% fprintf ('\nNot using POSIX sysconf and times routines.\n') ;
% posix = ' -DNPOSIX' ;
% else
% fprintf ('\nUsing POSIX sysconf and times routines.\n') ;
% posix = '' ;
% end
% end
%-------------------------------------------------------------------------------
% mex command
%-------------------------------------------------------------------------------
umfdir = '../Source/' ;
amddir = '../../AMD/Source/' ;
incdir = ' -I../Include -I../Source -I../../AMD/Include -I../../UFconfig' ;
% with optimization:
mx = sprintf ('mex -O%s%s%s ', posix, incdir, d) ;
% no optimization:
%% mx = sprintf ('mex -g %s%s%s ', posix, incdir, d) ;
% fprintf ('compile options:\n%s\n', mx) ;
%-------------------------------------------------------------------------------
% source files
%-------------------------------------------------------------------------------
% non-user-callable umf_*.[ch] files:
umfch = { 'assemble', 'blas3_update', ...
'build_tuples', 'create_element', ...
'dump', 'extend_front', 'garbage_collection', ...
'get_memory', 'init_front', 'kernel', ...
'kernel_init', 'kernel_wrapup', ...
'local_search', 'lsolve', 'ltsolve', ...
'mem_alloc_element', 'mem_alloc_head_block', ...
'mem_alloc_tail_block', 'mem_free_tail_block', ...
'mem_init_memoryspace', ...
'report_vector', 'row_search', 'scale_column', ...
'set_stats', 'solve', 'symbolic_usage', 'transpose', ...
'tuple_lengths', 'usolve', 'utsolve', 'valid_numeric', ...
'valid_symbolic', 'grow_front', 'start_front', '2by2', ...
'store_lu', 'scale' } ;
% non-user-callable umf_*.[ch] files, int versions only (no real/complex):
umfint = { 'analyze', 'apply_order', 'colamd', 'free', 'fsize', ...
'is_permutation', 'malloc', 'realloc', 'report_perm', ...
'singletons' } ;
% non-user-callable and user-callable amd_*.[ch] files (int versions only):
amdsrc = { 'aat', '1', '2', 'dump', 'postorder', 'post_tree', 'defaults', ...
'order', 'control', 'info', 'valid', 'preprocess', 'global' } ;
% user-callable umfpack_*.[ch] files (real/complex):
user = { 'col_to_triplet', 'defaults', 'free_numeric', ...
'free_symbolic', 'get_numeric', 'get_lunz', ...
'get_symbolic', 'get_determinant', 'numeric', 'qsymbolic', ...
'report_control', 'report_info', 'report_matrix', ...
'report_numeric', 'report_perm', 'report_status', ...
'report_symbolic', 'report_triplet', ...
'report_vector', 'solve', 'symbolic', ...
'transpose', 'triplet_to_col', 'scale' ...
'load_numeric', 'save_numeric', 'load_symbolic', 'save_symbolic' } ;
% user-callable umfpack_*.[ch], only one version
generic = { 'timer', 'tictoc', 'global' } ;
M = cell (0) ;
%-------------------------------------------------------------------------------
% Create the umfpack2 and amd2 mexFunctions for MATLAB (int versions only)
%-------------------------------------------------------------------------------
for k = 1:length(umfint)
[M, kk] = make (M, '%s -DDLONG -c %sumf_%s.c', 'umf_%s.%s', ...
'umf_%s_%s.%s', mx, umfint {k}, umfint {k}, 'm', obj, umfdir, ...
kk, details) ;
end
rules = { [mx ' -DDLONG'] , [mx ' -DZLONG'] } ;
kinds = { 'md', 'mz' } ;
for what = 1:2
rule = rules {what} ;
kind = kinds {what} ;
[M, kk] = make (M, '%s -DCONJUGATE_SOLVE -c %sumf_%s.c', 'umf_%s.%s', ...
'umf_%s_%s.%s', rule, 'ltsolve', 'lhsolve', kind, obj, umfdir, ...
kk, details) ;
[M, kk] = make (M, '%s -DCONJUGATE_SOLVE -c %sumf_%s.c', 'umf_%s.%s', ...
'umf_%s_%s.%s', rule, 'utsolve', 'uhsolve', kind, obj, umfdir, ...
kk, details) ;
[M, kk] = make (M, '%s -DDO_MAP -c %sumf_%s.c', 'umf_%s.%s', ...
'umf_%s_%s_map_nox.%s', rule, 'triplet', 'triplet', kind, obj, ...
umfdir, kk, details) ;
[M, kk] = make (M, '%s -DDO_VALUES -c %sumf_%s.c', 'umf_%s.%s', ...
'umf_%s_%s_nomap_x.%s', rule, 'triplet', 'triplet', kind, obj, ...
umfdir, kk, details) ;
[M, kk] = make (M, '%s -c %sumf_%s.c', 'umf_%s.%s', ...
'umf_%s_%s_nomap_nox.%s', rule, 'triplet', 'triplet', kind, obj, ...
umfdir, kk, details) ;
[M, kk] = make (M, '%s -DDO_MAP -DDO_VALUES -c %sumf_%s.c', 'umf_%s.%s', ...
'umf_%s_%s_map_x.%s', rule, 'triplet', 'triplet', kind, obj, ...
umfdir, kk, details) ;
[M, kk] = make (M, '%s -DFIXQ -c %sumf_%s.c', 'umf_%s.%s', ...
'umf_%s_%s_fixq.%s', rule, 'assemble', 'assemble', kind, obj, ...
umfdir, kk, details) ;
[M, kk] = make (M, '%s -DDROP -c %sumf_%s.c', 'umf_%s.%s', ...
'umf_%s_%s_drop.%s', rule, 'store_lu', 'store_lu', kind, obj, ...
umfdir, kk, details) ;
for k = 1:length(umfch)
[M, kk] = make (M, '%s -c %sumf_%s.c', 'umf_%s.%s', 'umf_%s_%s.%s', ...
rule, umfch {k}, umfch {k}, kind, obj, umfdir, kk, details) ;
end
[M, kk] = make (M, '%s -DWSOLVE -c %sumfpack_%s.c', 'umfpack_%s.%s', ...
'umfpack_%s_w%s.%s', rule, 'solve', 'solve', kind, obj, umfdir, ...
kk, details) ;
for k = 1:length(user)
[M, kk] = make (M, '%s -c %sumfpack_%s.c', 'umfpack_%s.%s', ...
'umfpack_%s_%s.%s', rule, user {k}, user {k}, kind, obj, ...
umfdir, kk, details) ;
end
end
for k = 1:length(generic)
[M, kk] = make (M, '%s -c %sumfpack_%s.c', 'umfpack_%s.%s', ...
'umfpack_%s_%s.%s', mx, generic {k}, generic {k}, 'm', obj, ...
umfdir, kk, details) ;
end
%----------------------------------------
% AMD routines (int only)
%----------------------------------------
for k = 1:length(amdsrc)
[M, kk] = make (M, '%s -DDLONG -c %samd_%s.c', 'amd_%s.%s', ...
'amd_%s_%s.%s', mx, amdsrc {k}, amdsrc {k}, 'm', obj, amddir, ...
kk, details) ;
end
%----------------------------------------
% compile the umfpack2 mexFunction
%----------------------------------------
C = sprintf ('%s -output umfpack2 umfpackmex.c', mx) ;
for i = 1:length (M)
C = [C ' ' (M {i})] ; %#ok
end
C = [C ' ' lapack] ;
kk = cmd (C, kk, details) ;
%----------------------------------------
% delete the object files
%----------------------------------------
for i = 1:length (M)
rmfile (M {i}) ;
end
%----------------------------------------
% compile the luflop mexFunction
%----------------------------------------
cmd (sprintf ('%s -output luflop luflopmex.c', mx), kk, details) ;
fprintf ('\nUMFPACK successfully compiled\n') ;
%===============================================================================
% end of umfpack_make
%===============================================================================
%-------------------------------------------------------------------------------
function rmfile (file)
% rmfile: delete a file, but only if it exists
if (length (dir (file)) > 0) %#ok
delete (file) ;
end
%-------------------------------------------------------------------------------
function cpfile (src, dst)
% cpfile: copy the src file to the filename dst, overwriting dst if it exists
rmfile (dst)
if (length (dir (src)) == 0) %#ok
fprintf ('File does not exist: %s\n', src) ;
error ('File does not exist') ;
end
copyfile (src, dst) ;
%-------------------------------------------------------------------------------
function mvfile (src, dst)
% mvfile: move the src file to the filename dst, overwriting dst if it exists
cpfile (src, dst) ;
rmfile (src) ;
%-------------------------------------------------------------------------------
function kk = cmd (s, kk, details)
%CMD: evaluate a command, and either print it or print a "."
if (details)
fprintf ('%s\n', s) ;
else
if (mod (kk, 60) == 0)
fprintf ('\n') ;
end
kk = kk + 1 ;
fprintf ('.') ;
end
eval (s) ;
%-------------------------------------------------------------------------------
function [M, kk] = make (M, s, src, dst, rule, file1, file2, kind, obj, ...
srcdir, kk, details)
% make: execute a "make" command for a source file
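% (descriptive note added for clarity) s is an sprintf format for the mex
% compile command, filled in with rule (the mex command plus its -D defines),
% srcdir, and file1; src and dst are sprintf formats for the object file the
% compiler produces and for the renamed, kind-specific copy of it (kind is
% 'm', 'md', or 'mz'); the renamed object file is appended to the list M of
% files to link.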
kk = cmd (sprintf (s, rule, srcdir, file1), kk, details) ;
src = sprintf (src, file1, obj) ;
dst = sprintf (dst, kind, file2, obj) ;
mvfile (src, dst) ;
M {end + 1} = dst ;
%-------------------------------------------------------------------------------
function v = getversion
% determine the MATLAB version, and return it as a double.
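% (illustrative note added) for example, the version string '7.8.0' parses to
% [7;8;0] below, which the weighted sum then maps to the scalar 7.80.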
v = sscanf (version, '%d.%d.%d') ;
v = 10.^(0:-1:-(length(v)-1)) * v ;
|
github | lcnbeapp/beapp-master | umfpack_btf.m | .m | beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/SuiteSparse/UMFPACK/MATLAB/umfpack_btf.m | 4,651 | utf_8 | 905709090e298d8bfd5e9937181c95b6 |
function [x, info] = umfpack_btf (A, b, Control)
%UMFPACK_BTF factorize A using a block triangular form
%
% Example:
% x = umfpack_btf (A, b, Control)
%
% solve Ax=b by first permuting the matrix A to block triangular form via dmperm
% and then using UMFPACK to factorize each diagonal block. Adjacent 1-by-1
% blocks are merged into a single upper triangular block, and solved via
% MATLAB's \ operator. The Control parameter is optional (Type umfpack_details
% and umfpack_report for details on its use). A must be square.
%
% See also umfpack, umfpack2, umfpack_details, dmperm
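%
% A fuller sketch (added for illustration; it assumes the umfpack2 mexFunction
% has been compiled and that A is sparse and square):
%
%    Control = umfpack2 ;                        % default control parameters
%    [x, info] = umfpack_btf (A, b, Control) ;
%    % info is approximately [nnz(L) nnz(U) nnz(off-diagonal blocks)],
%    % accumulated over the diagonal blocks that are factorized
%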
% Copyright 1995-2007 by Timothy A. Davis.
if (nargin < 2)
help umfpack_btf
error ('Usage: x = umfpack_btf (A, b, Control)') ;
end
[m n] = size (A) ;
if (m ~= n)
help umfpack_btf
error ('umfpack_btf: A must be square') ;
end
m1 = size (b,1) ;
if (m1 ~= n)
help umfpack_btf
error ('umfpack_btf: b has the wrong dimensions') ;
end
if (nargin < 3)
Control = umfpack2 ;
end
%-------------------------------------------------------------------------------
% find the block triangular form
%-------------------------------------------------------------------------------
% dmperm built-in may segfault in MATLAB 7.4 or earlier; fixed in MATLAB 7.5
% since dmperm now uses CSparse
[p,q,r] = dmperm (A) ;
nblocks = length (r) - 1 ;
info = [0 0 0] ; % [nnz(L), nnz(U), nnz(F)], optional 2nd output
%-------------------------------------------------------------------------------
% solve the system
%-------------------------------------------------------------------------------
if (nblocks == 1 | sprank (A) < n) %#ok
%---------------------------------------------------------------------------
% matrix is irreducible or structurally singular
%---------------------------------------------------------------------------
[x info2] = umfpack2 (A, '\', b, Control) ;
info = [info2(78) info2(79) 0] ;
else
%---------------------------------------------------------------------------
% A (p,q) is in block triangular form
%---------------------------------------------------------------------------
b = b (p,:) ;
A = A (p,q) ;
x = zeros (size (b)) ;
%---------------------------------------------------------------------------
% merge adjacent singletons into a single upper triangular block
%---------------------------------------------------------------------------
[r, nblocks, is_triangular] = merge_singletons (r) ;
%---------------------------------------------------------------------------
% solve the system: x (q) = A\b
%---------------------------------------------------------------------------
for k = nblocks:-1:1
% get the kth block
k1 = r (k) ;
k2 = r (k+1) - 1 ;
% solve the system
[x2 info2] = solver (A (k1:k2, k1:k2), b (k1:k2,:), ...
is_triangular (k), Control) ;
x (k1:k2,:) = x2 ;
% off-diagonal block back substitution
F2 = A (1:k1-1, k1:k2) ;
b (1:k1-1,:) = b (1:k1-1,:) - F2 * x (k1:k2,:) ;
info (1:2) = info (1:2) + info2 (1:2) ;
info (3) = info (3) + nnz (F2) ;
end
x (q,:) = x ;
end
%-------------------------------------------------------------------------------
% merge_singletons
%-------------------------------------------------------------------------------
function [r, nblocks, is_triangular] = merge_singletons (r)
%
% Given r from [p,q,r] = dmperm (A), where A is square, return a modified r that
% reflects the merger of adjacent singletons into a single upper triangular
% block. is_triangular (k) is 1 if the kth block is upper triangular. nblocks
% is the number of new blocks.
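% Illustrative example (added; not in the original file): for block sizes
% bsize = [1 1 3 1 1 1 2], the leading pair of singletons merges into one
% upper triangular block and the run of three singletons into another,
% giving nblocks = 4 and is_triangular = [1 0 1 0].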
nblocks = length (r) - 1 ;
bsize = r (2:nblocks+1) - r (1:nblocks) ;
t = [0 (bsize == 1)] ;
z = (t (1:nblocks) == 0 & t (2:nblocks+1) == 1) | t (2:nblocks+1) == 0 ;
y = [(find (z)) nblocks+1] ;
r = r (y) ;
nblocks = length (y) - 1 ;
is_triangular = y (2:nblocks+1) - y (1:nblocks) > 1 ;
%-------------------------------------------------------------------------------
% solve Ax=b, but check for small and/or triangular systems
%-------------------------------------------------------------------------------
function [x, info] = solver (A, b, is_triangular, Control)
if (is_triangular)
% back substitution only
x = A \ b ;
info = [nnz(A) 0 0] ;
elseif (size (A,1) < 4)
% a very small matrix, solve it as a dense linear system
x = full (A) \ b ;
n = size (A,1) ;
info = [(n^2+n)/2 (n^2+n)/2 0] ;
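    % (note added) (n^2+n)/2 is the entry count of a full triangular factor
    % of order n, used here as a rough nnz estimate for L and U.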
else
% solve it as a sparse linear system
[x info] = umfpack_solve (A, '\', b, Control) ;
end
|
github | lcnbeapp/beapp-master | cs_install.m | .m | beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/SuiteSparse/CXSparse_newfiles/MATLAB/cs_install.m | 1,625 | utf_8 | 7431badf82b77fa1911f89c54dbbb33c |
function cs_install (do_pause)
%CS_INSTALL: compile and install CXSparse for use in MATLAB.
% Your current working directory must be CXSparse/MATLAB in order to use this
% function.
%
% The directories
%
% CXSparse/MATLAB/CSparse
% CXSparse/MATLAB/Demo
% CXSparse/MATLAB/UFget
%
% are added to your MATLAB path (see the "pathtool" command to add these to
% your path permanently, for future MATLAB sessions).
%
% Next, the MATLAB CXSparse demo program, CXSparse/MATLAB/cs_demo is executed.
% To run the demo with pauses so you can see the results, use cs_install(1).
% To run the full MATLAB test programs for CXSparse, run testall in the
% Test directory.
%
% Example:
% cs_install % install and run demo with no pauses
% cs_install(1) % install and run demo with pauses
%
% See also: cs_demo
%
% Copyright 2006-2007, Timothy A. Davis.
% http://www.cise.ufl.edu/research/sparse
fprintf ('Compiling and installing CXSparse\n') ;
if (nargin < 1)
do_pause = 0 ;
end
if (do_pause)
input ('Hit enter to continue: ') ;
end
addpath ([pwd filesep 'CSparse']) ;
addpath ([pwd filesep 'Demo']) ;
v = getversion ;
if (v >= 7.0)
addpath ([pwd filesep 'UFget']) ;
else
fprintf ('UFget not installed (MATLAB 7.0 or later required)\n') ;
end
cd ('CSparse') ;
cs_make (1) ;
cd ('../Demo') ;
cs_demo (do_pause)
%-------------------------------------------------------------------------------
function v = getversion
% determine the MATLAB version, and return it as a double.
v = sscanf (version, '%d.%d.%d') ;
v = 10.^(0:-1:-(length(v)-1)) * v ;
|
github | lcnbeapp/beapp-master | camd_demo.m | .m | beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/SuiteSparse/CAMD/MATLAB/camd_demo.m | 3,308 | utf_8 | 823dfa42e18c7d2753b7dadb43e5cde2 |
function camd_demo
%CAMD_DEMO a demo of camd, using the can_24 matrix
%
% A demo of CAMD for MATLAB.
%
% Example:
% camd_demo
%
% See also: camd, camd_make
% Copyright 1994-2007, Tim Davis, University of Florida,
% Patrick R. Amestoy, Iain S. Duff, and Yanqing Chen.
% This orders the same matrix as the ANSI C demo, camd_demo.c. It includes an
% additional analysis of the matrix via MATLAB's symbfact routine.
% First, print the help information for CAMD
help camd
% Get the Harwell/Boeing can_24 matrix.
load can_24
A = spconvert (can_24) ;
n = size (A,1) ;
rand ('state', 0) ;
C = irand (6, n) ;
figure (1)
clf
hold off
subplot (2,2,1) ;
spy (A)
title ('HB/can24 matrix') ;
% print the details during CAMD ordering and SYMBFACT
% spparms ('spumoni', 1) ;
% order the matrix. Note that the Info argument is optional.
fprintf ('\nIf the next step fails, then you have\n') ;
fprintf ('not yet compiled the CAMD mexFunction.\n') ;
[p, Info] = camd (A) ; %#ok
% order again, but this time print some statistics
[p, camd_Info] = camd (A, [10 1 1], C) ;
fprintf ('Permutation vector:\n') ;
fprintf (' %2d', p) ;
fprintf ('\n\n') ;
fprintf ('Corresponding constraint sets:\n') ;
if (any (sort (C (p)) ~= C (p)))
error ('Error!') ;
end
for j = 1:n
fprintf (' %2d', C (p (j))) ;
end
fprintf ('\n\n\n') ;
subplot (2,2,2) ;
spy (A (p,p)) ;
title ('Permuted matrix') ;
% The camd_demo.c program stops here.
fprintf ('Analyze A(p,p) with MATLAB symbfact routine:\n') ;
[cn, height, parent, post, R] = symbfact (A(p,p)) ;
subplot (2,2,3) ;
spy (R') ;
title ('Cholesky factor L') ;
subplot (2,2,4) ;
treeplot (parent) ;
title ('etree') ;
% results from symbfact
lnz = sum (cn) ; % number of nonzeros in L, incl. diagonal
cn = cn - 1 ; % get the count of off-diagonal entries
fl = n + sum (cn.^2 + 2*cn) ; % flop count for chol (A (p,p))
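% (note added for clarity) this equals sum ((cn+1).^2), the usual sparse
% Cholesky flop model of squared column counts of L including the diagonal,
% matching fl = sum (lnz.^2) as used in cholmod_demo.m.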
fprintf ('number of nonzeros in L (including diagonal): %d\n', lnz) ;
fprintf ('floating point operation count for chol (A (p,p)): %d\n', fl) ;
% approximations from camd:
lnz2 = n + camd_Info (10) ;
fl2 = n + camd_Info (11) + 2 * camd_Info (12) ;
fprintf ('\nResults from CAMD''s approximate analysis:\n') ;
fprintf ('number of nonzeros in L (including diagonal): %d\n', lnz2) ;
fprintf ('floating point operation count for chol (A (p,p)): %d\n\n', fl2) ;
fprintf ('\nNote that the ordering quality is not as good as p=amd(A).\n') ;
fprintf ('This is only because the ordering constraints, C, have been\n') ;
fprintf ('randomly selected.\n') ;
if (lnz2 ~= lnz | fl ~= fl2) %#ok
fprintf ('Note that the nonzero and flop counts from CAMD are slight\n') ;
fprintf ('upper bounds. This is due to the approximate minimum degree\n');
fprintf ('method used, in conjunction with "mass elimination".\n') ;
fprintf ('See the discussion about mass elimination in camd.h and\n') ;
fprintf ('camd_2.c for more details.\n') ;
end
% turn off diagnostic output in MATLAB's sparse matrix routines
% spparms ('spumoni', 0) ;
%-------------------------------------------------------------------------------
function i = irand (n,s)
% irand: return a random vector of size s, with values between 1 and n
if (nargin == 1)
s = 1 ;
end
i = min (n, 1 + floor (rand (1,s) * n)) ;
|
github | lcnbeapp/beapp-master | cs_install.m | .m | beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/SuiteSparse/CXSparse/MATLAB/cs_install.m | 1,625 | utf_8 | 7431badf82b77fa1911f89c54dbbb33c |
function cs_install (do_pause)
%CS_INSTALL: compile and install CXSparse for use in MATLAB.
% Your current working directory must be CXSparse/MATLAB in order to use this
% function.
%
% The directories
%
% CXSparse/MATLAB/CSparse
% CXSparse/MATLAB/Demo
% CXSparse/MATLAB/UFget
%
% are added to your MATLAB path (see the "pathtool" command to add these to
% your path permanently, for future MATLAB sessions).
%
% Next, the MATLAB CXSparse demo program, CXSparse/MATLAB/cs_demo is executed.
% To run the demo with pauses so you can see the results, use cs_install(1).
% To run the full MATLAB test programs for CXSparse, run testall in the
% Test directory.
%
% Example:
% cs_install % install and run demo with no pauses
% cs_install(1) % install and run demo with pauses
%
% See also: cs_demo
%
% Copyright 2006-2007, Timothy A. Davis.
% http://www.cise.ufl.edu/research/sparse
fprintf ('Compiling and installing CXSparse\n') ;
if (nargin < 1)
do_pause = 0 ;
end
if (do_pause)
input ('Hit enter to continue: ') ;
end
addpath ([pwd filesep 'CSparse']) ;
addpath ([pwd filesep 'Demo']) ;
v = getversion ;
if (v >= 7.0)
addpath ([pwd filesep 'UFget']) ;
else
fprintf ('UFget not installed (MATLAB 7.0 or later required)\n') ;
end
cd ('CSparse') ;
cs_make (1) ;
cd ('../Demo') ;
cs_demo (do_pause)
%-------------------------------------------------------------------------------
function v = getversion
% determine the MATLAB version, and return it as a double.
v = sscanf (version, '%d.%d.%d') ;
v = 10.^(0:-1:-(length(v)-1)) * v ;
|
github | lcnbeapp/beapp-master | cholmod_make.m | .m | beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/SuiteSparse/CHOLMOD/MATLAB/cholmod_make.m | 10,939 | utf_8 | 1fe1b7470711101787a8e019798fbc53 |
function cholmod_make (metis_path)
%CHOLMOD_MAKE compiles the CHOLMOD mexFunctions
%
% Example:
% cholmod_make
%
% CHOLMOD relies on AMD and COLAMD, and optionally CCOLAMD, CAMD, and METIS.
% All but METIS are distributed with CHOLMOD. To compile CHOLMOD to use METIS
% you must first place a copy of the metis-4.0 directory (METIS version 4.0.1)
% in same directory that contains the AMD, COLAMD, CCOLAMD, and CHOLMOD
% directories. Next, type
%
% cholmod_make
%
% in the MATLAB command window. Alternatively, use this command:
%
% cholmod_make ('path to your copy of metis-4.0 here') ;
%
% See http://www-users.cs.umn.edu/~karypis/metis for a copy of
% METIS 4.0.1. If you do not have METIS, use either of the following:
%
% cholmod_make ('')
% cholmod_make ('no metis')
%
% You must type the cholmod_make command while in the CHOLMOD/MATLAB directory.
%
% See also analyze, bisect, chol2, cholmod2, etree2, lchol, ldlchol, ldlsolve,
% ldlupdate, metis, spsym, nesdis, septree, resymbol, sdmult, sparse2,
% symbfact2, mread, mwrite
% Copyright 2006-2007, Timothy A. Davis
% http://www.cise.ufl.edu/research/sparse
details = 0 ; % 1 if details of each command are to be printed
v = getversion ;
try
% ispc does not appear in MATLAB 5.3
pc = ispc ;
catch
% if ispc fails, assume we are on a Windows PC if it's not unix
pc = ~isunix ;
end
d = '' ;
% if (~isempty (strfind (computer, '64')))
% % 64-bit MATLAB
% d = '-largeArrayDims' ;
% end
if (~isempty (strfind (computer, '64')))
% 64-bit MATLAB
d = '-largeArrayDims' ;
% The next three lines are added by [email protected] (-09).
% These options are needed for some reason in Matlab 7.8 or newer.
if v >= 7.8
d = [d ' -DLONG -D''LONGBLAS=UF_long'''];
end
end
include = '-I. -I../../AMD/Include -I../../COLAMD/Include -I../../CCOLAMD/Include -I../../CAMD/Include -I../Include -I../../UFconfig' ;
if (v < 7.0)
% do not attempt to compile CHOLMOD with large file support
include = [include ' -DNLARGEFILE'] ;
elseif (~pc)
% Linux/Unix require these flags for large file support
include = [include ' -D_FILE_OFFSET_BITS=64 -D_LARGEFILE64_SOURCE'] ;
end
if (v < 6.5)
% logical class does not exist in MATLAB 6.1 or earlier
include = [include ' -DMATLAB6p1_OR_EARLIER'] ;
end
% Determine the METIS path, and whether or not METIS is available
if (nargin == 0)
metis_path = '../../metis-4.0' ;
end
if (strcmp (metis_path, 'no metis'))
metis_path = '' ;
end
have_metis = (~isempty (metis_path)) ;
% fix the METIS 4.0.1 rename.h file
if (have_metis)
fprintf ('Compiling CHOLMOD with METIS on MATLAB Version %g\n', v) ;
f = fopen ('rename.h', 'w') ;
if (f == -1)
error ('unable to create rename.h in current directory') ;
end
fprintf (f, '/* do not edit this file; generated by cholmod_make */\n') ;
fprintf (f, '#undef log2\n') ;
fprintf (f, '#include "%s/Lib/rename.h"\n', metis_path) ;
fprintf (f, '#undef log2\n') ;
fprintf (f, '#define log2 METIS__log2\n') ;
fprintf (f, '#include "mex.h"\n') ;
fprintf (f, '#define malloc mxMalloc\n') ;
fprintf (f, '#define free mxFree\n') ;
fprintf (f, '#define calloc mxCalloc\n') ;
fprintf (f, '#define realloc mxRealloc\n') ;
fclose (f) ;
include = [include ' -I' metis_path '/Lib'] ;
else
fprintf ('Compiling CHOLMOD without METIS on MATLAB Version %g\n', v) ;
include = ['-DNPARTITION ' include] ;
end
%-------------------------------------------------------------------------------
% BLAS option
%-------------------------------------------------------------------------------
% This is exceedingly ugly. The MATLAB mex command needs to be told where to
% find the LAPACK and BLAS libraries, which is a real portability nightmare.
if (pc)
if (v < 6.5)
% MATLAB 6.1 and earlier: use the version supplied here
lapack = 'lcc_lib/libmwlapack.lib' ;
elseif (v < 7.5)
lapack = 'libmwlapack.lib' ;
else
lapack = 'libmwlapack.lib libmwblas.lib' ;
end
else
if (v < 7.5)
lapack = '-lmwlapack' ;
else
lapack = '-lmwlapack -lmwblas' ;
end
end
%-------------------------------------------------------------------------------
include = strrep (include, '/', filesep) ;
amd_src = { ...
'../../AMD/Source/amd_1', ...
'../../AMD/Source/amd_2', ...
'../../AMD/Source/amd_aat', ...
'../../AMD/Source/amd_control', ...
'../../AMD/Source/amd_defaults', ...
'../../AMD/Source/amd_dump', ...
'../../AMD/Source/amd_global', ...
'../../AMD/Source/amd_info', ...
'../../AMD/Source/amd_order', ...
'../../AMD/Source/amd_postorder', ...
'../../AMD/Source/amd_post_tree', ...
'../../AMD/Source/amd_preprocess', ...
'../../AMD/Source/amd_valid' } ;
camd_src = { ...
'../../CAMD/Source/camd_1', ...
'../../CAMD/Source/camd_2', ...
'../../CAMD/Source/camd_aat', ...
'../../CAMD/Source/camd_control', ...
'../../CAMD/Source/camd_defaults', ...
'../../CAMD/Source/camd_dump', ...
'../../CAMD/Source/camd_global', ...
'../../CAMD/Source/camd_info', ...
'../../CAMD/Source/camd_order', ...
'../../CAMD/Source/camd_postorder', ...
'../../CAMD/Source/camd_preprocess', ...
'../../CAMD/Source/camd_valid' } ;
colamd_src = {
'../../COLAMD/Source/colamd', ...
'../../COLAMD/Source/colamd_global' } ;
ccolamd_src = {
'../../CCOLAMD/Source/ccolamd', ...
'../../CCOLAMD/Source/ccolamd_global' } ;
metis_src = {
'Lib/balance', ...
'Lib/bucketsort', ...
'Lib/ccgraph', ...
'Lib/coarsen', ...
'Lib/compress', ...
'Lib/debug', ...
'Lib/estmem', ...
'Lib/fm', ...
'Lib/fortran', ...
'Lib/frename', ...
'Lib/graph', ...
'Lib/initpart', ...
'Lib/kmetis', ...
'Lib/kvmetis', ...
'Lib/kwayfm', ...
'Lib/kwayrefine', ...
'Lib/kwayvolfm', ...
'Lib/kwayvolrefine', ...
'Lib/match', ...
'Lib/mbalance2', ...
'Lib/mbalance', ...
'Lib/mcoarsen', ...
'Lib/memory', ...
'Lib/mesh', ...
'Lib/meshpart', ...
'Lib/mfm2', ...
'Lib/mfm', ...
'Lib/mincover', ...
'Lib/minitpart2', ...
'Lib/minitpart', ...
'Lib/mkmetis', ...
'Lib/mkwayfmh', ...
'Lib/mkwayrefine', ...
'Lib/mmatch', ...
'Lib/mmd', ...
'Lib/mpmetis', ...
'Lib/mrefine2', ...
'Lib/mrefine', ...
'Lib/mutil', ...
'Lib/myqsort', ...
'Lib/ometis', ...
'Lib/parmetis', ...
'Lib/pmetis', ...
'Lib/pqueue', ...
'Lib/refine', ...
'Lib/separator', ...
'Lib/sfm', ...
'Lib/srefine', ...
'Lib/stat', ...
'Lib/subdomains', ...
'Lib/timing', ...
'Lib/util' } ;
for i = 1:length (metis_src)
metis_src {i} = [metis_path '/' metis_src{i}] ;
end
cholmod_matlab = { 'cholmod_matlab' } ;
cholmod_src = {
'../Core/cholmod_aat', ...
'../Core/cholmod_add', ...
'../Core/cholmod_band', ...
'../Core/cholmod_change_factor', ...
'../Core/cholmod_common', ...
'../Core/cholmod_complex', ...
'../Core/cholmod_copy', ...
'../Core/cholmod_dense', ...
'../Core/cholmod_error', ...
'../Core/cholmod_factor', ...
'../Core/cholmod_memory', ...
'../Core/cholmod_sparse', ...
'../Core/cholmod_transpose', ...
'../Core/cholmod_triplet', ...
'../Check/cholmod_check', ...
'../Check/cholmod_read', ...
'../Check/cholmod_write', ...
'../Cholesky/cholmod_amd', ...
'../Cholesky/cholmod_analyze', ...
'../Cholesky/cholmod_colamd', ...
'../Cholesky/cholmod_etree', ...
'../Cholesky/cholmod_factorize', ...
'../Cholesky/cholmod_postorder', ...
'../Cholesky/cholmod_rcond', ...
'../Cholesky/cholmod_resymbol', ...
'../Cholesky/cholmod_rowcolcounts', ...
'../Cholesky/cholmod_rowfac', ...
'../Cholesky/cholmod_solve', ...
'../Cholesky/cholmod_spsolve', ...
'../MatrixOps/cholmod_drop', ...
'../MatrixOps/cholmod_horzcat', ...
'../MatrixOps/cholmod_norm', ...
'../MatrixOps/cholmod_scale', ...
'../MatrixOps/cholmod_sdmult', ...
'../MatrixOps/cholmod_ssmult', ...
'../MatrixOps/cholmod_submatrix', ...
'../MatrixOps/cholmod_vertcat', ...
'../MatrixOps/cholmod_symmetry', ...
'../Modify/cholmod_rowadd', ...
'../Modify/cholmod_rowdel', ...
'../Modify/cholmod_updown', ...
'../Supernodal/cholmod_super_numeric', ...
'../Supernodal/cholmod_super_solve', ...
'../Supernodal/cholmod_super_symbolic', ...
'../Partition/cholmod_ccolamd', ...
'../Partition/cholmod_csymamd', ...
'../Partition/cholmod_camd', ...
'../Partition/cholmod_metis', ...
'../Partition/cholmod_nesdis' } ;
cholmod_mex_src = { ...
'analyze', ...
'bisect', ...
'chol2', ...
'cholmod2', ...
'etree2', ...
'lchol', ...
'ldlchol', ...
'ldlsolve', ...
'ldlupdate', ...
'metis', ...
'spsym', ...
'nesdis', ...
'septree', ...
'resymbol', ...
'sdmult', ...
'sparse2', ...
'symbfact2', ...
'mread', ...
'mwrite' } ;
if (pc)
% Windows does not have drand48 and srand48, required by METIS. Use
% drand48 and srand48 in CHOLMOD/MATLAB/Windows/rand48.c instead.
% Also provide Windows with an empty <strings.h> include file.
obj_extension = '.obj' ;
cholmod_matlab = [cholmod_matlab {'Windows/rand48'}] ;
include = [include ' -IWindows'] ;
else
obj_extension = '.o' ;
end
% compile each library source file
obj = '' ;
source = [amd_src colamd_src ccolamd_src camd_src cholmod_src cholmod_matlab] ;
if (have_metis)
source = [metis_src source] ;
end
kk = 0 ;
for f = source
ff = strrep (f {1}, '/', filesep) ;
slash = strfind (ff, filesep) ;
if (isempty (slash))
slash = 1 ;
else
slash = slash (end) + 1 ;
end
o = ff (slash:end) ;
obj = [obj ' ' o obj_extension] ; %#ok
s = sprintf ('mex %s -DDLONG -O %s -c %s.c', d, include, ff) ;
kk = do_cmd (s, kk, details) ;
end
% compile each mexFunction
for f = cholmod_mex_src
s = sprintf ('mex %s -DDLONG -O %s %s.c', d, include, f{1}) ;
s = [s obj ' ' lapack] ; %#ok
kk = do_cmd (s, kk, details) ;
end
% clean up
s = ['delete ' obj] ;
do_cmd (s, kk, details) ;
fprintf ('\nCHOLMOD successfully compiled\n') ;
%-------------------------------------------------------------------------------
function kk = do_cmd (s, kk, details)
%DO_CMD: evaluate a command, and either print it or print a "."
if (details)
fprintf ('%s\n', s) ;
else
if (mod (kk, 60) == 0)
fprintf ('\n') ;
end
kk = kk + 1 ;
fprintf ('.') ;
end
eval (s) ;
%-------------------------------------------------------------------------------
function v = getversion
% determine the MATLAB version, and return it as a double.
v = sscanf (version, '%d.%d.%d') ;
v = 10.^(0:-1:-(length(v)-1)) * v ;
|
github | lcnbeapp/beapp-master | graph_demo.m | .m | beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/SuiteSparse/CHOLMOD/MATLAB/graph_demo.m | 2,476 | utf_8 | d68493e617f06b2d5a601b7cb3b91a33 |
function graph_demo (n)
%GRAPH_DEMO graph partitioning demo
% graph_demo(n) constructs a set of n-by-n 2D grids, partitions them, and
% plots them in one-second intervals. n is optional; it defaults to 60.
%
% Example:
% graph_demo
%
% See also DELSQ, NUMGRID, GPLOT, TREEPLOT
% Copyright 2006-2007, Timothy A. Davis
% http://www.cise.ufl.edu/research/sparse
if (nargin < 1)
% construct a 60-by-60 grid
n = 60 ;
end
figure (1)
clf
for regions = {'Square', 'C', 'Disc', 'Annulus', 'Heart', 'Butterfly', 'L'}
% construct the grid
region = regions {1} ;
g = numgrid (region (1), n) ;
x = repmat (0:n-1, n, 1) ;
y = repmat (((n-1):-1:0)', 1, n) ;
A = delsq (g) ;
x = x (find (g)) ; %#ok
y = y (find (g)) ; %#ok
% plot the original grid
clf
subplot (2,2,1)
my_gplot (A, x, y)
title (sprintf ('%s-shaped 2D grid', region)) ;
axis equal
axis off
% bisect the graph
s = bisect (A) ;
[i j] = find (A) ;
subplot (2,2,2)
my_gplot (sparse (i, j, s(i) == s(j)), x, y) ;
title ('node bisection') ;
axis equal
axis off
% nested dissection
nsmall = floor (size (A,1) / 2) ;
defaults = 0 ;
while (1)
if (defaults)
% use defaults
[p cp cmember] = nesdis (A) ;
else
[p cp cmember] = nesdis (A, 'sym', nsmall) ;
end
% plot the components
subplot (2,2,3)
my_gplot (sparse (i, j, cmember(i) == cmember (j)), x, y) ;
if (defaults)
title ('nested dissection (defaults)') ;
else
title (sprintf ('nested dissection, nsmall %d', nsmall)) ;
end
axis equal
axis off
% plot the separator tree
subplot (2,2,4)
treeplot (cp, 'ko')
title ('separator tree') ;
axis equal
axis off
drawnow
pause (0.1)
if (defaults)
break ;
end
nsmall = floor (nsmall / 2) ;
if (nsmall < 20)
defaults = 1 ;
pause (0.2)
end
end
end
%-------------------------------------------------------------------------------
function my_gplot (A, x, y)
% my_gplot : like gplot, just a lot faster
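% (note added for clarity) each edge (i,j) becomes one line segment, and the
% NaN appended after every segment breaks the line, so a single plot call
% draws all edges at once; that is what makes this faster than gplot.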
[i, j] = find (A) ;
[ignore, p] = sort (max(i, j)) ;
i = i (p) ;
j = j (p) ;
nans = repmat (NaN, size (i)) ;
x = [ x(i) x(j) nans ]' ;
y = [ y(i) y(j) nans ]' ;
plot (x (:), y (:)) ;
|
github | lcnbeapp/beapp-master | cholmod_demo.m | .m | beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/SuiteSparse/CHOLMOD/MATLAB/cholmod_demo.m | 3,684 | utf_8 | 366c6ffc28d9204098b51142c5e72445 |
function cholmod_demo
%CHOLMOD_DEMO a demo for CHOLMOD
%
% Tests CHOLMOD with various randomly-generated matrices, and the west0479
% matrix distributed with MATLAB. Random matrices are not good test cases,
% but they are easily generated. It also compares CHOLMOD and MATLAB on the
% sparse matrix problem used in the MATLAB BENCH command.
%
% See CHOLMOD/MATLAB/Test/cholmod_test.m for a lengthy test using matrices from
% the UF sparse matrix collection.
%
% Example:
% cholmod_demo
%
% See also BENCH
% Copyright 2006-2007, Timothy A. Davis
% http://www.cise.ufl.edu/research/sparse
help cholmod_demo
rand ('state', 0) ;
randn ('state', 0) ;
load west0479
A = west0479 ;
n = size (A,1) ;
A = A*A'+100*speye (n) ;
try_matrix (A) ;
clear A
n = 2000 ;
A = sprandn (n, n, 0.002) ;
A = A+A'+100*speye (n) ;
try_matrix (A) ;
clear A
for n = [100 2000]
A = rand (n) ;
A = A*A' + 10 * eye (n) ;
try_matrix (A) ;
clear A
end
fprintf ('\n--------------------------------------------------------------\n') ;
fprintf ('\nWith the matrix used in the MATLAB 7.2 "bench" program.\n') ;
fprintf ('No fill-reducing orderings are used; type "help bench" for more') ;
fprintf (' information.\n') ;
n = 300 ;
A = delsq (numgrid ('L', n)) ;
b = sum (A)' ;
spparms ('default') ;
spparms ('autommd',0) ;
spparms ('autoamd',0) ;
tic ;
x = A\b ;
t1 = toc ;
e1 = norm (A*x-b) ;
tic ;
x = cholmod2 (A,b,0) ;
t2 = toc ;
e2 = norm (A*x-b) ;
fprintf ('MATLAB x=A\\b time: %8.4f resid: %8.0e\n', t1, e1) ;
fprintf ('CHOLMOD x=A\\b time: %8.4f resid: %8.0e\n', t2, e2) ;
fprintf ('CHOLMOD speedup: %8.2f\n', t1/t2) ;
spparms ('default') ;
fprintf ('\ncholmod_demo finished: all tests passed\n') ;
fprintf ('\nFor more accurate timings, run this test again.\n') ;
function try_matrix (A)
% try_matrix: try a matrix with CHOLMOD
n = size (A,1) ;
S = sparse (A) ;
fprintf ('\n--------------------------------------------------------------\n') ;
if (issparse (A))
fprintf ('cholmod_demo: sparse matrix, n %d nnz %d\n', n, nnz (A)) ;
else
fprintf ('cholmod_demo: dense matrix, n %d\n', n) ;
end
X = rand (n,1) ;
C = sparse (X) ;
try
% use built-in AMD
p = amd (S) ;
catch
try
% use AMD from SuiteSparse (../../AMD)
p = amd2 (S) ;
catch
% use SYMAMD
p = symamd (S) ;
end
end
S = S (p,p) ;
lnz = symbfact2 (S) ;
fl = sum (lnz.^2) ;
tic
L = lchol (S) ; %#ok
t1 = toc ;
fprintf ('CHOLMOD lchol(sparse(A)) time: %6.2f mflop %8.1f\n', ...
t1, 1e-6 * fl / t1) ;
tic
LD = ldlchol (S) ; %#ok
t2 = toc ;
fprintf ('CHOLMOD ldlchol(sparse(A)) time: %6.2f mflop %8.1f\n', ...
t2, 1e-6 * fl / t2) ;
tic
LD = ldlupdate (LD,C) ;
t3 = toc ;
fprintf ('CHOLMOD ldlupdate(sparse(A),C) time: %6.2f (rank-1, C dense)\n', t3) ;
[L,D] = ldlsplit (LD) ;
L = full (L) ;
err = norm ((S+C*C') - L*D*L', 1) / norm (S,1) ;
fprintf ('err: %g\n', err) ;
tic
R = chol (S) ; %#ok
s1 = toc ;
fprintf ('MATLAB chol(sparse(A)) time: %6.2f mflop %8.1f\n', ...
s1, 1e-6 * fl / s1) ;
E = full (A) ;
tic
R = chol (E) ;
s2 = toc ;
fprintf ('MATLAB chol(full(A)) time: %6.2f mflop %8.1f\n', ...
s2, 1e-6 * fl / s2) ;
Z = full (R) ;
tic
Z = cholupdate (Z,X) ;
s3 = toc ;
fprintf ('MATLAB cholupdate(full(A),C) time: %6.2f (rank-1)\n', s3) ;
err = norm ((E+X*X') - Z'*Z, 1) / norm (E,1) ;
fprintf ('err: %g\n', err) ;
fprintf ('CHOLMOD lchol(sparse(A)) speedup over chol(sparse(A)): %6.1f\n', ...
s1 / t1) ;
fprintf ('CHOLMOD sparse update speedup vs MATLAB DENSE update: %6.1f\n', ...
s3 / t3) ;
clear E S L R LD X C D Z
clear err s1 s2 s3 t1 t2 t3 n
|
github | lcnbeapp/beapp-master | test15.m | .m | beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/SuiteSparse/CHOLMOD/MATLAB/Test/test15.m | 3,742 | utf_8 | 138acfd9c60538711b73c3f3938a3644 |
function test15 (nmat)
%TEST15 test symbfact2 vs MATLAB
% Example:
% test15(nmat)
% See also cholmod_test
% Copyright 2006-2007, Timothy A. Davis, University of Florida
fprintf ('=================================================================\n');
index = UFget ;
% only test matrices with nrows = 109000 or less. large ones nearly always
% cause a MATLAB segfault.
f = find (index.nrows < 109000 & index.ncols < 109000) ;
% sort by row /col dimension
s = max (index.nrows, index.ncols) ;
[ignore i] = sort (s (f)) ;
f = f (i) ;
if (nargin > 0)
nmat = max (0,nmat) ;
nmat = min (nmat, length (f)) ;
f = f (1:nmat) ;
end
fprintf ('Matrices to test: %d\n', length (f)) ;
for i = f
% try
Problem = UFget (i) ;
A = spones (Problem.A) ;
[m n] = size (A) ;
fprintf ('\n%4d: %-20s nrow: %6d ncol: %6d nnz: %10d\n', ...
i, Problem.name, m, n, nnz(A)) ;
% warmup, for accurate timing
etree (sparse (1)) ;
etree2 (sparse (1)) ;
amd2 (sparse (1)) ;
symbfact (sparse (1)) ;
symbfact2 (sparse (1)) ;
% test symmetric case
if (m == n)
% permute the matrix first
p = amd2 (A) ;
A = A (p,p) ;
% test with triu(A)
tic
co = symbfact (A) ;
t1 = toc ;
tic
co2 = symbfact2 (A) ;
t2 = toc ;
fprintf ('c=symbfact(A): %10.4f %10.4f speedup %8.2f lnz %d\n', ...
t1, t2, t1/t2, sum (co)) ;
if (any (co ~= co2))
error ('!') ;
end
tic
[co h parent post R] = symbfact (A) ;
t1 = toc ;
tic
[co2 h2 parent2 post2 R2] = symbfact2 (A) ;
t2 = toc ;
fprintf ('R=symbfact(A): %10.4f %10.4f speedup %8.2f\n',...
t1, t2, t1/t2) ;
checkem(co,co2,parent,parent2,post,post2,R,R2,h,h2) ;
% test with tril(A)
tic
co = symbfact (A') ;
t1 = toc ;
tic
co2 = symbfact2 (A,'lo') ;
t2 = toc ;
fprintf (...
'c=symbfact(A''): %10.4f %10.4f speedup %8.2f lnz %d\n',...
t1, t2, t1/t2, sum (co)) ;
if (any (co ~= co2))
error ('!') ;
end
tic
[co h parent post R] = symbfact (A') ;
t1 = toc ;
tic
[co2 h2 parent2 post2 R2] = symbfact2 (A,'lo') ;
t2 = toc ;
fprintf (...
'R=symbfact(A''): %10.4f %10.4f speedup %8.2f\n',...
t1, t2, t1/t2) ;
checkem(co,co2,parent,parent2,post,post2,R,R2,h,h2) ;
end
% permute the matrix first
p = colamd (A) ;
[parent post] = etree2 (A (:,p), 'col') ;
p = p (post) ;
A = A (:,p) ;
% test column case
tic
co = symbfact (A,'col') ;
t1 = toc ;
tic
co2 = symbfact2 (A,'col') ;
t2 = toc ;
fprintf ('c=symbfact(A,''col''): %10.4f %10.4f speedup %8.2f lnz %d\n', ...
t1, t2, t1/t2, sum (co)) ;
if (any (co ~= co2))
error ('!') ;
end
tic
[co h parent post R] = symbfact (A,'col') ;
t1 = toc ;
tic
[co2 h2 parent2 post2 R2] = symbfact2 (A,'col') ;
t2 = toc ;
fprintf ('R=symbfact(A,''col''): %10.4f %10.4f speedup %8.2f\n', ...
t1, t2, t1/t2) ;
checkem(co,co2,parent,parent2,post,post2,R,R2,h,h2) ;
% catch
% fprintf ('%d failed\n', i) ;
% end
end
fprintf ('test15 passed\n') ;
%-------------------------------------------------------------------------------
function checkem(co,co2,parent,parent2,post,post2,R,R2,h,h2)
% checkem compare results from symbfact and symbfact2
if (any (co ~= co2))
error ('count!') ;
end
if (any (parent ~= parent2))
error ('parent!') ;
end
if (any (post ~= post2))
error ('post!') ;
end
if (nnz (R2) ~= nnz (R))
error ('lnz!') ;
end
if (h ~= h2)
error ('h!') ;
end
% this may run out of memory
try % compute nnz(R-R2)
err = nnz (R-R2) ;
catch
err = -1 ;
fprintf ('nnz(R-R2) not computed\n') ;
end
if (err > 0)
error ('R!') ;
end
|
github | lcnbeapp/beapp-master | test26.m | .m | beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/SuiteSparse/CHOLMOD/MATLAB/Test/test26.m | 1,919 | utf_8 | 76a21ab29ef4ddc3b64ae785a716c51a |
function test26 (do_metis)
%TEST26 test logical full and sparse matrices
% Example:
% test26
% See also cholmod_test
% Copyright 2006-2007, Timothy A. Davis, University of Florida
fprintf ('=================================================================\n');
fprintf ('test26: test logical full and sparse matrices\n') ;
if (nargin < 1)
do_metis = 1 ;
end
Prob = UFget ('HB/bcsstk01') ;
A = Prob.A ;
p = amd2 (A) ;
n = size (A,1) ;
A = A (p,p) + 10*speye (n) ;
C = logical (A ~= 0) ;
test26b (A,C,do_metis) ;
test26b (full (A),C,do_metis) ;
test26b (full (A), full (C),do_metis) ;
test26b (A, full(C),do_metis) ;
A = A + 0.001 * (spones (tril (A,-1) + triu (A,1))) * 1i ;
test26b (A,C,do_metis) ;
test26b (full (A),C,do_metis) ;
test26b (full (A), full (C),do_metis) ;
test26b (A, full(C),do_metis) ;
fprintf ('test26 passed\n') ;
%-------------------------------------------------------------------------------
function test26b (A,C,do_metis)
% test26b test bisect, analyze, etree2, metis, nesdis, symbfact2, and resymbol
p1 = analyze (A) ;
p2 = analyze (C) ;
if (any (p1 ~= p2))
error ('test 26 failed (analyze)!') ;
end
p1 = etree2 (A) ;
p2 = etree2 (C) ;
if (any (p1 ~= p2))
error ('test 26 failed (etree2)!') ;
end
if (do_metis)
s1 = bisect (A) ;
s2 = bisect (C) ;
if (any (s1 ~= s2))
error ('test 26 failed (bisect)!') ;
end
p1 = metis (A) ;
p2 = metis (C) ;
if (any (p1 ~= p2))
error ('test 26 failed (metis)!') ;
end
p1 = nesdis (A) ;
p2 = nesdis (C) ;
if (any (p1 ~= p2))
error ('test 26 failed (nesdis)!') ;
end
end
c1 = symbfact2 (A) ;
c2 = symbfact2 (C) ;
if (any (c1 ~= c2))
error ('test 26 failed (symbfact2)!') ;
end
A (1,2) = 0 ;
A (2,1) = 0 ;
C = logical (A ~= 0) ;
L = chol (sparse (A))' ;
L1 = resymbol (L, A) ;
L2 = resymbol (L, C) ;
if (norm (L1 - L2, 1) ~= 0)
error ('test 26 failed (resymbol)!') ;
end
|
github | lcnbeapp/beapp-master | meshnd_example.m | .m | beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/SuiteSparse/MESHND/meshnd_example.m | 2,391 | utf_8 | b73633dae23b3d361785dff0a3577e93 |
function meshnd_example
%MESHND_EXAMPLE example usage of meshnd and meshsparse.
%
% Example:
% meshnd_example
%
% See also meshnd.
% Copyright 2007, Timothy A. Davis, Univ. of Florida
help meshnd
% 2D mesh, compare with Cleve Moler's demos
m = 7 ;
n = 7 ;
[G p pinv Gnew] = meshnd (m,n) ;
fprintf ('Original mesh:\n') ;
disp (G) ;
fprintf ('Permuted node numbers using meshnd.m (nested dissection):\n') ;
disp (Gnew) ;
Moler = nested (n+2) ;
Moler = Moler (2:n+1,2:n+1) ;
fprintf ('Cleve Moler''s nested dissection ordering, using nested.m\n') ;
disp (Moler) ;
fprintf ('Difference between nested.m and meshnd.m:\n') ;
disp (Gnew-Moler) ;
% 2D and 3D meshes
stencils = [5 9 7 27] ;
mm = [7 7 7 7] ;
nn = [7 7 7 7] ;
kk = [1 1 7 7] ;
for s = 1:4
m = mm (s) ;
n = nn (s) ;
k = kk (s) ;
[G p] = meshnd (mm (s), nn (s), kk (s)) ;
A = meshsparse (G, stencils (s)) ;
C = A (p,p) ;
parent = etree (C) ;
try
L = chol (C, 'lower') ;
catch
% old version of MATLAB
L = chol (C)' ;
end
subplot (4,5,(s-1)*5 + 1) ;
do_spy (A) ;
if (k > 1)
title (sprintf ('%d-by-%d-by-%d mesh, %d-point stencil', ...
m, n, k, stencils (s))) ;
else
title (sprintf ('%d-by-%d mesh, %d-point stencil', ...
m, n, stencils (s))) ;
end
subplot (4,5,(s-1)*5 + 2) ;
do_spy (C) ;
title ('nested dissection') ;
subplot (4,5,(s-1)*5 + 3) ;
treeplot (parent) ;
title ('etree') ;
xlabel ('') ;
subplot (4,5,(s-1)*5 + 4) ;
do_spy (L) ;
title (sprintf ('Cholesky with nd, nnz %d', nnz (L))) ;
try
% use the built-in AMD
p = amd (A) ;
catch
try
% use AMD from SuiteSparse
p = amd2 (A) ;
catch
% use the older built-in SYMAMD
p = symamd (A) ;
end
end
try
L = chol (A (p,p), 'lower') ;
catch
% old version of MATLAB
L = chol (A (p,p))' ;
end
subplot (4,5,(s-1)*5 + 5) ;
do_spy (L) ;
title (sprintf ('Cholesky with amd, nnz %d', nnz (L))) ;
end
%-------------------------------------------------------------------------------
function do_spy (A)
%DO_SPY use cspy(A) to plot a matrix, or spy(A) if cspy not installed.
try
% This function is in CSparse. It generates better looking plots than spy.
cspy (A) ;
catch
spy (A) ;
end
|
github
|
lcnbeapp/beapp-master
|
meshnd.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/SuiteSparse/MESHND/meshnd.m
| 3,103 |
utf_8
|
f852a9aec88b5fcf8d39f1afe5d6eb25
|
function [G, p, pinv, Gnew] = meshnd (arg1,n,k)
%MESHND creation and nested dissection of a regular 2D or 3D mesh.
% [G p pinv Gnew] = meshnd (m,n) constructs an m-by-n 2D mesh G, and then finds
% a permuted mesh Gnew where Gnew = pinv(G) and G = p(Gnew). meshnd(m,n,k)
% creates an m-by-n-by-k 3D mesh.
%
% [G p pinv Gnew] = meshnd (G) does not construct G, but uses the mesh G as
% given on input instead.
%
% Example:
% [G p pinv Gnew] = meshnd (4,5) ;
%
% returns
% Gnew =
% 1 2 17 9 10
% 7 8 18 15 16
% 3 5 19 11 13
% 4 6 20 12 14
% G =
% 1 2 3 4 5
% 6 7 8 9 10
% 11 12 13 14 15
% 16 17 18 19 20
%
% With no inputs, a few example meshes are generated and plotted.
%
% See also nested, numgrid.
% Copyright 2007-2009, Timothy A. Davis, Univ. of Florida
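% A typical use of the outputs (a sketch; meshsparse from this package is
% assumed to be on the path, as in meshnd_example):
%
%   [G p] = meshnd (30,30) ;
%   A = meshsparse (G) ;            % sparse matrix for the default stencil
%   L = chol (A (p,p), 'lower') ;   % Cholesky using the nested dissection order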
% get the inputs and create the mesh if not provided on input
if (nargin == 0)
% run a simple example
meshnd_example ;
elseif (nargin == 1)
% the mesh is provided on input
G = arg1 ;
[m n k] = size (G) ;
elseif (nargin == 2)
% create the m-by-n-by-k mesh in "natural" (row-major) order. This is how
% a typical 2D mesh is ordered. A column-major order would be better, since
% in that case G(:) would equal 1:(m*n) ... but let's stick with tradition.
m = arg1 ;
k = 1 ;
G = reshape (1:(m*n*k), n, m, k)' ;
elseif (nargin == 3)
% create the m-by-n-by-k mesh in column-major order. The first m-by-n-by-1
% slice is in column-major order, followed by all the other slices 2 to k.
m = arg1 ;
G = reshape (1:(m*n*k), m, n, k) ;
else
error ('Usage: [G p pinv Gnew] = meshnd(G), meshnd(m,n) or meshnd(m,n,k)') ;
end
if (nargout > 1)
p = nd2 (G)' ; % order the mesh
end
if (nargout > 2)
pinv (p) = 1:(m*n*k) ; % find the inverse permutation
end
if (nargout > 3)
Gnew = pinv (G) ; % find the permuted mesh
end
%-------------------------------------------------------------------------------
function p = nd2 (G)
%ND2 p = nd2 (G) permutes a 2D or 3D mesh G.
% Compare with nestdiss which uses p as a scalar offset and returns a modified
% mesh G that corresponds to Gnew in meshnd. Here, the scalar offset p in
% nestdiss is not needed. Instead, p is a permutation, and the modified mesh
% Gnew is not returned.
[m n k] = size (G) ;
if (max ([m n k]) <= 2)
% G is small; do not cut it
p = G (:) ;
elseif k >= max (m,n)
% cut G along the middle slice, cutting k in half
s = ceil (k/2) ;
middle = G (:,:,s) ;
p = [(nd2 (G (:,:,1:s-1))) ; (nd2 (G (:,:,s+1:k))) ; middle(:)] ;
elseif n >= max (m,k)
% cut G along the middle column, cutting n in half
s = ceil (n/2) ;
middle = G (:,s,:) ;
p = [(nd2 (G (:,1:s-1,:))) ; (nd2 (G (:,s+1:n,:))) ; middle(:)] ;
else
% cut G along the middle row, cutting m in half
s = ceil (m/2) ;
middle = G (s,:,:) ;
p = [(nd2 (G (1:s-1,:,:))) ; (nd2 (G (s+1:m,:,:))) ; middle(:)] ;
end
|
github
|
lcnbeapp/beapp-master
|
ssmult_install.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/SuiteSparse/SSMULT/ssmult_install.m
| 6,256 |
utf_8
|
ca7bbd625a43cfd1c3cec4e2980ff27c
|
function ssmult_install (dotests)
%SSMULT_INSTALL compiles, installs, and tests ssmult.
% Note that the "lcc" compiler provided with MATLAB for Windows can generate
% slow code; use another compiler if possible. Your current directory must be
% SSMULT for ssmult_install to work properly. If you use Linux/Unix/Mac, I
% recommend that you use COPTIMFLAGS='-O3 -DNDEBUG' in your mexopts.sh file.
%
% Example:
% ssmult_install % compile and install
% ssmult_install (0) % just compile and install, do not test
%
% See also ssmult, ssmultsym, sstest, sstest2, mtimes.
% Copyright 2009, Timothy A. Davis, University of Florida
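% A quick sanity check after installation (a sketch; ssmult must already be
% compiled and on the path):
%
%   A = sprand (1000, 1000, 0.01) ;
%   B = sprand (1000, 1000, 0.01) ;
%   err = norm (A*B - ssmult (A,B), 1)   % should be zero or negligible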
fprintf ('Compiling SSMULT:\n') ;
%-------------------------------------------------------------------------------
% compile ssmult and add it to the path
%-------------------------------------------------------------------------------
d = '' ;
if (~isempty (strfind (computer, '64')))
% 64-bit MATLAB
d = ' -largeArrayDims -DIS64' ;
end
v = getversion ;
if (v < 6.5)
% mxIsDouble is false for a double sparse matrix in MATLAB 6.1 or earlier
d = [d ' -DMATLAB_6p1_OR_EARLIER'] ;
end
cmd = sprintf ('mex -O%s ssmult.c ssmult_mex.c ssmult_saxpy.c ssmult_dot.c ssmult_transpose.c', d) ;
disp (cmd) ;
eval (cmd) ;
cmd = sprintf ('mex -O%s ssmultsym.c', d) ;
disp (cmd) ;
eval (cmd) ;
cmd = sprintf ('mex -O%s sptranspose.c ssmult_transpose.c', d) ;
disp (cmd) ;
eval (cmd) ;
addpath (pwd) ;
fprintf ('\nssmult has been compiled, and the following directory has been\n') ;
fprintf ('added to your MATLAB path. Use pathtool to add it permanently:\n') ;
fprintf ('\n%s\n\n', pwd) ;
fprintf ('If you cannot save your path with pathtool, add the following\n') ;
fprintf ('to your MATLAB startup.m file (type "doc startup" for help):\n') ;
fprintf ('\naddpath (''%s'') ;\n\n', pwd) ;
%-------------------------------------------------------------------------------
% test ssmult and ssmultsym
%-------------------------------------------------------------------------------
if (nargin < 1)
dotests = 1 ;
end
if (~dotests)
return
end
fprintf ('Please wait while your new ssmult function is tested ...\n') ;
fprintf ('\nTesting large sparse column vectors (1e7-by-1)\n') ;
x = sprandn (1e7,1,1e-4) ;
y = sprandn (1e7,1,1e-4) ;
x (1) = pi ;
y (1) = exp (1) ;
tic ; a = x'*y ; t1 = toc ;
tic ; b = ssmult (x, y, 1) ; t2 = toc ;
fprintf ('s=x''*y in MATLAB: %8.3f seconds\n', t1) ;
fprintf ('s=ssmult(x,y,1): %8.3f seconds; error %g\n', t2, abs (full(a-b))) ;
fprintf ('SSMULT speedup: %8.3g\n\n', t1/t2) ;
load west0479
A = west0479 ;
B = sprand (A) ;
C = A*B ;
D = ssmult (A,B) ;
err = norm (C-D,1) / norm (C,1) ;
fprintf ('west0479 error: %g\n', err) ;
fprintf ('\ntesting large matrices (may fail if you are low on memory):\n')
rand ('state', 0) ;
n = 10000 ;
A = sprand (n, n, 0.01) ;
B = sprand (n, n, 0.001) ;
test_large (A,B) ;
msg = { 'real', 'complex' } ;
% all of these calls to ssmult should fail:
fprintf ('\ntesting error handling (the errors below are expected):\n') ;
A = { 3, 'gunk', sparse(1), sparse(1), sparse(rand(3,2)) } ;
B = { 4, 0 , 5, msg, sparse(rand(3,4)) } ;
for k = 1:length(A)
try
% the following statement is supposed to fail
C = ssmult (A {k}, B {k}) ; %#ok
error ('test failed\n') ;
catch
disp (lasterr) ;
end
end
fprintf ('error handling tests: ok.\n') ;
% err should be zero:
rand ('state', 0)
for Acomplex = 0:1
for Bcomplex = 0:1
err = 0 ;
fprintf ('\ntesting C = A*B where A is %s, B is %s\n', ...
msg {Acomplex+1}, msg {Bcomplex+1}) ;
for m = [ 0:30 100 ]
fprintf ('.') ;
for n = [ 0:30 100 ]
for k = [ 0:30 100 ]
A = sprand (m,k,0.1) ;
if (Acomplex)
A = A + 1i*sprand (A) ;
end
B = sprand (k,n,0.1) ;
if (Bcomplex)
B = B + 1i*sprand (B) ;
end
C = A*B ;
D = ssmult (A,B) ;
s = ssmultsym (A,B) ;
err = max (err, norm (C-D,1)) ;
err = max (err, nnz (C-D)) ;
err = max (err, isreal (D) ~= (norm (imag (D), 1) == 0)) ;
err = max (err, s.nz > nnz (C)) ;
[i j x] = find (D) ; %#ok
if (~isempty (x))
err = max (err, any (x == 0)) ;
end
end
end
end
fprintf (' maximum error: %g\n', err) ;
end
end
sstest ;
fprintf ('\nSSMULT tests complete.\n') ;
%-------------------------------------------------------------------------------
function [v,pc] = getversion
% determine the MATLAB version, and return it as a double.
% only the primary and secondary version numbers are kept.
% MATLAB 7.0.4 becomes 7.0, version 6.5.2 becomes 6.5, etc.
v = version ;
t = find (v == '.') ;
if (length (t) > 1)
v = v (1:(t(2)-1)) ;
end
v = str2double (v) ;
try
% ispc does not appear in MATLAB 5.3
pc = ispc ;
catch
% if ispc fails, assume we are on a Windows PC if it's not unix
pc = ~isunix ;
end
%-------------------------------------------------------------------------------
function test_large (A,B)
% test large matrices
n = size (A,1) ;
fprintf ('dimension %d nnz(A): %d nnz(B): %d\n', n, nnz (A), nnz (B)) ;
c = ssmultsym (A,B) ;
fprintf ('nnz(C): %d flops: %g memory: %g MB\n', ...
c.nz, c.flops, c.memory/2^20) ;
try
% warmup for accurate timings
C = A*B ; %#ok
D = ssmult (A,B) ; %#ok
tic ;
C = A*B ;
t1 = toc ;
tic ;
D = ssmult (A,B) ;
t2 = toc ;
tic ;
t3 = toc ;
fprintf ('MATLAB time: %g\n', t1) ;
err = norm (C-D,1) ;
fprintf ('SSMULT time: %g err: %g\n', t2, err) ;
catch
disp (lasterr)
fprintf ('tests with large random matrices failed ...\n') ;
end
clear C D
|
github
|
lcnbeapp/beapp-master
|
pagerankdemo.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/SuiteSparse/MATLAB_Tools/pagerankdemo.m
| 6,301 |
utf_8
|
2a933a39d67b7f916bc79b747055d403
|
function pagerankdemo (steps)
% PAGERANKDEMO draw a 6-node web and compute its pagerank
%
% PAGERANKDEMO draws the 6-node "tiny web" in Section 2.11 of "Numerical
% Computing with MATLAB", by Cleve Moler, SIAM, 2004. It then simulates the
% computation of Google's PageRank algorithm, by randomly selecting links to
% traverse. If a link is traversed, the edge and the target node are displayed
% in red. If the "random surfer" jumps to an arbitrary page, the target node
% is displayed in blue. The number of hits at each node, and the page rank
% (in %) are displayed on each node. Note that after a large number of
% steps, the PageRanks (in percentages) converge to the values given in Section
% 2.11 of Moler (alpha: .321, sigma: .2007, beta: .1705, delta: .1368,
% gamma: .1066, rho: .0643). See http://www.mathworks.com/moler for more
% details (the pagerank M-file, in particular).
%
% Note that this method is NOT how the PageRank is actually computed. Instead
% the eigenvalue problem A*x=x is solved for x, where A is the Markov
% transition matrix, A = p*G*D + e*z', where G is the binary matrix used here.
% The method here is a simplistic random-hopping demonstration of the Markov
% process, to motivate the A*x=x formulation of the problem. In this example,
% A does control how the transitions are made, but the matrix A is not formed
% explicitly.
%
% This demo only operates on a single graph. It is meant as a simple demo
% only, suitable for in-class use. To compute the PageRanks for an arbitrary
% graph, use pagerank.m, or the power method (repeat x=A*x until convergence,
% where A is the Markov transition matrix of the web).
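%
% A minimal power-method sketch (illustrative only, not the code used by this
% demo; G is the binary link matrix, p the follow probability, and n the
% number of nodes, all as defined below):
%
%   c = sum (G) ;                                % out-degree of each node
%   k = find (c ~= 0) ;
%   D = sparse (k, k, 1 ./ c (k), n, n) ;        % scale columns by 1/out-degree
%   e = ones (n, 1) ;
%   z = ((1 - p) * (c ~= 0) + (c == 0))' / n ;   % teleportation column vector
%   A = p * G * D + e * z' ;                     % Markov transition matrix
%   x = e / n ;
%   for i = 1:100, x = A * x ; x = x / sum (x) ; end   % x converges to PageRank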
%
% Example:
% pagerankdemo
% pagerankdemo (1000) % run 1000 steps with no user input, then quit
%
% See also pagerank
%
% I suggest single-stepping a dozen times or so to see the link traversal in
% process, and then type "1000". Hit control-C to quit.
%
% Copyright 2007, Tim Davis, University of Florida
% Initial graph
Graph = graphinit ;
rand ('state', 0) ;
n = size (Graph.G, 1) ;
help pagerankdemo
% initialize the page counts
hits = zeros (1,n) ;
oldwhere = 1 ;
where = 1 ;
hits (where) = 1 ;
set (Graph.node (where), 'FaceColor', [0 0 1]) ;
p = 0.85 ; % probability a link will be followed
c = sum (Graph.G) ; % outgoing degree
links = cell (1,n) ;
for k = 1:n
links {k} = find (Graph.G (:,k)) ;
end
follow_link = 0 ;
if (nargin < 1)
input ('hit enter to start at node alpha: ') ;
end
% write the stats to the figure
set (Graph.nodelabel (where), 'string', ...
sprintf ('%s %d (%3.1f%%)', Graph.nodes {where}, hits (where), ...
100 * hits (where) / sum (hits))) ;
if (nargin < 1)
input ('hit enter to take one step: ') ;
steps = 1 ;
end
% repeat
while (1)
% clear the old color and old arrow
set (Graph.node (where), 'FaceColor', [0 1 0]) ;
if (follow_link)
set (Graph.arrows (where,oldwhere), 'LineWidth', 2) ;
set (Graph.arrows (where,oldwhere), 'Color', [0 0 0]) ;
end
% determine where to go to next
oldwhere = where ;
if (c (where) == 0 || rand > p)
% no outgoing links, or ignore the links
follow_link = 0 ;
where = floor (n * rand + 1) ;
set (Graph.node (where), 'FaceColor', [0 0 1]) ;
else
% move along the link
follow_link = 1 ;
where = links{where}(floor (c (where) * rand + 1)) ;
set (Graph.node (where), 'FaceColor', [1 0 0]) ;
set (Graph.arrows (where,oldwhere), 'LineWidth', 5) ;
set (Graph.arrows (where,oldwhere), 'Color', [1 0 0]) ;
end
% increment the hit count
hits (where) = hits (where) + 1 ;
% write the stats to the figure
for k = 1:n
set (Graph.nodelabel (k), 'string', ...
sprintf ('%s %d (%3.1f%%)', Graph.nodes {k}, hits (k), ...
100 * hits (k) / sum (hits))) ;
end
drawnow
% go the next step
steps = steps - 1 ;
if (steps <= 0)
if (nargin > 0)
break ;
end
steps = input ...
('number of steps to make (default 1, control-C to quit): ') ;
if (steps == 0)
break ;
end
if (isempty (steps))
steps = 1 ;
end
end
end
%-------------------------------------------------------------------------------
function Graph = graphinit
% GRAPHINIT create the tiny-web example in Moler, section 2.11, and draw it.
% Example
% G = graphinit ;
figure (1)
clf
nodes = { 'alpha', 'beta', 'gamma', 'delta', 'rho', 'sigma' } ;
xy = [
0 4
1 3
1 2
2 4
2 0
0 0
] ;
x = xy (:,1) ;
y = xy (:,2) ;
% scale x and y to be in the range 0.1 to 0.9
x = 0.8 * x / 2 + .1 ;
y = 0.8 * y / 4 + .1 ;
xy = [x y] ;
xy_delta = [
.08 .04 0
-.03 -.02 -1
.04 0 0
-.05 .04 -1
-.03 0 -1
.03 0 0
] ;
xd = xy_delta (:,1) ;
yd = xy_delta (:,2) ;
tjust = xy_delta (:,3) ;
G = [
0 0 0 1 0 1
1 0 0 0 0 0
0 1 0 0 0 0
0 1 1 0 0 0
0 0 1 0 0 0
1 0 1 0 0 0 ] ;
clf
n = size (G,1) ;
axes ('Position', [0 0 1 1], 'Visible', 'off') ;
node = zeros (n,1) ;
nodelabel = zeros (n,1) ;
for k = 1:n
node (k) = annotation ('ellipse', [x(k)-.025 y(k)-.025 .05 .05]) ;
set (node (k), 'LineWidth', 2) ;
set (node (k), 'FaceColor', [0 1 0]) ;
nodelabel (k) = text (x (k) + xd (k), y (k) + yd (k), nodes {k}, ...
'Units', 'normalized', 'FontSize', 16) ;
if (tjust (k) < 0)
set (nodelabel (k), 'HorizontalAlignment', 'right') ;
end
end
axis off
% Yes, I realize that this is overkill; arrows should be sparse.
% This example is not meant for large graphs.
arrows = zeros (n,n) ;
[i j] = find (G) ;
for k = 1:length (i)
% get the center of the two nodes
figx = [x(j(k)) x(i(k))] ;
figy = [y(j(k)) y(i(k))] ;
% [figx figy] = dsxy2figxy (gca, axx, axy);
% shorten the arrows by s units at each end
s = 0.03 ;
len = sqrt (diff (figx)^2 + diff (figy)^2) ;
fy (1) = diff (figy) * (s/len) + figy(1) ;
fy (2) = diff (figy) * (1-s/len) + figy(1) ;
fx (1) = diff (figx) * (s/len) + figx(1) ;
fx (2) = diff (figx) * (1-s/len) + figx(1) ;
arrows (i(k),j(k)) = annotation ('arrow', fx, fy) ;
set (arrows (i(k),j(k)), 'LineWidth', 2) ;
set (arrows (i(k),j(k)), 'HeadLength', 20) ;
set (arrows (i(k),j(k)), 'HeadWidth', 20) ;
end
Graph.G = G ;
Graph.nodes = nodes ;
Graph.node = node ;
Graph.xy = xy ;
Graph.xy_delta = xy_delta ;
Graph.nodelabel = nodelabel ;
Graph.arrows = arrows ;
|
github
|
lcnbeapp/beapp-master
|
gipper.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/SuiteSparse/MATLAB_Tools/gipper.m
| 7,444 |
utf_8
|
b1e1e96f392196e2afd7ffa43315ff4d
|
function files_out = gipper (directory, include, exclude, exclude_hidden)
%GIPPER zip selected files and subdirectories (gipper = grep + zip)
%
% files = gipper (directory, include, exclude, exclude_hidden) ;
%
% Creates a zip file of all files and subdirectories in a directory. A file in
% the directory or any of its subdirectories whose name matches any expression
% in 'include' via regexp is added to the zip file. A file that matches any
% expression in 'exclude' is not added. A subdirectory whose name or full
% pathname matches any expression in 'exclude' is not searched. The name of
% the zip file is the name of the directory, with '.zip' appended.
%
% 'include' and 'exclude' are either cells of strings, or just single strings.
%
% With no outputs, a list of files is printed and the user is prompted before
% proceeding. Otherwise, the gipper proceeds without prompting and returns a
% list of files that were added to the zip file.
%
% By default, all files and subdirectories of the directory are included, except
% that hidden files and directories (those whose names start with a dot, '.')
% are excluded.
%
% If any parameter is empty or not present, the defaults are used:
% directory: defaults to the current directory
% include: defaults to include all files and directories
% exclude: defaults to exclude nothing, as modified by 'exclude_hidden'
% exclude_hidden: 1 (exclude hidden files and directories)
%
% Empty directories or subdirectories are never included.
%
% Example:
% % suppose 'X' is the name of the current directory.
%
% % include all files in X (except hidden files) in the zip file ../X.zip
% gipper
%
% % create mytoolbox.zip archive of the 'X/mytoolbox' directory
% gipper mytoolbox
%
% % only include *.m files in ../X.zip
% gipper '' '\.m$'
%
% % create ../X.zip, but exclude compiled object and MEX files
% gipper ('', '', { '\.o$' '\.obj$', ['\.' mexext '$'] })
%
% % include everything, including hidden files, in ../X.zip
% gipper ('', '', '', 0)
%
% % zip mytoolbox, except hidden files and the mytoolbox/old directory
% gipper mytoolbox '' old
%
% % these are the same, except gipper also traverses subdirectories
% gipper ('', { '\.m$', '\.*mat$' })
% zip ('../X', { '*.m', '*.mat' })
%
% See also zip, regexp, unzip.
% NOTE: if the directory name is empty or not present, and you hit control-C
% while the gipper is running, your current directory will now be the parent.
% You must install the gipper first, by placing it in your MATLAB path.
% Copyright 2007, Timothy A. Davis, Univ. of Florida. Win one for the gipper.
% Created May 2007, using MATLAB 7.4 (R2007a). Requires MATLAB 6.5 or later.
% exclude hidden files and directories by default
if (nargin < 4)
exclude_hidden = 1 ;
end
% exclude nothing by default (as modified by exclude_hidden)
if (nargin < 3)
exclude = { } ;
end
exclude = cleanup (exclude) ;
% append the hidden file and directory rule, if requested
if (exclude_hidden)
exclude = union (exclude, { '^\.', [ '\' filesep '\.' ] }) ;
end
% always exclude '.' and '..' files
exclude = union (exclude, { '^\.$', '^\.\.$' }) ;
% include all files by default
if (nargin < 2 || isempty (include))
include = { '.' } ;
end
include = cleanup (include) ;
% operate on the current directory, if not specified
if (nargin < 1 || isempty (directory))
here = pwd ;
directory = here ((find (here == filesep, 1, 'last') + 1) : end) ;
% use try-catch so that if a failure occurs, we go back to current
% directory. Unfortunately, this mechanism does not catch a control-C.
gipper_found = 0 ;
try
% run the gipper in the parent
cd ('..') ;
% if gipper.m is not in the path, it will no longer exist
gipper_found = ~isempty (which ('gipper')) ;
if (gipper_found)
if (nargout == 0)
fprintf ('Note that if you terminate gipper with control-C, ') ;
fprintf ('your\ndirectory will be changed to the parent') ;
fprintf (' (as in "cd ..").\n') ;
gipper (directory, include, exclude, exclude_hidden) ;
else
files_out = gipper (directory, include, exclude,exclude_hidden);
end
end
catch
cd (here) ;
rethrow (lasterror) ;
end
% go back to where we started
cd (here) ;
if (~gipper_found)
fprintf ('To install the gipper, type "pathtool" and add\n') ;
fprintf ('the directory in which it resides:\n') ;
fprintf ('%s\n', which (mfilename)) ;
error ('You must install the gipper first.') ;
end
return
else
if (nargout == 0)
fprintf ('\ngipper: creating %s%s%s.zip\n', pwd, filesep, directory) ;
end
end
% get the list of files to zip
n = 0 ;
files = { } ;
for file = dir (directory)'
[files, n] = finder (files, n, directory, file.name, include, exclude) ;
end
files = files (1:n)' ;
% cannot create an empty zip file
if (isempty (files))
warning ('gipper:nothing', 'nothing to zip; no zip file created') ;
if (nargout > 0)
files_out = files ;
end
return
end
% return the list of files, or confirm
if (nargout == 0)
% print the list of files and ask for confirmation first
fprintf ('Creating a zip archive containing these files:\n\n') ;
for k = 1:length(files)
fprintf (' %s\n', files {k}) ;
end
fprintf ('\nCreating the zip archive: %s', directory) ;
if (isempty (regexp (directory, '\.zip$', 'once')))
fprintf ('.zip') ;
end
fprintf ('\n') ;
reply = input ('Proceed? (yes or no, default is yes): ', 's') ;
if (~isempty (reply) && lower (reply (1)) == 'n')
fprintf ('zip file not created\n') ;
return
end
else
% zip the files without asking
files_out = files ;
end
% zip the files
zip (directory, files) ;
%-------------------------------------------------------------------------------
function [files, n] = finder (files, n, prefix, name, include, exclude)
% finder: return a list of files to zip
% fullname includes the entire path to the file or directory
fullname = [prefix filesep name] ;
if (isdir (fullname))
% always traverse a subdirectory to look for files to include, unless the
% directory name or fullname itself is explicitly excluded.
if (~(grep (name, exclude) || grep (fullname, exclude)))
% the directory is selected, recursively traverse it
for file = dir (fullname)'
[files, n] = finder (files, n, fullname, file.name, ...
include, exclude) ;
end
end
else
% this is a file, apply the include/exclude rules to just the file name
% itself not the fullname.
if (grep (name, include) && ~grep (name, exclude))
% the file is selected for the archive. Use a dynamic-table approach
% to speed up the dynamic growth of the table.
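% (the files table is roughly doubled whenever it fills, so appending each
% new file is amortized O(1) rather than O(n))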
n = n + 1 ;
files {n} = fullname ;
if (n == length (files))
files {2*n} = [ ] ;
end
end
end
%-------------------------------------------------------------------------------
function match = grep (string, list)
% grep: determine if a string matches an expression in a list
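% e.g., grep ('foo.m', { '\.m$' }) is 1, and grep ('foo.m', { '\.c$' }) is 0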
match = 0 ;
for expression = list
if (~isempty (regexp (string, expression {1}, 'once')))
match = 1 ;
return ;
end
end
%-------------------------------------------------------------------------------
function s = cleanup (s)
% cleanup: ensure the input list is in the proper format
s = s (:)' ; % make sure it is a row vector
if (ischar (s))
s = { s } ; % if it is a string, convert it into a cell with one string
end
|
github
|
lcnbeapp/beapp-master
|
shellgui.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/SuiteSparse/MATLAB_Tools/shellgui/shellgui.m
| 11,655 |
utf_8
|
989265d0d56df9f25461086f5878b569
|
function varargout = shellgui(varargin)
%SHELLGUI GUI interface for seashell function
% Timothy A. Davis, Chapman Hall / CRC Press, 7th edition.
% Controls the parameters a, b, c, n, azimuth, and elevation, using
% sliders. To sweep the whole range of each parameter, click on the button to
% the right of each slider.
%
% Example:
% shellgui
%
% See also GUIDE, SEASHELL
% Copyright 2006 Timothy A. Davis
% Last Modified by GUIDE v2.5 29-Jul-2006 11:33:37
% Begin initialization code - DO NOT EDIT
gui_Singleton = 1;
gui_State = struct('gui_Name', mfilename, ...
'gui_Singleton', gui_Singleton, ...
'gui_OpeningFcn', @shellgui_OpeningFcn, ...
'gui_OutputFcn', @shellgui_OutputFcn, ...
'gui_LayoutFcn', [] , ...
'gui_Callback', []);
if nargin && ischar(varargin{1})
gui_State.gui_Callback = str2func(varargin{1});
end
if nargout
[varargout{1:nargout}] = gui_mainfcn(gui_State, varargin{:});
else
gui_mainfcn(gui_State, varargin{:});
end
% End initialization code - DO NOT EDIT
% --- Executes just before shellgui is made visible.
function shellgui_OpeningFcn(hObject, eventdata, handles, varargin) %#ok
% This function has no output args, see OutputFcn.
% hObject handle to figure
% eventdata reserved - to be defined in a future version of MATLAB
% handles structure with handles and user data (see GUIDATA)
% varargin command line arguments to shellgui (see VARARGIN)
% Choose default command line output for shellgui
handles.output = hObject;
% Update handles structure
guidata(hObject, handles);
% UIWAIT makes shellgui wait for user response (see UIRESUME)
% uiwait(handles.figure1);
% --- Outputs from this function are returned to the command line.
function varargout = shellgui_OutputFcn(hObject, eventdata, handles) %#ok
% varargout cell array for returning output args (see VARARGOUT);
% hObject handle to figure
% eventdata reserved - to be defined in a future version of MATLAB
% handles structure with handles and user data (see GUIDATA)
% Get default command line output from handles structure
varargout{1} = handles.output;
% --- Executes on slider movement.
function slider1_Callback(hObject, eventdata, handles) %#ok
% hObject handle to slider1 (see GCBO)
% eventdata reserved - to be defined in a future version of MATLAB
% handles structure with handles and user data (see GUIDATA)
% Hints: get(hObject,'Value') returns position of slider
% get(hObject,'Min') and get(hObject,'Max') to determine range of slider
global a b c n azimuth elevation
a = get (hObject, 'Value') ;
seashell (a, b, c, n, azimuth, elevation) ;
% --- Executes during object creation, after setting all properties.
function slider1_CreateFcn(hObject, eventdata, handles) %#ok
% hObject handle to slider1 (see GCBO)
% eventdata reserved - to be defined in a future version of MATLAB
% handles empty - handles not created until after all CreateFcns called
% Hint: slider controls usually have a light gray background.
if isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor'))
set(hObject,'BackgroundColor',[.9 .9 .9]);
end
% --- Executes on slider movement.
function slider2_Callback(hObject, eventdata, handles) %#ok
% hObject handle to slider2 (see GCBO)
% eventdata reserved - to be defined in a future version of MATLAB
% handles structure with handles and user data (see GUIDATA)
% Hints: get(hObject,'Value') returns position of slider
% get(hObject,'Min') and get(hObject,'Max') to determine range of slider
global a b c n azimuth elevation
b = get (hObject, 'Value') ;
seashell (a, b, c, n, azimuth, elevation) ;
% --- Executes during object creation, after setting all properties.
function slider2_CreateFcn(hObject, eventdata, handles) %#ok
% hObject handle to slider2 (see GCBO)
% eventdata reserved - to be defined in a future version of MATLAB
% handles empty - handles not created until after all CreateFcns called
% Hint: slider controls usually have a light gray background.
if isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor'))
set(hObject,'BackgroundColor',[.9 .9 .9]);
end
global a b c n azimuth elevation
a = -0.2 ;
b = 0.5 ;
c = 0.1 ;
n = 2 ;
azimuth = -150 ;
elevation = 10 ;
seashell ;
% --- Executes on slider movement.
function slider3_Callback(hObject, eventdata, handles) %#ok
% hObject handle to slider3 (see GCBO)
% eventdata reserved - to be defined in a future version of MATLAB
% handles structure with handles and user data (see GUIDATA)
% Hints: get(hObject,'Value') returns position of slider
% get(hObject,'Min') and get(hObject,'Max') to determine range of slider
global a b c n azimuth elevation
c = get (hObject, 'Value') ;
seashell (a, b, c, n, azimuth, elevation) ;
% --- Executes during object creation, after setting all properties.
function slider3_CreateFcn(hObject, eventdata, handles) %#ok
% hObject handle to slider3 (see GCBO)
% eventdata reserved - to be defined in a future version of MATLAB
% handles empty - handles not created until after all CreateFcns called
% Hint: slider controls usually have a light gray background.
if isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor'))
set(hObject,'BackgroundColor',[.9 .9 .9]);
end
% --- Executes on slider movement.
function slider4_Callback(hObject, eventdata, handles) %#ok
% hObject handle to slider2 (see GCBO)
% eventdata reserved - to be defined in a future version of MATLAB
% handles structure with handles and user data (see GUIDATA)
% Hints: get(hObject,'Value') returns position of slider
% get(hObject,'Min') and get(hObject,'Max') to determine range of slider
global a b c n azimuth elevation
n = get (hObject, 'Value') ;
seashell (a, b, c, n, azimuth, elevation) ;
% --- Executes during object creation, after setting all properties.
function slider4_CreateFcn(hObject, eventdata, handles) %#ok
% hObject handle to slider2 (see GCBO)
% eventdata reserved - to be defined in a future version of MATLAB
% handles empty - handles not created until after all CreateFcns called
% Hint: slider controls usually have a light gray background.
if isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor'))
set(hObject,'BackgroundColor',[.9 .9 .9]);
end
% --- Executes on slider movement.
function slider8_Callback(hObject, eventdata, handles) %#ok
% hObject handle to slider8 (see GCBO)
% eventdata reserved - to be defined in a future version of MATLAB
% handles structure with handles and user data (see GUIDATA)
% Hints: get(hObject,'Value') returns position of slider
% get(hObject,'Min') and get(hObject,'Max') to determine range of slider
global a b c n azimuth elevation
azimuth = get (hObject, 'Value') ;
seashell (a, b, c, n, azimuth, elevation) ;
% --- Executes during object creation, after setting all properties.
function slider8_CreateFcn(hObject, eventdata, handles) %#ok
% hObject handle to slider8 (see GCBO)
% eventdata reserved - to be defined in a future version of MATLAB
% handles empty - handles not created until after all CreateFcns called
% Hint: slider controls usually have a light gray background.
if isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor'))
set(hObject,'BackgroundColor',[.9 .9 .9]);
end
% --- Executes on slider movement.
function slider9_Callback(hObject, eventdata, handles) %#ok
% hObject handle to slider9 (see GCBO)
% eventdata reserved - to be defined in a future version of MATLAB
% handles structure with handles and user data (see GUIDATA)
% Hints: get(hObject,'Value') returns position of slider
% get(hObject,'Min') and get(hObject,'Max') to determine range of slider
global a b c n azimuth elevation
elevation = get (hObject, 'Value') ;
seashell (a, b, c, n, azimuth, elevation) ;
% --- Executes during object creation, after setting all properties.
function slider9_CreateFcn(hObject, eventdata, handles) %#ok
% hObject handle to slider9 (see GCBO)
% eventdata reserved - to be defined in a future version of MATLAB
% handles empty - handles not created until after all CreateFcns called
% Hint: slider controls usually have a light gray background.
if isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor'))
set(hObject,'BackgroundColor',[.9 .9 .9]);
end
% --- Executes on button press in pushbutton3.
function pushbutton3_Callback(hObject, eventdata, handles) %#ok
% hObject handle to pushbutton3 (see GCBO)
% eventdata reserved - to be defined in a future version of MATLAB
% handles structure with handles and user data (see GUIDATA)
global a b c n azimuth elevation
seashell (a, b, c, n, Inf, elevation) ;
seashell (a, b, c, n, azimuth, elevation) ;
% --- Executes on button press in pushbutton4.
function pushbutton4_Callback(hObject, eventdata, handles) %#ok
% hObject handle to pushbutton4 (see GCBO)
% eventdata reserved - to be defined in a future version of MATLAB
% handles structure with handles and user data (see GUIDATA)
global a b c n azimuth elevation
for a2 = -1:.1:1
seashell (a2, b, c, n, azimuth, elevation) ;
drawnow
end
seashell (a, b, c, n, azimuth, elevation) ;
% --- Executes on button press in pushbutton5.
function pushbutton5_Callback(hObject, eventdata, handles) %#ok
% hObject handle to pushbutton5 (see GCBO)
% eventdata reserved - to be defined in a future version of MATLAB
% handles structure with handles and user data (see GUIDATA)
global a b c n azimuth elevation
for b2 = -1:.1:1
seashell (a, b2, c, n, azimuth, elevation) ;
drawnow
end
seashell (a, b, c, n, azimuth, elevation) ;
% --- Executes on button press in pushbutton6.
function pushbutton6_Callback(hObject, eventdata, handles) %#ok
% hObject handle to pushbutton6 (see GCBO)
% eventdata reserved - to be defined in a future version of MATLAB
% handles structure with handles and user data (see GUIDATA)
global a b c n azimuth elevation
for c2 = -1:.1:1
seashell (a, b, c2, n, azimuth, elevation) ;
drawnow
end
seashell (a, b, c, n, azimuth, elevation) ;
% --- Executes on button press in pushbutton7.
function pushbutton7_Callback(hObject, eventdata, handles) %#ok
% hObject handle to pushbutton7 (see GCBO)
% eventdata reserved - to be defined in a future version of MATLAB
% handles structure with handles and user data (see GUIDATA)
global a b c n azimuth elevation
for n2 = 0:.5:8
seashell (a, b, c, n2, azimuth, elevation) ;
drawnow
end
seashell (a, b, c, n, azimuth, elevation) ;
% --- Executes on button press in pushbutton8.
function pushbutton8_Callback(hObject, eventdata, handles) %#ok
% hObject handle to pushbutton8 (see GCBO)
% eventdata reserved - to be defined in a future version of MATLAB
% handles structure with handles and user data (see GUIDATA)
global a b c n azimuth elevation
for el = -80:10:80
seashell (a, b, c, n, azimuth, el) ;
drawnow
end
seashell (a, b, c, n, azimuth, elevation) ;
% --- Executes on button press in pushbutton9.
function pushbutton9_Callback(hObject, eventdata, handles) %#ok
% hObject handle to pushbutton9 (see GCBO)
% eventdata reserved - to be defined in a future version of MATLAB
% handles structure with handles and user data (see GUIDATA)
web ('http://www.cise.ufl.edu/research/sparse/MATLAB') ;
|
github
|
lcnbeapp/beapp-master
|
spok_test.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/SuiteSparse/MATLAB_Tools/spok/spok_test.m
| 1,896 |
utf_8
|
fb75171c79dfb5ddc82c0cd60c48db62
|
function spok_test
%SPOK_TEST installs and tests SPOK
%
% Example:
% spok_test
%
% See also sparse, spok, spok_install
% Copyright 2008, Tim Davis, University of Florida
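% In the tests below, ok = spok (A) should return 1 for every valid sparse
% matrix, and each invalid or non-sparse input should also raise a warning
% with the identifier checked by test_spok.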
% compile and install spok
help spok ;
spok_install ;
c = pwd ;
cd private ;
mex spok_invalid.c ;
cd (c) ;
% test with valid matrices
fprintf ('\nTesting spok, please wait ...\n') ;
lastwarn ('') ;
test_spok (sparse ([ ]), 1, '') ;
test_spok (sparse (logical ([ ])), 1, '') ;
test_spok (sparse (0,4), 1, '') ;
test_spok (sparse (4,4), 1, '') ;
for trials = 1:2
for m = 0:10
for n = 0:10
for d = 0:.1:1
A = sprand (m,n,d) ;
B = sprand (m,n,d) ;
test_spok (A, 1, '') ;
test_spok (A + 1i*B, 1, '') ;
test_spok (A > 0, 1, '') ;
end
end
end
end
% test with non-sparse matrices
fprintf ('\nTesting on non-sparse matrices; 7 warnings should appear:\n') ;
test_spok ('hi', 1, 'SPOK:NotSparse') ;
test_spok (cell (42), 1, 'SPOK:NotSparse') ;
test_spok ([ ], 1, 'SPOK:NotSparse') ;
test_spok (ones (10), 1, 'SPOK:NotSparse') ;
test_spok (ones (10) > 0, 1, 'SPOK:NotSparse') ;
test_spok (ones (0,10), 1, 'SPOK:NotSparse') ;
test_spok (ones (10,0), 1, 'SPOK:NotSparse') ;
% test with an invalid matrix
fprintf ('\nTesting on invalid sparse matrices; 2 warnings should appear:\n') ;
test_spok (spok_invalid (0), 0, 'SPOK:QuestionableMatrix') ;
test_spok (spok_invalid (1), 0, 'SPOK:QuestionableMatrix') ;
fprintf ('\nAll tests passed.\n') ;
%-------------------------------------------------------------------------------
function test_spok (A, ok_expected, id_expected)
%TEST_SPOK tests spok and checks its result
lastwarn ('') ;
ok = spok (A) ;
[msg id] = lastwarn ;
if (ok ~= ok_expected || ~strcmp (id, id_expected))
lastwarn
error ('test failure') ;
end
|
github
|
lcnbeapp/beapp-master
|
waitex.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/SuiteSparse/MATLAB_Tools/waitmex/waitex.m
| 1,047 |
utf_8
|
65beaefa68f21bc993f23673d3d2c040
|
function result = waitex
%WAITEX same as the waitexample mexFunction, just in M instead of C.
% The only purpose of this function is to serve as a precise description of
% what the waitexample mexFunction does.
%
% Example:
% waitex % draw a waitbar, make progress, and then close the waitbar
% h = waitex ; % same as above, except leave the waitbar on the screen
% % and return the handle h to the waitbar.
%
% See also waitbar, waitexample.
% Copyright 2007, T. Davis
x = 0 ;
h = waitbar (0, 'Please wait...') ;
for i = 0:100
if (i == 50)
waitbar (i/100, h, 'over half way there') ;
else
waitbar (i/100, h) ;
end
% do some useless work
for j = 0:1e5
x = useless (x) ;
end
end
if (nargout > 0)
% h is returned to the caller; leave the waitbar on the screen
result = h ;
else
% close the waitbar, and do not return the handle h
close (h) ;
end
function x = useless (x)
%USELESS do some useless work (x = useless (x) just increments x)
x = x + 1 ;
|
github
|
lcnbeapp/beapp-master
|
find_components_example.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/SuiteSparse/MATLAB_Tools/find_components/find_components_example.m
| 3,897 |
utf_8
|
a53a3fbb3ee31c9062389fd2faf0f302
|
function find_components_example(example)
%FIND_COMPONENTS_EXAMPLE gives an example usage of find_components.
%
% Example:
% find_components_example(0) % a small example, with lots of printing
% find_components_example(1) % Doug's example, with lots of printing
% find_components_example(2) % a large example, just plotting
% find_components_example(A) % use the matrix A for the example
%
% See http://blogs.mathworks.com/pick/2008/08/18 for Doug Hull's description of
% the problem this m-file solves. With no inputs, Doug's example is used.
%
% See also FIND_COMPONENTS, LARGEST_COMPONENT, DMPERM, GPLOT
% Copyright 2008, Tim Davis, University of Florida
%-------------------------------------------------------------------------------
% construct an image
%-------------------------------------------------------------------------------
if (nargin < 1)
example = 1 ;
end
if (example == 0)
A = [ 1 2 2 3
1 1 2 3
0 0 1 2
0 1 3 3 ] ;
elseif (example == 1)
A = [ 2 2 1 1 2
3 0 1 0 1
3 2 2 2 1
1 2 2 1 2
0 3 2 0 1 ] ;
elseif (example == 2)
A = round (rand (30) * 2) ;
else
A = example ;
end
[m n] = size (A) ;
%-------------------------------------------------------------------------------
% find all of its components
%-------------------------------------------------------------------------------
tic
[p r nc G xy] = find_components (A,1) ;
t = toc ;
fprintf ('Image size: %d-by-%d, time taken: %g seconds\n', m, n, t) ;
%-------------------------------------------------------------------------------
% walk through the components, plotting and printing them.
%-------------------------------------------------------------------------------
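% (the packed output convention used here: p(r(k):r(k+1)-1) lists the linear
% indices of the pixels in the k-th component, and nc is the number of
% components found by find_components)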
prompt = 'hit enter to single-step, ''a'' to show all, ''q'' to quit: ' ;
small = (max (m,n) <= 10) ;
dopause = 1 ;
for k = 1:nc
% get the nodes of the kth component
nodes = p (r (k) : r (k+1)-1) ;
% for large graphs, do not show components of size 1
if (~small && length (nodes) == 1)
continue
end
% plot the graph with the kth component highlighted
hold off
gplot (G, xy, '-') ;
hold on
[X,Y] = gplot (G * sparse (nodes, nodes, 1, m*n, m*n), xy, 'r-') ;
plot (X, Y, 'r-', 'LineWidth', 3) ;
axis ([0 n+1 0 m+1]) ;
a = A (p (r (k))) ;
siz = length (nodes) ;
Title = sprintf ('Graph component %d, size %d, value %g', k, siz, a) ;
title (Title, 'FontSize', 20) ;
label_nodes (xy, A, small, nodes) ;
drawnow
% print the image and the kth component, if the image is small
if (small)
fprintf ('\n%s\n', Title) ;
C = nan (m,n) ;
C (nodes) = a ;
fprintf ('A = \n') ; disp (A) ;
fprintf ('the component = \n') ; disp (C) ;
end
% pause, or prompt the user
if (dopause && (k < nc))
s = input (prompt, 's') ;
dopause = isempty (s) ;
if (~dopause && s (1) == 'q')
break ;
end
else
pause (0.5)
end
end
%-------------------------------------------------------------------------------
% plot the whole graph, no components highlighted
%-------------------------------------------------------------------------------
hold off
gplot (G, xy, '-') ;
title (sprintf ('%d connected components', nc), 'FontSize', 20) ;
axis ([0 n+1 0 m+1]) ;
label_nodes (xy, A, small)
%-------------------------------------------------------------------------------
function label_nodes (xy, A, small, nodes)
%LABEL_NODES label all the nodes in the plot
if (small)
[m n] = size (A) ;
for i = 1:m*n
text (xy (i,1), xy (i,2), sprintf ('%g', A (i)), 'FontSize', 20) ;
end
if (nargin == 4)
for i = nodes
text (xy (i,1), xy (i,2), sprintf ('%g', A (i)), ...
'FontSize', 20, 'Color', 'r') ;
end
end
end
|
github
|
lcnbeapp/beapp-master
|
spqr_make.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/SuiteSparse/SPQR/MATLAB/spqr_make.m
| 17,403 |
utf_8
|
b2219499a73fd1e89a27ba4c1286be03
|
function spqr_make (metis_path,opt1,opt2)
%SPQR_MAKE compiles the SuiteSparseQR mexFunctions
%
% Example:
% spqr_make
%
% SuiteSparseQR relies on CHOLMOD, AMD, and COLAMD, and optionally CCOLAMD,
% CAMD, and METIS. All but METIS are distributed with CHOLMOD. To compile
% SuiteSparseQR to use METIS you must first place a copy of the metis-4.0
% directory (METIS version 4.0.1) in same directory that contains the AMD,
% COLAMD, CCOLAMD, CHOLMOD, and SuiteSparseQR directories. Next, type
%
% spqr_make
%
% in the MATLAB command window. Alternatively, use this command:
%
% spqr_make ('path to your copy of metis-4.0 here') ;
% spqr_make ('metis') ; % use the default METIS location
%
% See http://www-users.cs.umn.edu/~karypis/metis for a copy of
% METIS 4.0.1. If you do not have METIS, use either of the following:
%
% spqr_make ('')
% spqr_make ('no metis')
%
% To compile using Intel's Threading Building Blocks (TBB) use:
%
% spqr_make ('','tbb') % without METIS
% spqr_make ('metis','tbb','timing') % with METIS, TBB, & timing
% spqr_make ('path to your METIS here','tbb') % with METIS and TBB
%
% TBB parallelism is not the default, since it conflicts with the multithreaded
% BLAS (the Intel MKL are OpenMP based, for example). This may change in
% future versions. The 'timing' string, if present, enables timing and
% exact flop counts (disabled by default; works for Linux only).
%
% You must type the spqr_make command while in the SuiteSparseQR/MATLAB
% directory.
%
% See also spqr, spqr_solve, spqr_qmult, qr, mldivide
% Copyright 2008, Timothy A. Davis
% http://www.cise.ufl.edu/research/sparse
details = 0 ; % 1 if details of each command are to be printed, 0 if not
v = getversion ;
try
% ispc does not appear in MATLAB 5.3
pc = ispc ;
catch %#ok
% if ispc fails, assume we are on a Windows PC if it's not unix
pc = ~isunix ;
end
d = '' ;
is64 = (~isempty (strfind (computer, '64'))) ;
% if (is64)
% % 64-bit MATLAB
% d = '-largeArrayDims' ;
% end
if (is64)
% 64-bit MATLAB
d = '-largeArrayDims' ;
% The next three lines are added by [email protected] (-09).
% These options are needed for some reason in Matlab 7.8 or newer.
if v >= 7.8
d = [d ' -DLONG -D''LONGBLAS=UF_long'''];
end
end
include = '-DNMATRIXOPS -DNMODIFY -I. -I../../AMD/Include -I../../COLAMD/Include -I../../CHOLMOD/Include -I../Include -I../../UFconfig' ;
% Determine the METIS path, and whether or not METIS is available
if (nargin < 1)
metis_path = 'metis' ;
end
if (strcmp (metis_path, 'no metis') | strcmp (metis_path, 'nometis'))
metis_path = '' ;
elseif (strcmp (metis_path, 'metis'))
metis_path = '../../metis-4.0' ;
end
have_metis = ~isempty (metis_path) & isdir (metis_path) ;
% Determine if TBB and/or timing is to be used
if (nargin < 2)
tbb = 0 ;
timing = 0 ;
elseif (nargin < 3)
tbb = strcmp (opt1, 'tbb') ;
timing = strcmp (opt1, 'timing') ;
elseif (nargin < 4)
tbb = strcmp (opt1, 'tbb') | strcmp (opt2, 'tbb') ;
timing = strcmp (opt1, 'timing') | strcmp (opt2, 'timing') ;
end
% fix the METIS 4.0.1 rename.h file
if (have_metis)
fprintf ('Compiling SuiteSparseQR with METIS on MATLAB Version %g\n', v) ;
f = fopen ('rename.h', 'w') ;
if (f == -1)
error ('unable to create rename.h in current directory') ;
end
fprintf (f, '/* do not edit this file; generated by spqr_make */\n') ;
fprintf (f, '#undef log2\n') ;
fprintf (f, '#include "%s/Lib/rename.h"\n', metis_path) ;
fprintf (f, '#undef log2\n') ;
fprintf (f, '#define log2 METIS__log2\n') ;
fprintf (f, '#include "mex.h"\n') ;
fprintf (f, '#define malloc mxMalloc\n') ;
fprintf (f, '#define free mxFree\n') ;
fprintf (f, '#define calloc mxCalloc\n') ;
fprintf (f, '#define realloc mxRealloc\n') ;
fclose (f) ;
include = [include ' -I' metis_path '/Lib'] ;
include = [include ' -I../../CCOLAMD/Include -I../../CAMD/Include' ] ;
else
fprintf ('Compiling SuiteSparseQR without METIS on MATLAB Version %g\n', v);
include = ['-DNPARTITION ' include ] ;
end
%-------------------------------------------------------------------------------
% BLAS option
%-------------------------------------------------------------------------------
% This is exceedingly ugly. The MATLAB mex command needs to be told where to
% find the LAPACK and BLAS libraries, which is a real portability nightmare.
% The correct option is highly variable and depends on the MATLAB version.
if (pc)
if (v < 6.5)
% MATLAB 6.1 and earlier: use the version supplied in CHOLMOD
lib = '../../CHOLMOD/MATLAB/lcc_lib/libmwlapack.lib' ;
elseif (v < 7.5)
% use the built-in LAPACK lib (which includes the BLAS)
lib = 'libmwlapack.lib' ;
else
% need to also use the built-in BLAS lib
lib = 'libmwlapack.lib libmwblas.lib' ;
end
else
if (v < 7.5)
% MATLAB 7.5 and earlier, use the LAPACK lib (including the BLAS)
lib = '-lmwlapack' ;
else
% MATLAB 7.6 requires the -lmwblas option; earlier versions do not
lib = '-lmwlapack -lmwblas' ;
end
end
%-------------------------------------------------------------------------------
% TBB option
%-------------------------------------------------------------------------------
% You should install TBB properly so that mex can find the library files and
% include files, but you can also modify the tbb_lib_path and tbb_include_path
% strings below if you need to specify the path to your own installation of
% TBB.
% vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
% >>>>>>>>>>>>>>>>>>>>> EDIT THE tbb_path BELOW AS NEEDED <<<<<<<<<<<<<<<<<<<<<<
if (pc)
% For Windows, with TBB installed in C:\TBB. Edit this line as needed:
tbb_path = 'C:\TBB\tbb21_009oss' ;
else
% For Linux, edit this line as needed (not needed if already in /usr/lib):
tbb_path = '/cise/homes/davis/Install/tbb21_009oss' ;
end
% ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
% You should not have to edit the lines below.
if (pc)
if (is64)
tbb_lib_path = [tbb_path '\em64t\vc9\lib\'] ;
else
tbb_lib_path = [tbb_path '\ia32\vc9\lib\'] ;
end
tbb_include_path = [tbb_path '\include\'] ;
else
% For Linux: TBB might already be installed in /usr/lib
if (exist ('/usr/lib/libtbb.so', 'file'))
% do not edit these lines
tbb_path = '' ;
tbb_lib_path = '' ;
tbb_include_path = '' ;
else
if (is64)
tbb_lib_path = [tbb_path '/em64t/cc4.1.0_libc2.4_kernel2.6.16.21/lib'] ;
else
tbb_lib_path = [tbb_path '/ia32/cc4.1.0_libc2.4_kernel2.6.16.21/lib'] ;
end
tbb_include_path = [tbb_path '/include/'] ;
end
end
if (tbb)
fprintf ('Compiling with Intel TBB parallelism\n') ;
lib = [lib ' -L' tbb_lib_path ' -ltbb'] ;
include = [include ' -I' tbb_include_path ' -DHAVE_TBB' ] ;
end
%-------------------------------------------------------------------------------
% TIMING option (for performance testing on Linux only)
%-------------------------------------------------------------------------------
if (timing && ~pc)
fprintf ('with timing enabled\n') ;
include = [include ' -DTIMING'] ;
lib = [lib ' -lrt'] ;
end
%-------------------------------------------------------------------------------
% make sure the paths have valid file-separators
%-------------------------------------------------------------------------------
include = strrep (include, '/', filesep) ;
lib = strrep (lib, '/', filesep) ;
%-------------------------------------------------------------------------------
% ready to compile ...
%-------------------------------------------------------------------------------
amd_c_src = { ...
'../../AMD/Source/amd_1', ...
'../../AMD/Source/amd_2', ...
'../../AMD/Source/amd_aat', ...
'../../AMD/Source/amd_control', ...
'../../AMD/Source/amd_defaults', ...
'../../AMD/Source/amd_dump', ...
'../../AMD/Source/amd_global', ...
'../../AMD/Source/amd_info', ...
'../../AMD/Source/amd_order', ...
'../../AMD/Source/amd_postorder', ...
'../../AMD/Source/amd_post_tree', ...
'../../AMD/Source/amd_preprocess', ...
'../../AMD/Source/amd_valid' } ;
colamd_c_src = {
'../../COLAMD/Source/colamd', ...
'../../COLAMD/Source/colamd_global' } ;
% CAMD and CCOLAMD are not needed if we don't have METIS
camd_c_src = { ...
'../../CAMD/Source/camd_1', ...
'../../CAMD/Source/camd_2', ...
'../../CAMD/Source/camd_aat', ...
'../../CAMD/Source/camd_control', ...
'../../CAMD/Source/camd_defaults', ...
'../../CAMD/Source/camd_dump', ...
'../../CAMD/Source/camd_global', ...
'../../CAMD/Source/camd_info', ...
'../../CAMD/Source/camd_order', ...
'../../CAMD/Source/camd_postorder', ...
'../../CAMD/Source/camd_preprocess', ...
'../../CAMD/Source/camd_valid' } ;
ccolamd_c_src = {
'../../CCOLAMD/Source/ccolamd', ...
'../../CCOLAMD/Source/ccolamd_global' } ;
metis_c_src = {
'Lib/balance', ...
'Lib/bucketsort', ...
'Lib/ccgraph', ...
'Lib/coarsen', ...
'Lib/compress', ...
'Lib/debug', ...
'Lib/estmem', ...
'Lib/fm', ...
'Lib/fortran', ...
'Lib/frename', ...
'Lib/graph', ...
'Lib/initpart', ...
'Lib/kmetis', ...
'Lib/kvmetis', ...
'Lib/kwayfm', ...
'Lib/kwayrefine', ...
'Lib/kwayvolfm', ...
'Lib/kwayvolrefine', ...
'Lib/match', ...
'Lib/mbalance2', ...
'Lib/mbalance', ...
'Lib/mcoarsen', ...
'Lib/memory', ...
'Lib/mesh', ...
'Lib/meshpart', ...
'Lib/mfm2', ...
'Lib/mfm', ...
'Lib/mincover', ...
'Lib/minitpart2', ...
'Lib/minitpart', ...
'Lib/mkmetis', ...
'Lib/mkwayfmh', ...
'Lib/mkwayrefine', ...
'Lib/mmatch', ...
'Lib/mmd', ...
'Lib/mpmetis', ...
'Lib/mrefine2', ...
'Lib/mrefine', ...
'Lib/mutil', ...
'Lib/myqsort', ...
'Lib/ometis', ...
'Lib/parmetis', ...
'Lib/pmetis', ...
'Lib/pqueue', ...
'Lib/refine', ...
'Lib/separator', ...
'Lib/sfm', ...
'Lib/srefine', ...
'Lib/stat', ...
'Lib/subdomains', ...
'Lib/timing', ...
'Lib/util' } ;
for i = 1:length (metis_c_src)
metis_c_src {i} = [metis_path '/' metis_c_src{i}] ;
end
cholmod_c_src = {
'../../CHOLMOD/Core/cholmod_aat', ...
'../../CHOLMOD/Core/cholmod_add', ...
'../../CHOLMOD/Core/cholmod_band', ...
'../../CHOLMOD/Core/cholmod_change_factor', ...
'../../CHOLMOD/Core/cholmod_common', ...
'../../CHOLMOD/Core/cholmod_complex', ...
'../../CHOLMOD/Core/cholmod_copy', ...
'../../CHOLMOD/Core/cholmod_dense', ...
'../../CHOLMOD/Core/cholmod_error', ...
'../../CHOLMOD/Core/cholmod_factor', ...
'../../CHOLMOD/Core/cholmod_memory', ...
'../../CHOLMOD/Core/cholmod_sparse', ...
'../../CHOLMOD/Core/cholmod_transpose', ...
'../../CHOLMOD/Core/cholmod_triplet', ...
'../../CHOLMOD/Check/cholmod_check', ...
'../../CHOLMOD/Check/cholmod_read', ...
'../../CHOLMOD/Check/cholmod_write', ...
'../../CHOLMOD/Cholesky/cholmod_amd', ...
'../../CHOLMOD/Cholesky/cholmod_analyze', ...
'../../CHOLMOD/Cholesky/cholmod_colamd', ...
'../../CHOLMOD/Cholesky/cholmod_etree', ...
'../../CHOLMOD/Cholesky/cholmod_factorize', ...
'../../CHOLMOD/Cholesky/cholmod_postorder', ...
'../../CHOLMOD/Cholesky/cholmod_rcond', ...
'../../CHOLMOD/Cholesky/cholmod_resymbol', ...
'../../CHOLMOD/Cholesky/cholmod_rowcolcounts', ...
'../../CHOLMOD/Cholesky/cholmod_rowfac', ...
'../../CHOLMOD/Cholesky/cholmod_solve', ...
'../../CHOLMOD/Cholesky/cholmod_spsolve', ...
'../../CHOLMOD/Supernodal/cholmod_super_numeric', ...
'../../CHOLMOD/Supernodal/cholmod_super_solve', ...
'../../CHOLMOD/Supernodal/cholmod_super_symbolic' } ;
cholmod_c_partition_src = {
'../../CHOLMOD/Partition/cholmod_ccolamd', ...
'../../CHOLMOD/Partition/cholmod_csymamd', ...
'../../CHOLMOD/Partition/cholmod_camd', ...
'../../CHOLMOD/Partition/cholmod_metis', ...
'../../CHOLMOD/Partition/cholmod_nesdis' } ;
% SuiteSparseQR does not need the MatrixOps or Modify modules of CHOLMOD
% cholmod_unused = {
% '../../CHOLMOD/MatrixOps/cholmod_drop', ...
% '../../CHOLMOD/MatrixOps/cholmod_horzcat', ...
% '../../CHOLMOD/MatrixOps/cholmod_norm', ...
% '../../CHOLMOD/MatrixOps/cholmod_scale', ...
% '../../CHOLMOD/MatrixOps/cholmod_sdmult', ...
% '../../CHOLMOD/MatrixOps/cholmod_ssmult', ...
% '../../CHOLMOD/MatrixOps/cholmod_submatrix', ...
% '../../CHOLMOD/MatrixOps/cholmod_vertcat', ...
% '../../CHOLMOD/MatrixOps/cholmod_symmetry', ...
% '../../CHOLMOD/Modify/cholmod_rowadd', ...
% '../../CHOLMOD/Modify/cholmod_rowdel', ...
% '../../CHOLMOD/Modify/cholmod_updown' } ;
% SuiteSparseQR source code, and mex support file
spqr_cpp_src = {
'../Source/spqr_parallel', ...
'../Source/spqr_1colamd', ...
'../Source/spqr_1factor', ...
'../Source/spqr_1fixed', ...
'../Source/spqr_analyze', ...
'../Source/spqr_append', ...
'../Source/spqr_assemble', ...
'../Source/spqr_cpack', ...
'../Source/spqr_csize', ...
'../Source/spqr_cumsum', ...
'../Source/spqr_debug', ...
'../Source/spqr_factorize', ...
'../Source/spqr_fcsize', ...
'../Source/spqr_freefac', ...
'../Source/spqr_freenum', ...
'../Source/spqr_freesym', ...
'../Source/spqr_front', ...
'../Source/spqr_fsize', ...
'../Source/spqr_happly', ...
'../Source/spqr_happly_work', ...
'../Source/spqr_hpinv', ...
'../Source/spqr_kernel', ...
'../Source/spqr_larftb', ...
'../Source/spqr_panel', ...
'../Source/spqr_rconvert', ...
'../Source/spqr_rcount', ...
'../Source/spqr_rhpack', ...
'../Source/spqr_rmap', ...
'../Source/spqr_rsolve', ...
'../Source/spqr_shift', ...
'../Source/spqr_stranspose1', ...
'../Source/spqr_stranspose2', ...
'../Source/spqr_trapezoidal', ...
'../Source/spqr_type', ...
'../Source/spqr_tol', ...
'../Source/spqr_maxcolnorm', ...
'../Source/SuiteSparseQR_qmult', ...
'../Source/SuiteSparseQR', ...
'../Source/SuiteSparseQR_expert', ...
'../MATLAB/spqr_mx' } ;
% SuiteSparse C source code, for MATLAB error handling
spqr_c_mx_src = { '../MATLAB/spqr_mx_error' } ;
% SuiteSparseQR mexFunctions
spqr_mex_cpp_src = { 'spqr', 'spqr_qmult', 'spqr_solve', 'spqr_singletons' } ;
if (pc)
% Windows does not have drand48 and srand48, required by METIS. Use
% drand48 and srand48 in CHOLMOD/MATLAB/Windows/rand48.c instead.
% Also provide Windows with an empty <strings.h> include file.
obj_extension = '.obj' ;
cholmod_c_src = [cholmod_c_src {'..\..\CHOLMOD\MATLAB\Windows\rand48'}] ;
include = [include ' -I..\..\CHOLMOD\MATLAB\Windows'] ;
else
obj_extension = '.o' ;
end
lib = strrep (lib, '/', filesep) ;
include = strrep (include, '/', filesep) ;
% compile each library source file
obj = '' ;
c_source = [amd_c_src colamd_c_src cholmod_c_src spqr_c_mx_src ] ;
if (have_metis)
c_source = [c_source cholmod_c_partition_src ccolamd_c_src ] ;
c_source = [c_source camd_c_src metis_c_src] ;
end
cpp_source = spqr_cpp_src ;
kk = 0 ;
for f = cpp_source
ff = strrep (f {1}, '/', filesep) ;
slash = strfind (ff, filesep) ;
if (isempty (slash))
slash = 1 ;
else
slash = slash (end) + 1 ;
end
o = ff (slash:end) ;
obj = [obj ' ' o obj_extension] ; %#ok
s = sprintf ('mex %s -O %s -c %s.cpp', d, include, ff) ;
kk = do_cmd (s, kk, details) ;
end
for f = c_source
ff = strrep (f {1}, '/', filesep) ;
slash = strfind (ff, filesep) ;
if (isempty (slash))
slash = 1 ;
else
slash = slash (end) + 1 ;
end
o = ff (slash:end) ;
obj = [obj ' ' o obj_extension] ; %#ok
s = sprintf ('mex %s -DDLONG -O %s -c %s.c', d, include, ff) ;
kk = do_cmd (s, kk, details) ;
end
% compile each mexFunction
for f = spqr_mex_cpp_src
s = sprintf ('mex %s -O %s %s.cpp', d, include, f{1}) ;
s = [s obj ' ' lib] ; %#ok
kk = do_cmd (s, kk, details) ;
end
% clean up
s = ['delete ' obj] ;
status = warning ('off', 'MATLAB:DELETE:FileNotFound') ;
delete rename.h
warning (status) ;
do_cmd (s, kk, details) ;
fprintf ('\nSuiteSparseQR successfully compiled\n') ;
%-------------------------------------------------------------------------------
function kk = do_cmd (s, kk, details)
%DO_CMD evaluate a command, and either print it or print a "."
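% For example (illustrative command string), do_cmd ('mex -O -c foo.cpp', kk, 0)
% evaluates the command and prints a single '.', while details = 1 echoes the
% full command text instead.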
if (details)
fprintf ('%s\n', s) ;
else
if (mod (kk, 60) == 0)
fprintf ('\n') ;
end
kk = kk + 1 ;
fprintf ('.') ;
end
eval (s) ;
%-------------------------------------------------------------------------------
function v = getversion
%GETVERSION determine the MATLAB version, and return it as a double.
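% For example, if version returns '7.6.0 (R2008a)', the leading numbers are
% parsed as [7 6 0] and combined as 7 + 6/10 + 0/100 = 7.60, so releases can
% be compared numerically.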
v = sscanf (version, '%d.%d.%d') ;
v = 10.^(0:-1:-(length(v)-1)) * v ;
|
github
|
lcnbeapp/beapp-master
|
UFpage.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/SuiteSparse/UFcollection/UFpage.m
| 18,515 |
utf_8
|
f7bbbbf0f99dcc348d3d968ee195f656
|
function UFpage (matrix, index, figures)
%UFPAGE create web page for a matrix in UF Sparse Matrix Collection
%
% Usage:
% UFpage (matrix, index, figures)
%
% matrix: id or name of matrix to create the web page for.
% index: the UF index, from UFget.
% figures: 1 if the figures are to be created, 0 otherwise
%
% Example:
%
% UFpage (267)
% UFpage ('HB/west0479')
%
% See also UFget, cspy, UFgplot, UFint.
% This function assumes that the mat/, MM/, and RB/ directories all reside in
% the same parent directory, given by the download directory specified by
% UFget_defaults.
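%
% For example (hypothetical layout, with the parent directory reported by
% UFget_defaults / UFlocation):
%    <topdir>/mat/HB/west0479.mat
%    <topdir>/MM/HB/west0479.tar.gz
%    <topdir>/RB/HB/west0479.tar.gz
%    <topdir>/matrices/HB/west0479.html    (page written by this function)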
% Copyright 2006-2007, Timothy A. Davis
%-------------------------------------------------------------------------------
% get inputs
%-------------------------------------------------------------------------------
if (nargin < 2)
index = UFget ;
end
if (nargin < 3)
figures = 1 ;
end
%-------------------------------------------------------------------------------
% get the Problem and its contents
%-------------------------------------------------------------------------------
Problem = UFget (matrix,index) ;
disp (Problem) ;
fullname = Problem.name ;
s = strfind (fullname, '/') ;
grp = fullname (1:s-1) ;
name = fullname (s+1:end) ;
id = Problem.id ;
% create the primary directory
[url topdir] = UFlocation ;
matrices = [topdir 'matrices'] ;
if (~exist (matrices, 'dir'))
mkdir (matrices) ;
end
% create the group directory
if (~exist ([matrices filesep grp], 'dir'))
mkdir ([matrices filesep grp]) ;
end
% determine the full path of the problem
fullpath = regexprep ([matrices filesep fullname], '[\/\\]', filesep) ;
ptitle = Problem.title ;
z = 0 ;
if (isfield (Problem, 'Zeros'))
z = nnz (Problem.Zeros) ;
Problem = rmfield (Problem, 'Zeros') ;
end
nblocks = index.nblocks (id) ;
ncc = index.ncc (id) ;
has_b = isfield (Problem, 'b') ;
has_x = isfield (Problem, 'x') ;
has_aux = isfield (Problem, 'aux') ;
if (has_b)
b = Problem.b ;
Problem = rmfield (Problem, 'b') ;
if (iscell (b))
b = sprintf ('cell %d-by-%d\n', size (b)) ;
elseif (issparse (b))
b = sprintf ('sparse %d-by-%d\n', size (b)) ;
else
b = sprintf ('full %d-by-%d\n', size (b)) ;
end
end
if (has_x)
x = Problem.x ;
Problem = rmfield (Problem, 'x') ;
if (iscell (x))
x = sprintf ('cell %d-by-%d\n', size (x)) ;
elseif (issparse (x))
x = sprintf ('sparse %d-by-%d\n', size (x)) ;
else
x = sprintf ('full %d-by-%d\n', size (x)) ;
end
end
nodename = [ ] ;
if (has_aux)
aux = Problem.aux ;
Problem = rmfield (Problem, 'aux') ;
auxfields = fields (aux) ;
has_coord = isfield (aux, 'coord') ;
has_nodename = isfield (aux, 'nodename') ;
auxs = cell (1, length (auxfields)) ;
for k = 1:length(auxfields)
siz = size (aux.(auxfields{k})) ;
if (iscell (aux.(auxfields{k})))
auxs {k} = sprintf ('cell %d-by-%d\n', siz) ;
elseif (issparse (aux.(auxfields{k})))
auxs {k} = sprintf ('sparse %d-by-%d\n', siz) ;
else
auxs {k} = sprintf ('full %d-by-%d\n', siz) ;
end
end
if (has_coord)
xyz = aux.coord ;
end
if (has_nodename)
nodename = aux.nodename ;
end
clear aux
else
has_coord = 0 ;
end
kind = Problem.kind ;
if (isfield (Problem, 'notes'))
notes = Problem.notes ;
else
notes = '' ;
end
au = Problem.author ;
ed = Problem.ed ;
da = Problem.date ;
m = index.nrows (id) ;
n = index.ncols (id) ;
nz = index.nnz (id) ;
nnzdiag = index.nnzdiag (id) ;
if (strfind (kind, 'graph'))
bipartite = ~isempty (strfind (kind, 'bipartite')) ;
directed = ~isempty (regexp (kind, '\<directed', 'once')) ;
else
bipartite = (m ~= n) ;
directed = (index.pattern_symmetry (id) < 1) ;
end
%-------------------------------------------------------------------------------
% create the pictures
%-------------------------------------------------------------------------------
if (figures)
try
A = Problem.A ;
catch
fprintf ('failed to extract A from Problem struct\n') ;
A = sparse (0) ;
end
clear Problem
%---------------------------------------------------------------------------
% create the gplot
%---------------------------------------------------------------------------
do_gplot = has_coord ;
if (do_gplot)
UFgplot (A, xyz, directed, nodename) ;
print (gcf, '-dpng', '-r128', [fullpath '_gplot.png']) ;
print (gcf, '-dpng', '-r512', [fullpath '_gplot_big.png']) ;
end
%---------------------------------------------------------------------------
% create the thumbnail picture
%---------------------------------------------------------------------------
cspy (A, 16) ;
print (gcf, '-dpng', '-r12', [fullpath '_thumb.png']) ;
%---------------------------------------------------------------------------
% create the regular picture
%---------------------------------------------------------------------------
cspy (A, 128) ;
print (gcf, '-dpng', '-r64', [fullpath '.png']) ;
%---------------------------------------------------------------------------
% create the dmperm figure, but not for graphs
%---------------------------------------------------------------------------
do_dmspy = (nblocks > 1) & (isempty (strfind (kind, 'graph'))) ;
if (do_dmspy)
try
cs_dmspy (A, 128) ;
title ('Dulmage-Mendelsohn permutation') ;
catch
fprintf ('dmspy failed\n') ;
delete ([fullpath '_dmperm.png']) ;
do_dmspy = 0 ;
end
        if (do_dmspy)
            % only write the figure if cs_dmspy succeeded above
            print (gcf, '-dpng', '-r64', [fullpath '_dmperm.png']) ;
        end
    end
%---------------------------------------------------------------------------
% create the ccspy figure
%---------------------------------------------------------------------------
do_scc = (ncc > 1) ;
if (do_dmspy && m == n && nnzdiag == n)
% don't do scc for a square matrix with zero-free diagonal
do_scc = 0 ;
end
if (do_scc)
try
ccspy (A, bipartite, 128) ;
if (bipartite)
title ('connected components of the bipartite graph') ;
else
title ('strongly connected components of the graph') ;
end
print (gcf, '-dpng', '-r64', [fullpath '_scc.png']) ;
catch
fprintf ('ccspy failed\n') ;
delete ([fullpath '_cc.png']) ;
do_scc = 0 ;
end
end
else
%---------------------------------------------------------------------------
% the plots already exist - check the files
%---------------------------------------------------------------------------
do_scc = exist ([fullpath '_scc.png'], 'file') ;
do_dmspy = exist ([fullpath '_dmperm.png'], 'file') ;
do_gplot = exist ([fullpath '_gplot.png'], 'file') ;
end
clear Problem
%-------------------------------------------------------------------------------
% create the web page for the matrix
%-------------------------------------------------------------------------------
f = fopen ([fullpath '.html'], 'w') ;
if (f < 0)
error ('unable to create matrix web page') ;
end
% add the header
fprintf (f,'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">\n');
fprintf (f, '<html lang="EN"><head>\n') ;
fprintf (f, '<meta http-equiv="content-type" content="text/html; charset=') ;
fprintf (f, 'iso-8859-1"><title>%s sparse matrix</title></head>\n', fullname) ;
fprintf (f, '<body bgcolor="#ffffff" link="#0021a5">\n') ;
% Yifan Hu's medium-sized graph plot, for all matrices
yifan_graphs = 'http://www.research.att.com/~yifanhu/GALLERY/GRAPHS/' ;
yifan_thumb = [yifan_graphs 'GIF_THUMBNAIL/'] ;
yifan_medium = [yifan_graphs 'GIF_SMALL/'] ;
yname = strrep (fullname, '/', '@') ;
fprintf (f, '\n<p>') ;
fprintf (f, ...
'<a href="%s%s.html"><img alt="%s graph" src="%s%s.gif"></a>\n\n', ...
yifan_medium, yname, fullname, yifan_medium, yname) ;
% small gifs go first:
% fprintf (f, '\n\n<p><img alt="%s" src="%s_thumb.png"></a>\n', fullname, name) ;
%
%
% fprintf (f, ...
% '<a href="%s%s.html"><img alt="%s graph" src="%s%s.gif"></a>\n', ...
% yifan_medium, yname, fullname, yifan_thumb, yname) ;
% matrix name and description
fprintf (f, '<p>Matrix: %s\n', fullname) ;
fprintf (f, '<p>Description: %s<p><hr>\n', ptitle) ;
% link to UF collection
fprintf (f, '<li><a href="..">UF Sparse Matrix Collection</a>\n') ;
% link to group
fprintf (f, '<li><a href="./index.html">Matrix group: %s</a>\n', grp) ;
% add link to mat/<group>/README.txt
fprintf (f, '<li><a href="../../mat/%s/README.txt">', grp) ;
fprintf (f, 'Click here for a description of the %s group.</a>\n', grp) ;
% link to all matrices
fprintf (f, '<li><a href="../list_by_id.html">') ;
fprintf (f, 'Click here for a list of all matrices</a>\n') ;
% link to all groups
fprintf (f, '<li><a href="../groups.html">') ;
fprintf (f, 'Click here for a list of all matrix groups</a>\n') ;
% download link for MATLAB format
fprintf (f, ...
'<li><a href="../../mat/%s.mat">download as a MATLAB mat-file</a>',...
fullname) ;
fsize (f, [topdir 'mat/' fullname '.mat']) ;
fprintf (f, 'Use <a href="%smat/UFget.html">UFget</a>(%d)', url, id) ;
fprintf (f, ' or UFget(''%s'') in MATLAB.\n', fullname) ;
% download link for Matrix Market format
fprintf (f, ...
'<li><a href="../../MM/%s.tar.gz">download in Matrix Market format</a>',...
fullname) ;
fsize (f, [topdir 'MM/' fullname '.tar.gz']) ;
% download link for Rutherford/Boeing format
fprintf (f, ...
'<li><a href="../../RB/%s.tar.gz">download in Rutherford/Boeing format</a>',...
fullname) ;
fsize (f, [topdir 'RB/' fullname '.tar.gz']) ;
%-------------------------------------------------------------------------------
% link to images
%-------------------------------------------------------------------------------
fprintf (f, '\n\n<p><img alt="%s" src="%s.png">\n', fullname, name) ;
% dmspy, if it exists
if (do_dmspy)
fprintf (f, '\n<p><img alt="dmperm of %s" src="%s_dmperm.png">\n', ...
fullname, name) ;
end
% ccspy, if it exists
if (do_scc)
fprintf (f, '<p><img alt="scc of %s" src="%s_scc.png">\n', ...
fullname, name) ;
end
% gplot, if it exists
if (do_gplot)
fprintf (f, '<p>') ;
fprintf (f, ...
'<a href="%s_gplot_big.png"><img alt="%s graph" src="%s_gplot.png"></a>\n', ...
name, fullname, name) ;
end
%-------------------------------------------------------------------------------
% table of matrix properties
%-------------------------------------------------------------------------------
fprintf (f, '<p><table border=1>\n') ;
stat (f, '<i><a href="../legend.html">Matrix properties</a></i>', '%s', ' ') ;
stat (f, 'number of rows', '%s', UFint (m)) ;
stat (f, 'number of columns', '%s', UFint (n)) ;
stat (f, 'nonzeros', '%s', UFint (nz)) ;
srank = index.sprank (id) ;
if (srank == min (m,n))
stat (f, 'structural full rank?', '%s', 'yes') ;
else
stat (f, 'structural full rank?', '%s', 'no') ;
end
stat (f, 'structural rank', '%s', UFint (srank)) ;
stat (f, '# of blocks from dmperm', '%s', UFint (nblocks)) ;
stat (f, '# strongly connected comp.', '%s', UFint (ncc)) ;
if (srank == min (m,n))
stat (f, 'entries not in dmperm blocks', '%s', ...
UFint (index.nzoff (id))) ;
end
stat (f, 'explicit zero entries', '%s', UFint (z)) ;
s = index.pattern_symmetry (id) ;
if (s == 1)
stat (f, 'nonzero pattern symmetry', '%s', 'symmetric') ;
else
stat (f, 'nonzero pattern symmetry', '%8.0f%%', s*100) ;
end
s = index.numerical_symmetry (id) ;
if (s == -1)
stat (f, 'numeric value symmetry', '%s', 'unknown') ;
elseif (s == 1)
stat (f, 'numeric value symmetry', '%s', 'symmetric') ;
else
stat (f, 'numeric value symmetry', '%8.0f%%', s*100) ;
end
% print the Rutherford/Boeing type
mtype = index.RBtype (id,:) ;
ss = '-' ;
if (mtype (1) == 'r')
ss = 'real' ;
elseif (mtype (1) == 'c')
ss = 'complex' ;
elseif (mtype (1) == 'i')
ss = 'integer' ;
elseif (mtype (1) == 'p')
ss = 'binary' ;
end
stat (f, 'type', '%s', ss) ;
ss = '-' ;
if (mtype (2) == 'r')
ss = 'rectangular' ;
elseif (mtype (2) == 'u')
ss = 'unsymmetric' ;
elseif (mtype (2) == 's')
ss = 'symmetric' ;
elseif (mtype (2) == 'h')
ss = 'Hermitian' ;
elseif (mtype (2) == 'z')
ss = 'skew-symmetric' ;
end
stat (f, 'structure', '%s', ss) ;
if (index.cholcand (id) == 1)
ss = 'yes' ;
elseif (index.cholcand (id) == 0)
ss = 'no' ;
else
ss = '?' ;
end
stat (f, 'Cholesky candidate?', '%s', ss) ;
s = index.posdef (id) ;
if (s == 0)
ss = 'no' ;
elseif (s == 1)
ss = 'yes' ;
else
ss = 'unknown' ;
end
stat (f, 'positive definite?', '%s', ss) ;
fprintf (f, '</table><p>\n') ;
%-------------------------------------------------------------------------------
% problem author, ed, kind
%-------------------------------------------------------------------------------
fprintf (f, '<p><table border=1>\n') ;
fprintf (f, '<tr><td>author</td><td align=left>%s</td>\n', au) ;
fprintf (f, '<tr><td>editor</td><td align=left>%s</td>\n', ed) ;
fprintf (f, '<tr><td>date</td><td align=left>%s</td>\n', da) ;
fprintf (f, '<tr><td><a href=../kind.html>kind</a></td><td align=left>%s</td>\n', kind);
s = index.isND (id) ;
if (s == 0)
ss = 'no' ;
else
ss = 'yes' ;
end
fprintf (f, '<tr><td>2D/3D problem?</td><td align=left>%s</td>\n', ss) ;
fprintf (f, '</table><p>\n') ;
%-------------------------------------------------------------------------------
% fields
%-------------------------------------------------------------------------------
if (has_b || has_x || has_aux)
fprintf (f, '<p><table border=1>\n') ;
stat (f, 'Additional fields', '%s', 'size and type') ;
if (has_b)
stat (f, 'b', '%s', b) ;
end
if (has_x)
stat (f, 'x', '%s', x) ;
end
if (has_aux)
for k = 1:length(auxfields)
stat (f, auxfields{k}, '%s', char (auxs{k})) ;
end
end
fprintf (f, '</table><p>\n') ;
end
%-------------------------------------------------------------------------------
% Notes
%-------------------------------------------------------------------------------
if (~isempty (notes))
fprintf (f, '<p>Notes:<p><pre>\n') ;
for k = 1:size(notes,1)
fprintf (f, '%s\n', notes (k,:)) ;
end
fprintf (f, '</pre>\n') ;
end
%-------------------------------------------------------------------------------
% ordering statistics
%-------------------------------------------------------------------------------
fprintf (f, '<p><table border=1>\n') ;
if (nblocks == 1 || index.nzoff (id) == -2)
stat (f, ...
'<i><a href="../legend.html">Ordering statistics:</a></i>', ...
'%s', '<i>AMD</i>', '<i>METIS</i>') ;
if (index.amd_lnz (id) > -2)
stat (f, 'nnz(chol(P*(A+A''+s*I)*P''))', '%s', ...
UFint (index.amd_lnz (id)), ...
UFint (index.metis_lnz (id))) ;
stat (f, 'Cholesky flop count', '%7.1e', ...
index.amd_flops (id), ...
index.metis_flops (id)) ;
stat (f, 'nnz(L+U), no partial pivoting', '%s', ...
UFint (2*index.amd_lnz (id) - min(m,n)), ...
UFint (2*index.metis_lnz (id) - min(m,n))) ;
end
stat (f, 'nnz(V) for QR, upper bound nnz(L) for LU', '%s', ...
UFint (index.amd_vnz (id)), ...
UFint (index.metis_vnz (id))) ;
stat (f, 'nnz(R) for QR, upper bound nnz(U) for LU', '%s', ...
UFint (index.amd_rnz (id)), ...
UFint (index.metis_rnz (id))) ;
else
stat (f, ...
'<i><a href="../legend.html">Ordering statistics:</a></i>', ...
'%s', '<i>AMD</i>', '<i>METIS</i>', '<i>DMPERM+</i>') ;
if (index.amd_lnz (id) > -2)
stat (f, 'nnz(chol(P*(A+A''+s*I)*P''))', '%s', ...
UFint (index.amd_lnz (id)), ...
UFint (index.metis_lnz (id)), ...
UFint (index.dmperm_lnz (id))) ;
stat (f, 'Cholesky flop count', '%7.1e', ...
index.amd_flops (id), ...
index.metis_flops (id), ...
index.dmperm_flops (id)) ;
stat (f, 'nnz(L+U), no partial pivoting', '%s', ...
UFint (2*index.amd_lnz (id) - min(m,n)), ...
UFint (2*index.metis_lnz (id) - min(m,n)), ...
UFint (index.dmperm_lnz (id) + index.dmperm_unz (id)-min(m,n))) ;
end
stat (f, 'nnz(V) for QR, upper bound nnz(L) for LU', '%s', ...
UFint (index.amd_vnz (id)), ...
UFint (index.metis_vnz (id)), ...
UFint (index.dmperm_vnz (id))) ;
stat (f, 'nnz(R) for QR, upper bound nnz(U) for LU', '%s', ...
UFint (index.amd_rnz (id)), ...
UFint (index.metis_rnz (id)), ...
UFint (index.dmperm_rnz (id))) ;
end
fprintf (f, '</table><p>\n') ;
%-------------------------------------------------------------------------------
% note regarding orderings
%-------------------------------------------------------------------------------
if (z > 0)
fprintf (f, '<p><i>Note that all matrix statistics (except nonzero');
fprintf (f, ' pattern symmetry) exclude the %d explicit zero entries.\n',z);
    fprintf (f, '</i>\n') ;
end
%-------------------------------------------------------------------------------
% etc ...
%-------------------------------------------------------------------------------
fprintf (f, '<p><p><i>Maintained by Tim Davis, last updated %s.', date) ;
fprintf (f, '<br>Matrix pictures by <a href=') ;
fprintf (f, '"%sCSparse/CSparse/MATLAB/CSparse/cspy.m">cspy</a>, a ', url) ;
fprintf (f, 'MATLAB function in the <a href="%sCSparse">CSparse</a>', url) ;
fprintf (f, ' package.<br>\n') ;
fprintf (f, 'Matrix graphs by Yifan Hu, AT&T Labs Visualization Group.\n') ;
fprintf (f, '</body>\n</html>\n') ;
fclose (f) ;
%-------------------------------------------------------------------------------
function fsize (f, filename)
% fsize: print the filesize
d = dir (regexprep (filename, '[\/\\]', filesep)) ;
if (isempty (d))
    fprintf (f, '\n') ;
elseif (d.bytes < 1024)
fprintf (f, ', file size: %4d bytes.\n', d.bytes) ;
elseif (d.bytes > 2^20)
fprintf (f, ', file size: %8.0f MB.\n', d.bytes / 2^20) ;
else
fprintf (f, ', file size: %8.0f KB.\n', d.bytes / 2^10) ;
end
%-------------------------------------------------------------------------------
function stat (f, what, format, value1, value2, value3)
% stat: print one row of a table
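% For example (illustrative value), stat (f, 'nonzeros', '%s', '1,887') writes
%    <tr><td>nonzeros</td><td align=right>1,887</td>
%    </tr>
% and the optional 2nd and 3rd values add further right-aligned cells.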
s = val (format, value1) ;
fprintf (f, '<tr><td>%s</td><td align=right>%s</td>\n', what, s) ;
if (nargin > 4)
fprintf (f, '<td align=right>%s</td>\n', val (format, value2)) ;
end
if (nargin > 5)
fprintf (f, '<td align=right>%s</td>\n', val (format, value3)) ;
end
fprintf (f, '</tr>\n') ;
%-------------------------------------------------------------------------------
function s = val (format, value)
% val: print a value in a table
if (~ischar (value) && value < 0)
s = '-' ;
else
s = sprintf (format, value) ;
end
|
github
|
lcnbeapp/beapp-master
|
UFread.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/SuiteSparse/UFcollection/UFread.m
| 13,594 |
utf_8
|
f661a98aa4deaee771002bd338d97870
|
function Problem = UFread (directory, tmp)
%UFREAD read a Problem from a directory of files created by UFwrite, in
%    either Matrix Market or Rutherford/Boeing format.  See UFwrite for a
%    description of the Problem struct.
%
% Usage: Problem = UFread (directory)
%
% Example:
%
% load west0479
% clear Problem
% Problem.name = 'HB/west0479' ;
% Problem.title = '8 STAGE COLUMN SECTION, ALL SECTIONS RIGOROUS (CHEM.ENG.)';
% Problem.A = west0479 ;
% Problem.id = 267 ; % the id number of west0479 in the UF collection
% Problem.date = '1983' ;
% Problem.author = 'A. Westerberg' ;
% Problem.ed = 'I. Duff, R. Grimes, J. Lewis'
% Problem.kind = 'chemical process simulation problem' ;
% UFwrite (Problem, 'RB/', '') ;
% Prob3 = UFread ('RB/HB/west0479')
% isequal (Problem, Prob3)
%
% This part of the example requires CHOLMOD, for the mread function:
%
% UFwrite (Problem, 'MM/') ;
% Prob2 = UFread ('MM/HB/west0479')
% isequal (Problem, Prob2)
%
% You can also compare this Problem with the version in the UF Sparse Matrix
% Collection, with UFget(267) or UFget('HB/west0479'). Note that this includes
% the 22 explicit zero entries present in the west0479 Harwell/Boeing matrix,
% but not included in the MATLAB west0479.mat demo matrix. Those entries are
% present in the UF Sparse Matrix Collection. This example assumes your current
% directory is the RBio directory, containing the west0479 problem in the
% RBio/Test directory:
%
% Prob5 = UFget ('HB/west0479')
% Prob6 = UFread ('Test/west0479')
% isequal (Prob5, Prob6)
%
% The directory can be a compressed tar file of the form "name.tar.gz", in
% which case the tarfile is uncompressed into a temporary directory, and
% the temporary directory is deleted when done. The '.tar.gz' should not be
% part of the directory argument. In this case, a 2nd input argument can be
% provided: Problem = UFread (directory, tmp). The problem is extracted into
% the tmp directory. If tmp is not present, the output of the tempdir function
% is used instead.
%
% Note that UFget is much faster than UFread. UFread is useful if you are
% short on disk space, and want to have just one copy of the collection that
% can be read by MATLAB (via UFread) and a non-MATLAB program (the MM or RB
% versions of the collection).
%
% See also UFwrite, mread, mwrite, RBread, RBwrite, UFget, untar, tempdir.
% Optionally uses the CHOLMOD mread mexFunction, for reading Problems in
% Matrix Market format.
% Copyright 2006-2007, Timothy A. Davis, Univ. of Florida
%-------------------------------------------------------------------------------
% determine the Problem name from the directory name
%-------------------------------------------------------------------------------
directory = regexprep (directory, '[\/\\]', filesep) ;
t = find (directory == filesep) ;
if (isempty (t))
name = directory ;
else
name = directory (t(end)+1:end) ;
end
%-------------------------------------------------------------------------------
% open the directory, or untar the tar.gz file
%-------------------------------------------------------------------------------
d = dir (directory) ;
is_tar = 0 ;
if (isempty (d))
% look for a .tar.gz file
if (nargin < 2)
tmpdir = [tempname '_UFread_' name] ;
else
tmpdir = [tmp filesep name] ;
end
try
% try untaring the problem
untar ([directory '.tar.gz'], tmpdir) ;
catch
% untar failed, make sure tmpdir is deleted
try
rmdir (tmpdir, 's') ;
catch
end
error (['unable to read problem: ' directory]) ;
end
directory = [tmpdir filesep name] ;
d = dir (directory) ;
is_tar = 1 ;
end
%-------------------------------------------------------------------------------
% read the problem
%-------------------------------------------------------------------------------
try
%---------------------------------------------------------------------------
% get name, title, id, kind, date, author, editor, notes from master file
%---------------------------------------------------------------------------
masterfile = [directory filesep name] ;
[Problem notes RB] = get_header (masterfile) ;
%---------------------------------------------------------------------------
% get the A and Zero matrices from the master file and add to the Problem
%---------------------------------------------------------------------------
if (RB)
% read in the primary Rutherford/Boeing file
[Problem.A Zeros] = RBread ([masterfile '.rb']) ;
else
% read in the primary Matrix Market file. Get patterns as binary.
[Problem.A Zeros] = mread ([masterfile '.mtx'], 1) ;
end
if (nnz (Zeros) > 0)
Problem.Zeros = Zeros ;
end
% add the notes after A and Zeros
if (~isempty (notes))
Problem.notes = notes ;
end
namelen = length (name) ;
%---------------------------------------------------------------------------
% read b, x, aux (incl. any aux.cell sequences), stored as separate files
%---------------------------------------------------------------------------
for k = 1:length(d)
% get the next filename in the directory
file = d(k).name ;
fullfilename = [directory filesep file] ;
if (length (file) < length (name) + 1)
% unrecognized file; skip it
continue
elseif (strcmp (file, [name '.mtx']))
% skip the master file; already read in
continue
elseif (strcmp (file, [name '_b.mtx']))
% read in b as a Matrix Market file
Problem.b = mtx_read (fullfilename, RB) ;
elseif (strcmp (file, [name '_x.mtx']))
% read in x as a Matrix Market file
Problem.x = mtx_read (fullfilename, RB) ;
elseif (strcmp (file, [name '_b.rb']))
% read in b as a Rutherford/Boeing file
Problem.b = RBread (fullfilename) ;
elseif (strcmp (file, [name '_x.rb']))
% read in x as a Rutherford/Boeing file
Problem.x = RBread (fullfilename) ;
elseif (strcmp (file (1:length(name)+1), [name '_']))
% read in an aux component, in the form name_whatever.mtx
thedot = find (file == '.', 1, 'last') ;
ext = file (thedot:end) ;
if (strcmp (ext, '.txt'))
% get a txt file
% first, determine the longest line in the file
f = fopen (fullfilename) ;
if (f < 0)
error (['cannot open ' fullfilename]) ;
end
len = 0 ;
nline = 0 ;
while (1)
s = fgetl (f) ;
if (~ischar (s))
break
end
len = max (len, length (s)) ;
nline = nline + 1 ;
end
fclose (f) ;
% next, read in the file as a char array
C = repmat (' ', nline, len) ;
f = fopen (fullfilename) ;
if (f < 0)
error (['cannot open ' fullfilename]) ;
end
i = 0 ;
while (1)
s = fgetl (f) ;
if (~ischar (s))
break
end
i = i + 1 ;
len = length (s) ;
if (len > 0)
C (i, 1:len) = s ;
end
end
fclose (f) ;
elseif (strcmp (ext, '.mtx'))
% read a full or sparse auxiliary matrix in the Matrix Market
% form, or a full auxiliary matrix in the Rutherford/Boeing form.
C = mtx_read (fullfilename, RB) ;
elseif (strcmp (ext, '.rb'))
% read in a sparse matrix, for a Rutherford/Boeing collection
C = RBread (fullfilename) ;
else
% this file is not recognized - skip it.
C = [ ] ;
end
% determine the name of the component and place it in the Problem
if (~isempty (C))
% Determine if this is part of an aux.whatever cell sequence.
% These filenames have the form name_whatever_#.mtx, where name
% is the name of the Problem, and # is a number (1 or more
                % digits) greater than zero.  If # = i, this becomes the
% aux.whatever{i} matrix.
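                % For example (hypothetical file names): name_rhs_1.mtx and
                % name_rhs_2.mtx become Problem.aux.rhs{1} and
                % Problem.aux.rhs{2}, whereas name_coord.mtx becomes the
                % plain matrix Problem.aux.coord.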
suffix = file (namelen+2:thedot-1) ;
t = find (suffix == '_', 1, 'last') ;
what = suffix (1:t-1) ;
i = str2num (suffix (t+1:end)) ; %#ok
if (~isempty (i) && i > 0 && ~isempty (what))
% this is part of aux.whatever{i} cell array
Problem.aux.(what) {i,1} = C ;
elseif (~isempty (suffix))
% this is not a cell, simply an aux.whatever matrix
Problem.aux.(suffix) = C ;
end
end
end
end
%---------------------------------------------------------------------------
% delete the uncompressed version of the tar file
%---------------------------------------------------------------------------
if (is_tar)
rmdir (tmpdir, 's') ;
end
catch
%---------------------------------------------------------------------------
% catch the error, delete the temp directory, and rethrow the error
%---------------------------------------------------------------------------
try
if (is_tar)
rmdir (tmpdir, 's') ;
end
catch
end
rethrow (lasterror) ;
end
%-------------------------------------------------------------------------------
% get_header: get the header of the master file (Group/name/name.txt or .mtx)
%-------------------------------------------------------------------------------
function [Problem, notes, RB] = get_header (masterfile)
% Get the name, title, id, kind, date, author, editor and notes from the master
% file. The name, title, and id are required. They appear as structured
% comments in the Matrix Market file (masterfile.mtx) or in the text file for
% a problem in Rutherford/Boeing format (masterfile.txt). RB is returned as
% 1 if the problem is in Rutherford/Boeing format, 0 otherwise.
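%
% A typical leading comment block in the master file looks like this (sketch;
% see print_header in UFwrite for the exact lines written):
%   % UF Sparse Matrix Collection, Tim Davis
%   % http://www.cise.ufl.edu/research/sparse/matrices/HB/west0479
%   % name: HB/west0479
%   % [8 STAGE COLUMN SECTION, ALL SECTIONS RIGOROUS (CHEM.ENG.)]
%   % id: 267
%   % kind: chemical process simulation problem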
% first assume it's in Matrix Market format
f = fopen ([masterfile '.mtx'], 'r') ;
if (f < 0)
% oops, that failed. This must be a problem in Rutherford/Boeing format
RB = 1 ;
f = fopen ([masterfile '.txt'], 'r') ;
if (f < 0)
% oops again, this is not a valid problem in the UF Sparse collection
error (['invalid problem: ' masterfile]) ;
end
else
% we found the Matrix Market file
RB = 0 ;
end
Problem = [ ] ;
notes = [ ] ;
while (1)
% get the next line
s = fgetl (f) ;
if (~ischar (s) || length (s) < 3 || s (1) ~= '%')
% end of file or end of leading comments ... no notes found
fclose (f) ;
[Problem notes] = valid_problem (Problem, [ ]) ;
return ;
end
% remove the leading '% ' and get the first token
s = s (3:end) ;
[t r] = strtok (s) ;
% parse the line
if (strcmp (t, 'name:'))
% get the Problem.name. It must be of the form Group/Name.
Problem.name = strtrim (r) ;
if (length (find (Problem.name == '/')) ~= 1)
fclose (f) ;
error (['invalid problem name ' Problem.name]) ;
end
elseif (s (1) == '[')
% get the Problem.title
k = find (s == ']', 1, 'last') ;
if (isempty (k))
fclose (f) ;
error ('invalid problem title') ;
end
Problem.title = s (2:k-1) ;
elseif (strcmp (t, 'id:'))
% get the Problem.id
Problem.id = str2num (r) ; %#ok
if (isempty (Problem.id) || Problem.id < 0)
fclose (f) ;
error ('invalid problem id') ;
end
elseif (strcmp (t, 'kind:'))
% get the Problem.kind
Problem.kind = strtrim (r) ;
elseif (strcmp (t, 'date:'))
% get the Problem.date
Problem.date = strtrim (r) ;
elseif (strcmp (t, 'author:'))
% get the Problem.author
Problem.author = strtrim (r) ;
elseif (strcmp (t, 'ed:'))
% get the Problem.ed
Problem.ed = strtrim (r) ;
elseif (strcmp (t, 'notes:'))
% get the notes, which always appear last
k = 0 ;
notes = [ ] ;
while (1)
% get the next line
s = fgetl (f) ;
if (~ischar (s) || length (s) < 2 || ~strcmp (s (1:2), '% '))
% end of file or end of notes ... convert notes to char array
fclose (f) ;
[Problem notes] = valid_problem (Problem, notes) ;
return ;
end
% add the line to the notes
k = k + 1 ;
notes {k} = s ; %#ok
end
end
end
%-------------------------------------------------------------------------------
% valid_problem: determine if a problem is valid, and finalizes the notes
%-------------------------------------------------------------------------------
function [Problem, notes] = valid_problem (Problem, notes)
% make sure the required fields (name, title, id, date, author, ed) are present.
% Convert notes to char, and strip off the leading '% ', inserted when the notes
% were printed in the Matrix Market file.
if (~isfield (Problem, 'name') || ~isfield (Problem, 'title') || ...
~isfield (Problem, 'id') || ~isfield (Problem, 'date') || ...
~isfield (Problem, 'author') || ~isfield (Problem, 'ed') || ...
~isfield (Problem, 'kind'))
error ('invalid Problem mfile') ;
end
if (~isempty (notes))
notes = char (notes) ;
notes = notes (:, 3:end) ;
end
%-------------------------------------------------------------------------------
% mtx_read: read a *.mtx file
%-------------------------------------------------------------------------------
% In the Rutherford/Boeing form, a *.mtx file is used only for full matrices,
% using a tiny subset of the Matrix Market format. In the Matrix Market form,
% the *.mtx is used for all b, x, and aux matrices (both full and sparse).
function C = mtx_read (file, RB)
if (~RB)
% Get a Matrix Market file, using full Matrix Market features.
C = mread (file, 1) ;
else
    % mread is not required here.  The RB format uses a tiny subset of the Matrix
% Market format for full matrices: just the one header line, and no comment
% or blank lines permitted. Allowable header lines are:
% %%MatrixMarket matrix array real general
% %%MatrixMarket matrix array complex general
% This tiny subset can be read by UFfull_read.
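    % For example, a full 2-by-1 real column [1.5 ; -2] is stored as (sketch):
    %   %%MatrixMarket matrix array real general
    %   2 1
    %   1.5
    %   -2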
C = UFfull_read (file) ;
end
|
github
|
lcnbeapp/beapp-master
|
UFwrite.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/SuiteSparse/UFcollection/UFwrite.m
| 17,757 |
utf_8
|
c4256659f09dc533119bcaa94146d846
|
function UFwrite (Problem, Master, arg3, arg4)
%UFWRITE write a Problem to a directory of text files in Matrix Market or
%    Rutherford/Boeing format.  The Problem can be read from the files back
%    into MATLAB via UFread.
% See http://www.cise.ufl.edu/research/sparse/matrices for the UF Sparse
% Matrix Collection home page. The Problem directory is optionally compressed
% via tar and gzip. Arguments 3 and 4, below, are optional and can appear in
% any order.
%
% UFwrite (Problem) % Matrix Market format, no tar, use current dir.
%
% The following usages write the Problem into the Master/Group/Name directory,
% where Problem.name = 'Group/Name' is given in the Problem.
%
% UFwrite (Problem, Master) % Matrix Market, no tar
% UFwrite (Problem, Master, 'MM') % ditto
% UFwrite (Problem, Master, 'RB') % Rutherford/Boeing, no tar
% UFwrite (Problem, Master, 'tar') % Matrix Market, with tar
% UFwrite (Problem, Master, 'MM', 'tar') % ditto
% UFwrite (Problem, Master, 'RB', 'tar') % Rutherford/Boeing, with tar
%
% Problem is a struct, in the UF Sparse Matrix format (see below). Master is
% the top-level directory in which directory containing the problem will be
% placed. Master defaults to the current working directory if not present (an
% empty string also means the current directory will be used).
%
% The following fields are always present in a UF Sparse Matrix Problem:
%
% name the group directory and problem name (i.e. 'HB/arc130')
%
% title short descriptive title
%
% A an m-by-n sparse matrix, real or complex
%
% id an integer in the range of 1 to the # of problems in the
% collection. Identical to the line number of
% http://www.cise.ufl.edu/research/sparse/mat/UF_Listing.txt
% containing the Problem.name. New matrices are always added at
% the end of this list.
%
% date a string containing the date the matrix was created, or added
% to the collection if the creating date is unknown (but is likely
% close to the creation date); empty string otherwise.
%
% author a string containing the name of the author; the computational
% scientist who created the matrix from his or her application.
% Empty string, or "author unknown" if unknown.
%
% ed a string containing the name of the editor/collector; the person
% who acquired the matrix from the author, for inclusion in this
% (or other) sparse matrix / graph collections.
%
% kind a string (i.e. 'directed graph') describing the type of problem;
% a list of words from a well-defined set (see the UF Sparse
% Matrix Collection home page for a description, or
% http://www.cise.ufl.edu/research/sparse/matrices/kind.html).
%
% A Problem struct may also have the following optional fields.
%
% Zeros pattern of explicit zero entries in A as a binary m-by-n matrix.
% These entries are provided by the matrix submittor with zero
% numerical value. MATLAB drops explicit zero entries from any
% sparse matrix. These entries can be important to the problem,
% if (for example) the matrix is first in a series of matrices
% arising in a nonlinear solver (those entries will become nonzero
% later). These entries are thus kept in the binary matrix Zeros.
%
% b right-hand-side, any size, real or complex, full or sparse
%
% x supposed solution to A*x=b, any size, real or complex, full or
% sparse
%
% notes a character array with notes about the problem.
%
% aux a struct, containing auxiliary information. The contents of
% this struct are problem dependent, but its fields must not
% end with '_[0-9]*' where [0-9]* means zero or more digits, nor
% can there be an aux.b or aux.x matrix (there can be aux.b
% and aux.x cells). The aux struct must not include any structs,
% just matrices and cell arrays of matrices. The matrices in aux
% can be full or sparse, and real, complex, or char.
%
% -------------------------------------------------
% for Problems written in Rutherford/Boeing format:
% -------------------------------------------------
%
% The Problem.name (including the full 'Group/Name'), date, author, and editor
% define the Rutherford/Boeing title (first line of the file), followed by a
% '|' in the 72nd column, and then up to 8 characters of the key. If the key
% is an empty string, the Problem.id is used as the key. The A and Zeros
% matrices are merged and written to this file. The full name, title, id,
% kind, date, author, editor, and notes are written to a file of the same name
% as the primary file, but with a .txt extension.
%
% Additional Rutherford/Boeing files are created for b, x, and each sparse
% matrix in aux. Full arrays are written using a tiny subset of the Matrix
% Market format. The first line of the file is the header, either of:
%
% %%MatrixMarket array real general
% %%MatrixMarket array complex general
%
% The 2nd row contains the # of rows and columns, and subsequent lines contain
% one matrix entry each (two values if complex) listed in column-major order.
% No comments or blank lines are permitted. The header is ignored when the
% matrix is read back in by UFread; the real/complex case is determined by how
% many entries appear on each line. You can of course read these files with
% mread, the Matrix Market reader. The header is added only to make it easier
% for functions *other* than UFread to read and understand the data (the file
% can be read in by mread, for example, but mread is not required). Thus, a
% complete Rutherford/Boeing directory can be read/written via UFread/UFwrite,
% without requiring the installation of mread/mwrite (in CHOLMOD), or
% mmread/mmwrite (M-file functions from NIST that read/write a Matrix Market
% file).
%
% ---------------------------------------------
% for Problems written in Matrix Market format:
% ---------------------------------------------
%
% The name, title, A, id, Zeros, kind, date, author, editor, and notes fields of
% the Problem are written to the primary Matrix Market file, with a .mtx
% extension. Additional Matrix Market files are created for b (as name_b),
% x (as name_x), and each sparse or full matrix in aux.
%
% -----------------
% for both formats:
% -----------------
%
% A matrix Problem.aux.whatever is written out as name_whatever.xxx, without
% the 'aux' part. If Problem.aux.whatever is a char array, it is written as
% the file name_whatever.txt, with one line per row of the char array (trailing
% spaces in each line are not printed). If aux.whatever is a cell array, each
% entry aux.whatever{i} is written as the file name_whatever_<i>.xxx
% (name_whatever_1.mtx, name_whatever_2.mtx, etc). All files are placed in the
% single directory, given by the Problem.name (Group/Name, or 'HB/arc130' for
% example). Each directory can only hold one MATLAB Problem struct of the UF
% Sparse Matrix Collection.
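%
% For example (hypothetical fields), Problem.aux.coord is written as
% arc130_coord.mtx, and Problem.aux.rhs{2} as arc130_rhs_2.mtx, both placed in
% the single directory HB/arc130.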
%
% Example:
%
% Problem = UFget ('HB/arc130') % get the HB/arc130 MATLAB Problem
% UFwrite (Problem) ; % write a MM version in current directory
% UFwrite (Problem, 'MM') ; % write a MM version in MM/HB/arc130
% UFwrite (Problem, '', 'RB') ; % write a RB version in current directory
%
% See also mwrite, mread, RBwrite, RBread, UFread, UFget, tar
% Optionally uses the CHOLMOD mwrite mexFunction, for writing Problems in
% Matrix Market format.
% Copyright 2006-2007, Timothy A. Davis, University of Florida.
%-------------------------------------------------------------------------------
% check inputs
%-------------------------------------------------------------------------------
if (nargin < 2)
% place the result in the current directory
Master = '' ;
end
if (nargin < 3)
arg3 = '' ;
end
if (nargin < 4)
arg4 = '' ;
end
arg3 = lower (arg3) ;
arg4 = lower (arg4) ;
do_tar = (strcmp (arg3, 'tar') | strcmp (arg4, 'tar')) ;
RB = (strcmp (arg3, 'rb') | strcmp (arg4, 'rb')) ;
Master = regexprep (Master, '[\/\\]', filesep) ;
if (~isempty (Master) && Master (end) ~= filesep)
Master = [Master filesep] ;
end
%-------------------------------------------------------------------------------
% create the problem directory. Do not report any errors
%-------------------------------------------------------------------------------
t = find (Problem.name == '/') ;
group = Problem.name (1:t-1) ;
name = Problem.name (t+1:end) ;
groupdir = [Master group] ;
probdir = [groupdir filesep name] ;
probname = [probdir filesep name] ;
s = warning ('query', 'MATLAB:MKDIR:DirectoryExists') ; % get current state
warning ('off', 'MATLAB:MKDIR:DirectoryExists') ;
mkdir (groupdir) ;
mkdir (probdir) ;
warning (s) ; % restore state
%-------------------------------------------------------------------------------
% write the name, title, id, kind, date, author, ed, list of fields, and notes
%-------------------------------------------------------------------------------
cfile = [probname '.txt'] ;
if (RB)
prefix = '%' ;
else
prefix = '' ;
end
cf = fopen (cfile, 'w') ;
print_header (cf, prefix, Problem.name) ;
fprintf (cf, '%s [%s]\n', prefix, Problem.title) ;
fprintf (cf, '%s id: %d\n', prefix, Problem.id) ;
fprintf (cf, '%s date: %s\n', prefix, Problem.date) ;
fprintf (cf, '%s author: %s\n', prefix, Problem.author) ;
fprintf (cf, '%s ed: %s\n', prefix, Problem.ed) ;
fprintf (cf, '%s fields:', prefix) ;
s = fields (Problem) ;
for k = 1:length(s)
fprintf (cf, ' %s', s {k}) ;
end
fprintf (cf, '\n') ;
if (isfield (Problem, 'aux'))
aux = Problem.aux ;
fprintf (cf, '%s aux:', prefix) ;
auxfields = fields (aux) ;
for k = 1:length(auxfields)
fprintf (cf, ' %s', auxfields {k}) ;
end
fprintf (cf, '\n') ;
else
auxfields = { } ;
end
fprintf (cf, '%s kind: %s\n', prefix, Problem.kind) ;
print_separator (cf, prefix) ;
if (isfield (Problem, 'notes'))
fprintf (cf, '%s notes:\n', prefix) ;
for k = 1:size (Problem.notes,1)
fprintf (cf, '%s %s\n', prefix, Problem.notes (k,:)) ;
end
print_separator (cf, prefix) ;
end
fclose(cf) ;
%-------------------------------------------------------------------------------
% write out the A and Z matrices to the RB or MM primary file
%-------------------------------------------------------------------------------
A = Problem.A ;
[m n] = size (A) ;
if (~issparse (A) || m == 0 || n == 0)
error ('A must be sparse and non-empty') ;
end
if (isfield (Problem, 'Zeros'))
Z = Problem.Zeros ;
if (any (size (A) ~= size (Z)) || ~isreal (Z) || ~issparse (Z))
error ('Zeros must have same size as A, and be sparse and real') ;
end
else
Z = [ ] ;
end
% use the Problem.id number as the RB key
key = sprintf ('%d', Problem.id) ;
if (RB)
% write the files in Rutherford/Boeing form
ptitle = [Problem.name '; ' Problem.date '; ' etal(Problem.author)] ;
ptitle = [ptitle '; ed: ' etal(Problem.ed)] ;
    % note that b, x, and aux are NOT written to the primary RB file
RBwrite ([probname '.rb'], A, Z, ptitle, key) ;
else
% write the files in Matrix Market form
mwrite ([probname '.mtx'], A, Z, cfile) ;
delete (cfile) ; % the comments file has been included in the .mtx file.
end
%-------------------------------------------------------------------------------
% write out the b and x matrices as separate files
%-------------------------------------------------------------------------------
if (isfield (Problem, 'b'))
write_component (probname, Problem.name, 'b', Problem.b, cfile, ...
prefix, RB, [key 'b']) ;
end
if (isfield (Problem, 'x'))
write_component (probname, Problem.name, 'x', Problem.x, cfile, ...
prefix, RB, [key 'x']) ;
end
%-------------------------------------------------------------------------------
% write out each component of aux, each in a separate file
%-------------------------------------------------------------------------------
for k = 1:length(auxfields)
what = auxfields {k} ;
X = aux.(what) ;
if (~iscell (X) && (strcmp (what, 'b') || strcmp (what, 'x')))
% aux.b or aux.x would get written out with the same filename as the
% Problem.b and Problem.x matrices, and read back in by UFread as
% Problem.b and Problem.x instead of aux.b and aux.x.
error (['invalid aux component: ' what]) ;
end
if (regexp (what, '_[0-9]*\>'))
% aux.whatever_42 would be written as the file name_whatever_42, which
        % would be interpreted as aux.whatever{42} when read back in by UFread.
error (['invalid aux component: ' what]) ;
end
if (iscell (X))
len = length (X) ;
for i = 1:len
% this problem includes a sequence of matrices in the new
% format (kind = 'sequence', and Problem.id > 1377).
write_component (probname, Problem.name, ...
sprintf (fmt (i, len), what, i) , X{i}, cfile, ...
prefix, RB, [key what(1) sprintf('%d', i)]) ;
end
else
% This is a non-cell component of aux. For an LP problem, this might
% be c, lo, or hi. For an Oberwolfach model reduction problem, this
% might be M, C, or E. For a graph in the Pajek collection, it could
% be a vector 'year', with the publication of each article in the graph.
% The possibilities are endless, and problem dependent. Adding new
% components to aux can be done without modifying UFread or UFwrite.
write_component (probname, Problem.name, what, X, cfile, ...
prefix, RB, [key what]) ;
end
end
%-------------------------------------------------------------------------------
% tar up the result, if requested
%-------------------------------------------------------------------------------
if (do_tar)
try
tar ([probdir '.tar.gz'], probdir) ;
rmdir (probdir, 's') ;
catch
warning ('SuiteSparse:UFwrite', ...
            'unable to create tar file; directory left uncompressed') ;
end
end
%-------------------------------------------------------------------------------
% fmt
%-------------------------------------------------------------------------------
function s = fmt (i, len)
% fmt: determine the format to use for the name of a component in an aux cell array
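% For example, sprintf (fmt (3, 5), 'rhs', 3) gives 'rhs_3', while
% sprintf (fmt (3, 250), 'rhs', 3) gives 'rhs_003', so the file names of a
% long sequence sort correctly.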
if (len < 10)
s = '%s_%d' ; % x1 through x9
elseif (len < 100)
if (i < 10)
s = '%s_0%d' ; % x01 through x09
else
s = '%s_%d' ; % x10 through x99
end
else
if (i < 10)
s = '%s_00%d' ; % x001 through x009
elseif (i < 100)
s = '%s_0%d' ; % x010 through x099
else
s = '%s_%d' ; % x100 through x999
end
end
%-------------------------------------------------------------------------------
% write_component
%-------------------------------------------------------------------------------
function write_component (probname, name, what, X, cfile, prefix, RB, key)
% write_component: write out a single component of the Problem to a file
if (isempty (X))
% empty matrices (one or more zero dimensions) are not supported
error (['invalid component: ' what ' (cannot be empty)']) ;
elseif (ischar (X))
% Write out a char array as a text file. Remove trailing spaces from the
% strings. Keep the leading spaces; they might be significant.
ff = fopen ([probname '_' what '.txt'], 'w') ;
for i = 1:size (X,1)
fprintf (ff, '%s\n', deblank (X (i,:))) ;
end
fclose (ff) ;
elseif (RB)
% write out a full or sparse matrix in Rutherford/Boeing format
if (issparse (X))
% write out the component as a Rutherford/Boeing sparse matrix
RBwrite ([probname '_' what '.rb'], X, [ ], [name '_' what], key) ;
else
% Write out a full matrix in column oriented form,
% using a tiny subset of the Matrix Market format for full matrices.
UFfull_write ([probname '_' what '.mtx'], X) ;
end
else
% write out the component in Matrix Market format (full or sparse)
cf = fopen (cfile, 'w') ;
print_header (cf, prefix, name, what) ;
print_separator (cf, prefix) ;
fclose(cf) ;
mwrite ([probname '_' what '.mtx'], X, cfile) ;
delete (cfile) ;
end
%-------------------------------------------------------------------------------
% print_separator
%-------------------------------------------------------------------------------
function print_separator (cf, prefix)
% print_separator: print a separator line in the comment file
fprintf (cf, '%s---------------------------------------', prefix) ;
fprintf (cf, '----------------------------------------\n') ;
%-------------------------------------------------------------------------------
% print_header
%-------------------------------------------------------------------------------
function print_header (cf, prefix, name, what)
% print_header: print the header to the comment file
print_separator (cf, prefix) ;
fprintf (cf, '%s UF Sparse Matrix Collection, Tim Davis\n', prefix) ;
fprintf (cf, '%s http://www.cise.ufl.edu/research/sparse/matrices/%s\n', ...
prefix, name) ;
fprintf (cf, '%s name: %s', prefix, name) ;
if (nargin > 3)
fprintf (cf, ' : %s matrix', what) ;
end
fprintf (cf, '\n') ;
%-------------------------------------------------------------------------------
% etal
%-------------------------------------------------------------------------------
function s = etal(name)
% etal: change a long list of authors to first author 'et al.'
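% For example, etal ('A. Westerberg, I. Duff, J. Lewis') returns
% 'A. Westerberg et al.'; strings with one comma or none are returned unchanged.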
t = find (name == ',') ;
if (length (t) > 1)
s = [name(1:t(1)-1) ' et al.'] ;
else
s = name ;
end
|
github
|
lcnbeapp/beapp-master
|
isLegal.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/minFunc/isLegal.m
| 111 |
utf_8
|
201b5c177a5a05ba3a3322077c1acae1
|
function [legal] = isLegal(v)
legal = sum(any(imag(v(:))))==0 & sum(isnan(v(:)))==0 & sum(isinf(v(:)))==0;
end
|
github
|
lcnbeapp/beapp-master
|
WolfeLineSearch.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/minFunc/WolfeLineSearch.m
| 11,132 |
utf_8
|
bbe46a7fa9b05110d6c9f2ecba335fd1
|
function [t,f_new,g_new,funEvals,H] = WolfeLineSearch(...
x,t,d,f,g,gtd,c1,c2,LS,maxLS,tolX,debug,doPlot,saveHessianComp,funObj,varargin)
%
% Bracketing Line Search to Satisfy Wolfe Conditions
%
% Inputs:
% x: starting location
% t: initial step size
% d: descent direction
% f: function value at starting location
% g: gradient at starting location
% gtd: directional derivative at starting location
% c1: sufficient decrease parameter
% c2: curvature parameter
% debug: display debugging information
% LS: type of interpolation
% maxLS: maximum number of iterations
% tolX: minimum allowable step length
% doPlot: do a graphical display of interpolation
% funObj: objective function
% varargin: parameters of objective function
%
% Outputs:
% t: step length
% f_new: function value at x+t*d
% g_new: gradient value at x+t*d
% funEvals: number of function evaluations performed by line search
% H: Hessian at initial guess (only computed if requested)
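%
% Minimal usage sketch (hypothetical quadratic objective; assumes the
% remaining minFunc helpers, e.g. polyinterp, are on the path):
%   funObj = @(x) deal (0.5*(x'*x), x) ;   % returns f = 0.5*||x||^2, g = x
%   x0 = [1;1] ; [f0,g0] = funObj (x0) ; d = -g0 ; gtd = g0'*d ;
%   [t,fNew,gNew,nEvals] = WolfeLineSearch (x0, 1, d, f0, g0, gtd, ...
%       1e-4, 0.9, 4, 25, 1e-9, 0, 0, 1, funObj) ;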
% Evaluate the Objective and Gradient at the Initial Step
if nargout == 5
[f_new,g_new,H] = funObj(x + t*d,varargin{:});
else
[f_new,g_new] = funObj(x+t*d,varargin{:});
end
funEvals = 1;
gtd_new = g_new'*d;
% Bracket an Interval containing a point satisfying the
% Wolfe criteria
LSiter = 0;
t_prev = 0;
f_prev = f;
g_prev = g;
gtd_prev = gtd;
done = 0;
while LSiter < maxLS
%% Bracketing Phase
if ~isLegal(f_new) || ~isLegal(g_new)
if 0
if debug
fprintf('Extrapolated into illegal region, Bisecting\n');
end
t = (t + t_prev)/2;
if ~saveHessianComp && nargout == 5
[f_new,g_new,H] = funObj(x + t*d,varargin{:});
else
[f_new,g_new] = funObj(x + t*d,varargin{:});
end
funEvals = funEvals + 1;
gtd_new = g_new'*d;
LSiter = LSiter+1;
continue;
else
if debug
fprintf('Extrapolated into illegal region, switching to Armijo line-search\n');
end
t = (t + t_prev)/2;
% Do Armijo
if nargout == 5
[t,x_new,f_new,g_new,armijoFunEvals,H] = ArmijoBacktrack(...
x,t,d,f,f,g,gtd,c1,max(0,min(LS-2,2)),tolX,debug,doPlot,saveHessianComp,...
funObj,varargin{:});
else
[t,x_new,f_new,g_new,armijoFunEvals] = ArmijoBacktrack(...
x,t,d,f,f,g,gtd,c1,max(0,min(LS-2,2)),tolX,debug,doPlot,saveHessianComp,...
funObj,varargin{:});
end
funEvals = funEvals + armijoFunEvals;
return;
end
end
if f_new > f + c1*t*gtd || (LSiter > 1 && f_new >= f_prev)
bracket = [t_prev t];
bracketFval = [f_prev f_new];
bracketGval = [g_prev g_new];
break;
elseif abs(gtd_new) <= -c2*gtd
bracket = t;
bracketFval = f_new;
bracketGval = g_new;
done = 1;
break;
elseif gtd_new >= 0
bracket = [t_prev t];
bracketFval = [f_prev f_new];
bracketGval = [g_prev g_new];
break;
end
temp = t_prev;
t_prev = t;
minStep = t + 0.01*(t-temp);
maxStep = t*10;
if LS == 3
if debug
            fprintf('Extending Bracket\n');
end
t = maxStep;
elseif LS ==4
if debug
fprintf('Cubic Extrapolation\n');
end
t = polyinterp([temp f_prev gtd_prev; t f_new gtd_new],doPlot,minStep,maxStep);
else
t = mixedExtrap(temp,f_prev,gtd_prev,t,f_new,gtd_new,minStep,maxStep,debug,doPlot);
end
f_prev = f_new;
g_prev = g_new;
gtd_prev = gtd_new;
if ~saveHessianComp && nargout == 5
[f_new,g_new,H] = funObj(x + t*d,varargin{:});
else
[f_new,g_new] = funObj(x + t*d,varargin{:});
end
funEvals = funEvals + 1;
gtd_new = g_new'*d;
LSiter = LSiter+1;
end
if LSiter == maxLS
bracket = [0 t];
bracketFval = [f f_new];
bracketGval = [g g_new];
end
%% Zoom Phase
% We now either have a point satisfying the criteria, or a bracket
% surrounding a point satisfying the criteria
% Refine the bracket until we find a point satisfying the criteria
insufProgress = 0;
Tpos = 2;
LOposRemoved = 0;
while ~done && LSiter < maxLS
% Find High and Low Points in bracket
[f_LO LOpos] = min(bracketFval);
HIpos = -LOpos + 3;
% Compute new trial value
if LS == 3 || ~isLegal(bracketFval) || ~isLegal(bracketGval)
if debug
fprintf('Bisecting\n');
end
t = mean(bracket);
elseif LS == 4
if debug
fprintf('Grad-Cubic Interpolation\n');
end
t = polyinterp([bracket(1) bracketFval(1) bracketGval(:,1)'*d
bracket(2) bracketFval(2) bracketGval(:,2)'*d],doPlot);
else
% Mixed Case %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
nonTpos = -Tpos+3;
if LOposRemoved == 0
oldLOval = bracket(nonTpos);
oldLOFval = bracketFval(nonTpos);
oldLOGval = bracketGval(:,nonTpos);
end
t = mixedInterp(bracket,bracketFval,bracketGval,d,Tpos,oldLOval,oldLOFval,oldLOGval,debug,doPlot);
end
% Test that we are making sufficient progress
if min(max(bracket)-t,t-min(bracket))/(max(bracket)-min(bracket)) < 0.1
if debug
fprintf('Interpolation close to boundary');
end
if insufProgress || t>=max(bracket) || t <= min(bracket)
if debug
fprintf(', Evaluating at 0.1 away from boundary\n');
end
if abs(t-max(bracket)) < abs(t-min(bracket))
t = max(bracket)-0.1*(max(bracket)-min(bracket));
else
t = min(bracket)+0.1*(max(bracket)-min(bracket));
end
insufProgress = 0;
else
if debug
fprintf('\n');
end
insufProgress = 1;
end
else
insufProgress = 0;
end
% Evaluate new point
if ~saveHessianComp && nargout == 5
[f_new,g_new,H] = funObj(x + t*d,varargin{:});
else
[f_new,g_new] = funObj(x + t*d,varargin{:});
end
funEvals = funEvals + 1;
gtd_new = g_new'*d;
LSiter = LSiter+1;
if f_new > f + c1*t*gtd || f_new >= f_LO
% Armijo condition not satisfied or not lower than lowest
% point
bracket(HIpos) = t;
bracketFval(HIpos) = f_new;
bracketGval(:,HIpos) = g_new;
Tpos = HIpos;
else
if abs(gtd_new) <= - c2*gtd
% Wolfe conditions satisfied
done = 1;
elseif gtd_new*(bracket(HIpos)-bracket(LOpos)) >= 0
% Old HI becomes new LO
bracket(HIpos) = bracket(LOpos);
bracketFval(HIpos) = bracketFval(LOpos);
bracketGval(:,HIpos) = bracketGval(:,LOpos);
if LS == 5
if debug
fprintf('LO Pos is being removed!\n');
end
LOposRemoved = 1;
oldLOval = bracket(LOpos);
oldLOFval = bracketFval(LOpos);
oldLOGval = bracketGval(:,LOpos);
end
end
% New point becomes new LO
bracket(LOpos) = t;
bracketFval(LOpos) = f_new;
bracketGval(:,LOpos) = g_new;
Tpos = LOpos;
end
if sum(abs(bracket(1)-bracket(2))*gtd_new) < tolX
if debug
fprintf('Line Search can not make further progress\n');
end
break;
end
end
%%
if LSiter == maxLS
if debug
fprintf('Line Search Exceeded Maximum Line Search Iterations\n');
end
end
[f_LO LOpos] = min(bracketFval);
t = bracket(LOpos);
f_new = bracketFval(LOpos);
g_new = bracketGval(:,LOpos);
% Evaluate Hessian at new point
if nargout == 5 && funEvals > 1 && saveHessianComp
[f_new,g_new,H] = funObj(x + t*d,varargin{:});
funEvals = funEvals + 1;
end
end
%%
function [t] = mixedExtrap(x0,f0,g0,x1,f1,g1,minStep,maxStep,debug,doPlot);
alpha_c = polyinterp([x0 f0 g0; x1 f1 g1],doPlot,minStep,maxStep);
alpha_s = polyinterp([x0 f0 g0; x1 sqrt(-1) g1],doPlot,minStep,maxStep);
if alpha_c > minStep && abs(alpha_c - x1) < abs(alpha_s - x1)
if debug
fprintf('Cubic Extrapolation\n');
end
t = alpha_c;
else
if debug
fprintf('Secant Extrapolation\n');
end
t = alpha_s;
end
end
%%
function [t] = mixedInterp(bracket,bracketFval,bracketGval,d,Tpos,oldLOval,oldLOFval,oldLOGval,debug,doPlot);
% Mixed Case %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
nonTpos = -Tpos+3;
gtdT = bracketGval(:,Tpos)'*d;
gtdNonT = bracketGval(:,nonTpos)'*d;
oldLOgtd = oldLOGval'*d;
if bracketFval(Tpos) > oldLOFval
alpha_c = polyinterp([oldLOval oldLOFval oldLOgtd
bracket(Tpos) bracketFval(Tpos) gtdT],doPlot);
alpha_q = polyinterp([oldLOval oldLOFval oldLOgtd
bracket(Tpos) bracketFval(Tpos) sqrt(-1)],doPlot);
if abs(alpha_c - oldLOval) < abs(alpha_q - oldLOval)
if debug
fprintf('Cubic Interpolation\n');
end
t = alpha_c;
else
if debug
fprintf('Mixed Quad/Cubic Interpolation\n');
end
t = (alpha_q + alpha_c)/2;
end
elseif gtdT'*oldLOgtd < 0
alpha_c = polyinterp([oldLOval oldLOFval oldLOgtd
bracket(Tpos) bracketFval(Tpos) gtdT],doPlot);
alpha_s = polyinterp([oldLOval oldLOFval oldLOgtd
bracket(Tpos) sqrt(-1) gtdT],doPlot);
if abs(alpha_c - bracket(Tpos)) >= abs(alpha_s - bracket(Tpos))
if debug
fprintf('Cubic Interpolation\n');
end
t = alpha_c;
else
if debug
fprintf('Quad Interpolation\n');
end
t = alpha_s;
end
elseif abs(gtdT) <= abs(oldLOgtd)
alpha_c = polyinterp([oldLOval oldLOFval oldLOgtd
bracket(Tpos) bracketFval(Tpos) gtdT],...
doPlot,min(bracket),max(bracket));
alpha_s = polyinterp([oldLOval sqrt(-1) oldLOgtd
bracket(Tpos) bracketFval(Tpos) gtdT],...
doPlot,min(bracket),max(bracket));
if alpha_c > min(bracket) && alpha_c < max(bracket)
if abs(alpha_c - bracket(Tpos)) < abs(alpha_s - bracket(Tpos))
if debug
fprintf('Bounded Cubic Extrapolation\n');
end
t = alpha_c;
else
if debug
fprintf('Bounded Secant Extrapolation\n');
end
t = alpha_s;
end
else
if debug
fprintf('Bounded Secant Extrapolation\n');
end
t = alpha_s;
end
if bracket(Tpos) > oldLOval
t = min(bracket(Tpos) + 0.66*(bracket(nonTpos) - bracket(Tpos)),t);
else
t = max(bracket(Tpos) + 0.66*(bracket(nonTpos) - bracket(Tpos)),t);
end
else
t = polyinterp([bracket(nonTpos) bracketFval(nonTpos) gtdNonT
bracket(Tpos) bracketFval(Tpos) gtdT],doPlot);
end
end
%%
function [legal] = isLegal(v)
legal = sum(any(imag(v(:))))==0 & sum(isnan(v(:)))==0 & sum(isinf(v(:)))==0;
end
|
github
|
lcnbeapp/beapp-master
|
minFunc_processInputOptions.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/dmlt/external/minFunc/minFunc_processInputOptions.m
| 3,252 |
utf_8
|
72f66f58081120213a4f63f1bb4f42df
|
function [verbose,verboseI,debug,doPlot,maxFunEvals,maxIter,tolFun,tolX,method,...
corrections,c1,c2,LS_init,LS,cgSolve,SR1,cgUpdate,initialHessType,...
HessianModify,Fref,useComplex,numDiff,LS_saveHessianComp,...
DerivativeCheck,Damped,HvFunc,bbType,cycle,boundStepLength,...
HessianIter,outputFcn] = ...
minFunc_processInputOptions(o)
% Constants
SD = 0;
CSD = 1;
CG = 2;
BB = 3;
LBFGS = 4;
BFGS = 5;
NEWTON0 = 6;
NEWTON = 7;
TENSOR = 8;
verbose = 1;
verboseI= 1;
debug = 0;
doPlot = 0;
method = LBFGS;
cgSolve = 0;
o = toUpper(o);
if isfield(o,'DISPLAY')
switch(upper(o.DISPLAY))
case 0
verbose = 0;
verboseI = 0;
case 'FINAL'
verboseI = 0;
case 'OFF'
verbose = 0;
verboseI = 0;
case 'NONE'
verbose = 0;
verboseI = 0;
case 'FULL'
debug = 1;
case 'EXCESSIVE'
debug = 1;
doPlot = 1;
end
end
if isfield(o,'METHOD')
m = upper(o.METHOD);
switch(m)
case 'TENSOR'
method = TENSOR;
case 'NEWTON'
method = NEWTON;
case 'NEWTON0LBFGS'
method = NEWTON0;
cgSolve = 1;
case 'NEWTON0'
method = NEWTON0;
case 'BFGS'
method = BFGS;
case 'LBFGS'
method = LBFGS;
case 'BB'
method = BB;
case 'CG'
method = CG;
case 'CSD'
method = CSD;
case 'SD'
method = SD;
end
end
c2 = 0.9;
LS_init = 0;
LS = 4;
Fref = 1;
Damped = 0;
% Method Specific Default Options if different than the above
if method == BB
LS = 2;
Fref = 10;
elseif method == CG
c2 = 0.2;
LS_init = 1;
elseif method == CSD
c2 = 0.2;
Fref = 10;
elseif method == SD
LS = 2;
LS_init = 1;
elseif method == BFGS
Damped = 1;
end
maxFunEvals = getOpt(o,'MAXFUNEVALS',1000);
maxIter = getOpt(o,'MAXITER',500);
tolFun = getOpt(o,'TOLFUN',1e-5);
tolX = getOpt(o,'TOLX',1e-9);
corrections = getOpt(o,'CORR',100);
c1 = getOpt(o,'C1',1e-4);
c2 = getOpt(o,'C2',c2);
LS_init = getOpt(o,'LS_INIT',LS_init);
LS = getOpt(o,'LS',LS);
cgSolve = getOpt(o,'CGSOLVE',cgSolve);
SR1 = getOpt(o,'SR1',0);
cgUpdate = getOpt(o,'CGUPDATE',1);
initialHessType = getOpt(o,'INITIALHESSTYPE',1);
HessianModify = getOpt(o,'HESSIANMODIFY',0);
Fref = getOpt(o,'FREF',Fref);
useComplex = getOpt(o,'USECOMPLEX',0);
numDiff = getOpt(o,'NUMDIFF',0);
LS_saveHessianComp = getOpt(o,'LS_SAVEHESSIANCOMP',1);
DerivativeCheck = getOpt(o,'DERIVATIVECHECK',0);
Damped = getOpt(o,'DAMPED',Damped);
HvFunc = getOpt(o,'HVFUNC',[]);
bbType = getOpt(o,'BBTYPE',0);
cycle = getOpt(o,'CYCLE',3);
boundStepLength = getOpt(o,'BOUNDSTEPLENGTH',0);
HessianIter = getOpt(o,'HESSIANITER',1);
outputFcn = getOpt(o,'OUTPUTFCN',[]);
end
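% Illustrative call (added example; option names mirror the fields read
% above, the values are hypothetical):
%   opts = struct('Display','final','Method','lbfgs','MaxIter',200);
%   [verbose,verboseI,debug,doPlot,maxFunEvals,maxIter] = ...
%       minFunc_processInputOptions(opts);
%   % verbose remains 1, verboseI becomes 0, and maxIter is returned as 200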
function [v] = getOpt(options,opt,default)
if isfield(options,opt)
if ~isempty(getfield(options,opt))
v = getfield(options,opt);
else
v = default;
end
else
v = default;
end
end
function [o] = toUpper(o)
if ~isempty(o)
fn = fieldnames(o);
for i = 1:length(fn)
o = setfield(o,upper(fn{i}),getfield(o,fn{i}));
end
end
end
|
github
|
lcnbeapp/beapp-master
|
betapdf.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/stats/betapdf.m
| 4,091 |
utf_8
|
22972826a645e022df6b35f62bc7c557
|
% Copyright (C) 2012 Rik Wehbring
% Copyright (C) 1995-2012 Kurt Hornik
% Copyright (C) 2010 Christos Dimitrakakis
%
% This file is part of Octave.
%
% Octave is free software; you can redistribute it and/or modify it
% under the terms of the GNU General Public License as published by
% the Free Software Foundation; either version 3 of the License, or (at
% your option) any later version.
%
% Octave is distributed in the hope that it will be useful, but
% WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
% General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with Octave; see the file COPYING. If not, see
% <http://www.gnu.org/licenses/>.
% -*- texinfo -*-
% @deftypefn {Function File} {} betapdf (@var{x}, @var{a}, @var{b})
% For each element of @var{x}, compute the probability density function (PDF)
% at @var{x} of the Beta distribution with parameters @var{a} and @var{b}.
% @end deftypefn
% Author: KH <[email protected]>, CD <[email protected]>
% Description: PDF of the Beta distribution
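% Added example (illustrative, not part of the Octave source): Beta(2,2) has
% density 6*x*(1-x), so betapdf (0.5, 2, 2) returns 1.5.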
function pdf = betapdf (x, a, b)
if (nargin ~= 3)
print_usage ();
end
if (~isscalar (a) || ~isscalar (b))
[retval, x, a, b] = common_size (x, a, b);
if (retval > 0)
error ('betapdf: X, A, and B must be of common size or scalars');
end
end
if (iscomplex (x) || iscomplex (a) || iscomplex (b))
error ('betapdf: X, A, and B must not be complex');
end
if (isa (x, 'single') || isa (a, 'single') || isa (b, 'single'));
pdf = zeros (size (x), 'single');
else
pdf = zeros (size (x));
end
k = ~(a > 0) | ~(b > 0) | isnan (x);
pdf(k) = NaN;
k = (x > 0) & (x < 1) & (a > 0) & (b > 0) & ((a ~= 1) | (b ~= 1));
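% Note (added): the density is evaluated on the log scale,
% exp ((a-1)*log(x) + (b-1)*log(1-x) - log B(a,b)), so that large shape
% parameters do not overflow gamma (a+b).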
if (isscalar (a) && isscalar (b))
pdf(k) = exp ((a - 1) * log (x(k))...
+ (b - 1) * log (1 - x(k))...
+ lgamma (a + b) - lgamma (a) - lgamma (b));
else
pdf(k) = exp ((a(k) - 1) .* log (x(k))...
+ (b(k) - 1) .* log (1 - x(k))...
+ lgamma (a(k) + b(k)) - lgamma (a(k)) - lgamma (b(k)));
end
% Most important special cases when the density is finite.
k = (x == 0) & (a == 1) & (b > 0) & (b ~= 1);
if (isscalar (a) && isscalar (b))
pdf(k) = exp (lgamma (a + b) - lgamma (a) - lgamma (b));
else
pdf(k) = exp (lgamma (a(k) + b(k)) - lgamma (a(k)) - lgamma (b(k)));
end
k = (x == 1) & (b == 1) & (a > 0) & (a ~= 1);
if (isscalar (a) && isscalar (b))
pdf(k) = exp (lgamma (a + b) - lgamma (a) - lgamma (b));
else
pdf(k) = exp (lgamma (a(k) + b(k)) - lgamma (a(k)) - lgamma (b(k)));
end
k = (x >= 0) & (x <= 1) & (a == 1) & (b == 1);
pdf(k) = 1;
% Other special case when the density at the boundary is infinite.
k = (x == 0) & (a < 1);
pdf(k) = Inf;
k = (x == 1) & (b < 1);
pdf(k) = Inf;
end
%~shared x,y
%~ x = [-1 0 0.5 1 2];
%~ y = [0 2 1 0 0];
%~assert(betapdf (x, ones(1,5), 2*ones(1,5)), y);
%~assert(betapdf (x, 1, 2*ones(1,5)), y);
%~assert(betapdf (x, ones(1,5), 2), y);
%~assert(betapdf (x, [0 NaN 1 1 1], 2), [NaN NaN y(3:5)]);
%~assert(betapdf (x, 1, 2*[0 NaN 1 1 1]), [NaN NaN y(3:5)]);
%~assert(betapdf ([x, NaN], 1, 2), [y, NaN]);
%% Test class of input preserved
%~assert(betapdf (single([x, NaN]), 1, 2), single([y, NaN]));
%~assert(betapdf ([x, NaN], single(1), 2), single([y, NaN]));
%~assert(betapdf ([x, NaN], 1, single(2)), single([y, NaN]));
%% Beta (1/2,1/2) == arcsine distribution
%~test
%~ x = rand (10,1);
%~ y = 1./(pi * sqrt (x.*(1-x)));
%~ assert(betapdf (x, 1/2, 1/2), y, 50*eps);
%% Test large input values to betapdf
%~assert (betapdf(0.5, 1000, 1000), 35.678, 1e-3)
%% Test input validation
%~error betapdf ()
%~error betapdf (1)
%~error betapdf (1,2)
%~error betapdf (1,2,3,4)
%~error betapdf (ones(3),ones(2),ones(2))
%~error betapdf (ones(2),ones(3),ones(2))
%~error betapdf (ones(2),ones(2),ones(3))
%~error betapdf (i, 2, 2)
%~error betapdf (2, i, 2)
%~error betapdf (2, 2, i)
|
github
|
lcnbeapp/beapp-master
|
nansum.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/stats/nansum.m
| 194 |
utf_8
|
d26aaa8d5695a2cd15c51e72c3d4a186
|
% NANSUM provides a replacement for MATLAB's nansum.
%
% For usage see SUM.
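% Example (added for illustration):
%   nansum([1 NaN 3]) returns 4 (the NaN is treated as 0);
%   nansum([1 NaN; 2 4], 2) returns [1; 6].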
function y = nansum(x, dim)
x(isnan(x)) = 0;
if nargin==1
y = sum(x);
else
y = sum(x,dim);
end
end % function
|
github
|
lcnbeapp/beapp-master
|
betacdf.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/stats/betacdf.m
| 2,801 |
utf_8
|
e1af8d1223714ddeef6f5d873bf36c57
|
% Copyright (C) 2012 Rik Wehbring
% Copyright (C) 1995-2012 Kurt Hornik
%
% This file is part of Octave.
%
% Octave is free software; you can redistribute it and/or modify it
% under the terms of the GNU General Public License as published by
% the Free Software Foundation; either version 3 of the License, or (at
% your option) any later version.
%
% Octave is distributed in the hope that it will be useful, but
% WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
% General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with Octave; see the file COPYING. If not, see
% <http://www.gnu.org/licenses/>.
% -*- texinfo -*-
% @deftypefn {Function File} {} betacdf (@var{x}, @var{a}, @var{b})
% For each element of @var{x}, compute the cumulative distribution function
% (CDF) at @var{x} of the Beta distribution with parameters @var{a} and
% @var{b}.
% @end deftypefn
% Author: KH <[email protected]>
% Description: CDF of the Beta distribution
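% Added example (illustrative, not part of the Octave source): for Beta(1,2)
% the CDF is 1-(1-x).^2, so betacdf (0.5, 1, 2) returns 0.75.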
function cdf = betacdf (x, a, b)
if (nargin ~= 3)
print_usage ();
end
if (~isscalar (a) || ~isscalar (b))
[retval, x, a, b] = common_size (x, a, b);
if (retval > 0)
error ('betacdf: X, A, and B must be of common size or scalars');
end
end
if (iscomplex (x) || iscomplex (a) || iscomplex (b))
error ('betacdf: X, A, and B must not be complex');
end
if (isa (x, 'single') || isa (a, 'single') || isa (b, 'single'))
cdf = zeros (size (x), 'single');
else
cdf = zeros (size (x));
end
k = isnan (x) | ~(a > 0) | ~(b > 0);
cdf(k) = NaN;
k = (x >= 1) & (a > 0) & (b > 0);
cdf(k) = 1;
k = (x > 0) & (x < 1) & (a > 0) & (b > 0);
if (isscalar (a) && isscalar (b))
cdf(k) = betainc (x(k), a, b);
else
cdf(k) = betainc (x(k), a(k), b(k));
end
end
%~shared x,y
%~ x = [-1 0 0.5 1 2];
%~ y = [0 0 0.75 1 1];
%~assert(betacdf (x, ones(1,5), 2*ones(1,5)), y);
%~assert(betacdf (x, 1, 2*ones(1,5)), y);
%~assert(betacdf (x, ones(1,5), 2), y);
%~assert(betacdf (x, [0 1 NaN 1 1], 2), [NaN 0 NaN 1 1]);
%~assert(betacdf (x, 1, 2*[0 1 NaN 1 1]), [NaN 0 NaN 1 1]);
%~assert(betacdf ([x(1:2) NaN x(4:5)], 1, 2), [y(1:2) NaN y(4:5)]);
%% Test class of input preserved
%~assert(betacdf ([x, NaN], 1, 2), [y, NaN]);
%~assert(betacdf (single([x, NaN]), 1, 2), single([y, NaN]));
%~assert(betacdf ([x, NaN], single(1), 2), single([y, NaN]));
%~assert(betacdf ([x, NaN], 1, single(2)), single([y, NaN]));
%% Test input validation
%~error betacdf ()
%~error betacdf (1)
%~error betacdf (1,2)
%~error betacdf (1,2,3,4)
%~error betacdf (ones(3),ones(2),ones(2))
%~error betacdf (ones(2),ones(3),ones(2))
%~error betacdf (ones(2),ones(2),ones(3))
|
github
|
lcnbeapp/beapp-master
|
nanstd.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/stats/nanstd.m
| 248 |
utf_8
|
fa0dc38960ab4c502a685ee11ebb3328
|
% NANSTD provides a replacement for MATLAB's nanstd that is almost
% compatible.
%
% For usage see STD. Note that the three-argument call with FLAG is not
% supported.
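% Example (added for illustration, assuming NANVAR uses the default N-1
% normalization): nanstd([1 NaN 3]) returns sqrt(2), the sample standard
% deviation of the non-NaN values.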
function Y = nanstd(varargin)
Y = sqrt(nanvar(varargin{:}));
end % function
|
github
|
lcnbeapp/beapp-master
|
finv.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/stats/finv.m
| 1,923 |
utf_8
|
fd552f83d58acd399ecde455a11523d0
|
% Copyright (C) 2012 Rik Wehbring
% Copyright (C) 1995-2012 Kurt Hornik
%
% This file is part of Octave.
%
% Octave is free software; you can redistribute it and/or modify it
% under the terms of the GNU General Public License as published by
% the Free Software Foundation; either version 3 of the License, or (at
% your option) any later version.
%
% Octave is distributed in the hope that it will be useful, but
% WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
% General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with Octave; see the file COPYING. If not, see
% <http://www.gnu.org/licenses/>.
% -*- texinfo -*-
% @deftypefn {Function File} {} finv (@var{x}, @var{m}, @var{n})
% For each element of @var{x}, compute the quantile (the inverse of
% the CDF) at @var{x} of the F distribution with @var{m} and @var{n}
% degrees of freedom.
% @end deftypefn
% Author: KH <[email protected]>
% Description: Quantile function of the F distribution
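% Added note (illustrative, not part of the Octave source): the quantile is
% obtained from the Beta quantile via inv = (1./B - 1).*n./m, where
% B = betainv (1 - x, n/2, m/2). For example, finv (0.5, 10, 10) returns 1:
% if F ~ F(10,10) then 1/F ~ F(10,10), so the median is 1.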
function inv = finv (x, m, n)
if (nargin ~= 3)
print_usage ();
end
if (~isscalar (m) || ~isscalar (n))
[retval, x, m, n] = common_size (x, m, n);
if (retval > 0)
error ('finv: X, M, and N must be of common size or scalars');
end
end
if (~isreal (x) || ~isreal (m) || ~isreal (n))
error ('finv: X, M, and N must not be complex');
end
if (isa (x, 'single') || isa (m, 'single') || isa (n, 'single'))
inv = NaN (size (x), 'single');
else
inv = NaN (size (x));
end
k = (x == 1) & (m > 0) & (m < Inf) & (n > 0) & (n < Inf);
inv(k) = Inf;
k = (x >= 0) & (x < 1) & (m > 0) & (m < Inf) & (n > 0) & (n < Inf);
if (isscalar (m) && isscalar (n))
inv(k) = ((1 ./ betainv (1 - x(k), n/2, m/2) - 1) * n / m);
else
inv(k) = ((1 ./ betainv (1 - x(k), n(k)/2, m(k)/2) - 1)...
.* n(k) ./ m(k));
end
end
|
github
|
lcnbeapp/beapp-master
|
nanmean.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/stats/nanmean.m
| 245 |
utf_8
|
8ab56ab826c1e69fd89a8937baae4bf0
|
% NANMEAN provides a replacement for MATLAB's nanmean.
%
% For usage see MEAN.
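% Example (added for illustration): nanmean([1 NaN 3]) returns 2, i.e. the
% sum of the non-NaN values (4) divided by their count (2).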
function y = nanmean(x, dim)
if nargin<2
N = sum(~isnan(x));
y = nansum(x) ./ N;
else
N = sum(~isnan(x), dim);
y = nansum(x, dim) ./ N;
end
end % function
|
github
|
lcnbeapp/beapp-master
|
tinv.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/stats/tinv.m
| 7,513 |
utf_8
|
8aca90435b847d43ade0cd829db21624
|
function x = tinv(p,v);
% TINV Inverse of Student's T cumulative distribution function (cdf).
% X=TINV(P,V) returns the inverse of Student's T cdf with V degrees
% of freedom, at the values in P.
%
% The size of X is the common size of P and V. A scalar input
% functions as a constant matrix of the same size as the other input.
%
% This is an open source function that was assembled by Eric Maris using
% open source subfunctions found on the web.
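% Example (added for illustration): TINV(0.75, 1) returns 1, because the t
% distribution with one degree of freedom is the standard Cauchy, whose
% 75th percentile is tan(pi/4) = 1.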
if nargin < 2,
error('Requires two input arguments.');
end
[errorcode p v] = distchck(2,p,v);
if errorcode > 0
error('Requires non-scalar arguments to match in size.');
end
% Initialize X to zero.
x=zeros(size(p));
k = find(v < 0 | v ~= round(v));
if any(k)
tmp = NaN;
x(k) = tmp(ones(size(k)));
end
k = find(v == 1);
if any(k)
x(k) = tan(pi * (p(k) - 0.5));
end
% The inverse cdf of 0 is -Inf, and the inverse cdf of 1 is Inf.
k0 = find(p == 0);
if any(k0)
tmp = Inf;
x(k0) = -tmp(ones(size(k0)));
end
k1 = find(p ==1);
if any(k1)
tmp = Inf;
x(k1) = tmp(ones(size(k1)));
end
k = find(p >= 0.5 & p < 1);
if any(k)
z = betainv(2*(1-p(k)),v(k)/2,0.5);
x(k) = sqrt(v(k) ./ z - v(k));
end
k = find(p < 0.5 & p > 0);
if any(k)
z = betainv(2*(p(k)),v(k)/2,0.5);
x(k) = -sqrt(v(k) ./ z - v(k));
end
%%%%%%%%%%%%%%%%%%%%%%%%%
% SUBFUNCTION distchck
%%%%%%%%%%%%%%%%%%%%%%%%%
function [errorcode,varargout] = distchck(nparms,varargin)
%DISTCHCK Checks the argument list for the probability functions.
errorcode = 0;
varargout = varargin;
if nparms == 1
return;
end
% Get size of each input, check for scalars, copy to output
isscalar = (cellfun('prodofsize',varargin) == 1);
% Done if all inputs are scalars. Otherwise fetch their common size.
if (all(isscalar)), return; end
n = nparms;
for j=1:n
sz{j} = size(varargin{j});
end
t = sz(~isscalar);
size1 = t{1};
% Scalars receive this size. Other arrays must have the proper size.
for j=1:n
sizej = sz{j};
if (isscalar(j))
t = zeros(size1);
t(:) = varargin{j};
varargout{j} = t;
elseif (~isequal(sizej,size1))
errorcode = 1;
return;
end
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%
% SUBFUNCTION betainv
%%%%%%%%%%%%%%%%%%%%%%%%%%%
function x = betainv(p,a,b);
%BETAINV Inverse of the beta cumulative distribution function (cdf).
% X = BETAINV(P,A,B) returns the inverse of the beta cdf with
% parameters A and B at the values in P.
%
% The size of X is the common size of the input arguments. A scalar input
% functions as a constant matrix of the same size as the other inputs.
%
% BETAINV uses Newton's method to converge to the solution.
% Reference:
% [1] M. Abramowitz and I. A. Stegun, "Handbook of Mathematical
% Functions", Government Printing Office, 1964.
% B.A. Jones 1-12-93
if nargin < 3,
error('Requires three input arguments.');
end
[errorcode p a b] = distchck(3,p,a,b);
if errorcode > 0
error('Requires non-scalar arguments to match in size.');
end
% Initialize x to zero.
x = zeros(size(p));
% Return NaN if the arguments are outside their respective limits.
k = find(p < 0 | p > 1 | a <= 0 | b <= 0);
if any(k),
tmp = NaN;
x(k) = tmp(ones(size(k)));
end
% The inverse cdf of 0 is 0, and the inverse cdf of 1 is 1.
k0 = find(p == 0 & a > 0 & b > 0);
if any(k0),
x(k0) = zeros(size(k0));
end
k1 = find(p==1);
if any(k1),
x(k1) = ones(size(k1));
end
% Newton's Method.
% Permit no more than count_limit iterations.
count_limit = 100;
count = 0;
k = find(p > 0 & p < 1 & a > 0 & b > 0);
pk = p(k);
% Use the mean as a starting guess.
xk = a(k) ./ (a(k) + b(k));
% Move starting values away from the boundaries.
if xk == 0,
xk = sqrt(eps);
end
if xk == 1,
xk = 1 - sqrt(eps);
end
h = ones(size(pk));
crit = sqrt(eps);
% Break out of the iteration loop for the following:
% 1) The last update is very small (compared to x).
% 2) The last update is very small (compared to sqrt(eps)).
% 3) There are more than 100 iterations. This should NEVER happen.
while(any(abs(h) > crit * abs(xk)) & max(abs(h)) > crit ...
& count < count_limit),
count = count+1;
h = (betacdf(xk,a(k),b(k)) - pk) ./ betapdf(xk,a(k),b(k));
xnew = xk - h;
% Make sure that the values stay inside the bounds.
% Initially, Newton's Method may take big steps.
ksmall = find(xnew < 0);
klarge = find(xnew > 1);
if any(ksmall) | any(klarge)
xnew(ksmall) = xk(ksmall) /10;
xnew(klarge) = 1 - (1 - xk(klarge))/10;
end
xk = xnew;
end
% Return the converged value(s).
x(k) = xk;
if count==count_limit,
fprintf('\nWarning: BETAINV did not converge.\n');
str = 'The last step was: ';
outstr = sprintf([str,'%13.8f'],h);
fprintf(outstr);
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% SUBFUNCTION betapdf
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function y = betapdf(x,a,b)
%BETAPDF Beta probability density function.
% Y = BETAPDF(X,A,B) returns the beta probability density
% function with parameters A and B at the values in X.
%
% The size of Y is the common size of the input arguments. A scalar input
% functions as a constant matrix of the same size as the other inputs.
% References:
% [1] M. Abramowitz and I. A. Stegun, "Handbook of Mathematical
% Functions", Government Printing Office, 1964, 26.1.33.
if nargin < 3,
error('Requires three input arguments.');
end
[errorcode x a b] = distchck(3,x,a,b);
if errorcode > 0
error('Requires non-scalar arguments to match in size.');
end
% Initialize Y to zero.
y = zeros(size(x));
% Return NaN for parameter values outside their respective limits.
k1 = find(a <= 0 | b <= 0 | x < 0 | x > 1);
if any(k1)
tmp = NaN;
y(k1) = tmp(ones(size(k1)));
end
% Return Inf for x = 0 and a < 1 or x = 1 and b < 1.
% Required for non-IEEE machines.
k2 = find((x == 0 & a < 1) | (x == 1 & b < 1));
if any(k2)
tmp = Inf;
y(k2) = tmp(ones(size(k2)));
end
% Return the beta density function for valid parameters.
k = find(~(a <= 0 | b <= 0 | x <= 0 | x >= 1));
if any(k)
y(k) = x(k) .^ (a(k) - 1) .* (1 - x(k)) .^ (b(k) - 1) ./ beta(a(k),b(k));
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% SUBFUNCTION betacdf
%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function p = betacdf(x,a,b);
%BETACDF Beta cumulative distribution function.
% P = BETACDF(X,A,B) returns the beta cumulative distribution
% function with parameters A and B at the values in X.
%
% The size of P is the common size of the input arguments. A scalar input
% functions as a constant matrix of the same size as the other inputs.
%
% BETAINC does the computational work.
% Reference:
% [1] M. Abramowitz and I. A. Stegun, "Handbook of Mathematical
% Functions", Government Printing Office, 1964, 26.5.
if nargin < 3,
error('Requires three input arguments.');
end
[errorcode x a b] = distchck(3,x,a,b);
if errorcode > 0
error('Requires non-scalar arguments to match in size.');
end
% Initialize P to 0.
p = zeros(size(x));
k1 = find(a<=0 | b<=0);
if any(k1)
tmp = NaN;
p(k1) = tmp(ones(size(k1)));
end
% If X >= 1 the cdf of X is 1.
k2 = find(x >= 1);
if any(k2)
p(k2) = ones(size(k2));
end
k = find(x > 0 & x < 1 & a > 0 & b > 0);
if any(k)
p(k) = betainc(x(k),a(k),b(k));
end
% Make sure that round-off errors never make P greater than 1.
k = find(p > 1);
p(k) = ones(size(k));
|
github
|
lcnbeapp/beapp-master
|
betainv.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/stats/betainv.m
| 2,699 |
utf_8
|
105a6134b0eb25f2c863062d582e2040
|
% Copyright (C) 2012 Rik Wehbring
% Copyright (C) 1995-2012 Kurt Hornik
%
% This file is part of Octave.
%
% Octave is free software; you can redistribute it and/or modify it
% under the terms of the GNU General Public License as published by
% the Free Software Foundation; either version 3 of the License, or (at
% your option) any later version.
%
% Octave is distributed in the hope that it will be useful, but
% WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
% General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with Octave; see the file COPYING. If not, see
% <http://www.gnu.org/licenses/>.
% -*- texinfo -*-
% @deftypefn {Function File} {} betainv (@var{x}, @var{a}, @var{b})
% For each element of @var{x}, compute the quantile (the inverse of
% the CDF) at @var{x} of the Beta distribution with parameters @var{a}
% and @var{b}.
% @end deftypefn
% Author: KH <[email protected]>
% Description: Quantile function of the Beta distribution
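% Added example (illustrative, not part of the Octave source): for Beta(1,2)
% the CDF is 1-(1-x).^2, so betainv (0.75, 1, 2) returns 0.5; the Newton
% iteration below starts from the mean a/(a+b) = 1/3.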
function inv = betainv (x, a, b)
if (nargin ~= 3)
print_usage ();
end
if (~isscalar (a) || ~isscalar (b))
[retval, x, a, b] = common_size (x, a, b);
if (retval > 0)
error ('betainv: X, A, and B must be of common size or scalars');
end
end
if (iscomplex (x) || iscomplex (a) || iscomplex (b))
error ('betainv: X, A, and B must not be complex');
end
if (isa (x, 'single') || isa (a, 'single') || isa (b, 'single'))
inv = zeros (size (x), 'single');
else
inv = zeros (size (x));
end
k = (x < 0) | (x > 1) | ~(a > 0) | ~(b > 0) | isnan (x);
inv(k) = NaN;
k = (x == 1) & (a > 0) & (b > 0);
inv(k) = 1;
k = find ((x > 0) & (x < 1) & (a > 0) & (b > 0));
if (any (k))
if (~isscalar (a) || ~isscalar (b))
a = a(k);
b = b(k);
y = a ./ (a + b);
else
y = a / (a + b) * ones (size (k));
end
x = x(k);
if (isa (y, 'single'))
myeps = eps ('single');
else
myeps = eps;
end
l = find (y < myeps);
if (any (l))
y(l) = sqrt (myeps) * ones (length (l), 1);
end
l = find (y > 1 - myeps);
if (any (l))
y(l) = 1 - sqrt (myeps) * ones (length (l), 1);
end
y_old = y;
for i = 1 : 10000
h = (betacdf (y_old, a, b) - x) ./ betapdf (y_old, a, b);
y_new = y_old - h;
ind = find (y_new <= myeps);
if (any (ind))
y_new (ind) = y_old (ind) / 10;
end
ind = find (y_new >= 1 - myeps);
if (any (ind))
y_new (ind) = 1 - (1 - y_old (ind)) / 10;
end
h = y_old - y_new;
if (max (abs (h)) < sqrt (myeps))
break;
end
y_old = y_new;
end
inv(k) = y_new;
end
end
|
github
|
lcnbeapp/beapp-master
|
arrow.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/fileexchange/arrow.m
| 58,340 |
utf_8
|
e4ceab5f059e48a1443dff678cf4be7f
|
function [h,yy,zz] = arrow(varargin)
% ARROW Draw a line with an arrowhead.
%
% ARROW(Start,Stop) draws a line with an arrow from Start to Stop (points
% should be vectors of length 2 or 3, or matrices with 2 or 3
% columns), and returns the graphics handle of the arrow(s).
%
% ARROW uses the mouse (click-drag) to create an arrow.
%
% ARROW DEMO & ARROW DEMO2 show 3-D & 2-D demos of the capabilities of ARROW.
%
% ARROW may be called with a normal argument list or a property-based list.
% ARROW(Start,Stop,Length,BaseAngle,TipAngle,Width,Page,CrossDir) is
% the full normal argument list, where all but the Start and Stop
% points are optional. If you need to specify a later argument (e.g.,
% Page) but want default values of earlier ones (e.g., TipAngle),
% pass an empty matrix for the earlier ones (e.g., TipAngle=[]).
%
% ARROW('Property1',PropVal1,'Property2',PropVal2,...) creates arrows with the
% given properties, using default values for any unspecified or given as
% 'default' or NaN. Some properties used for line and patch objects are
% used in a modified fashion, others are passed directly to LINE, PATCH,
% or SET. For a detailed properties explanation, call ARROW PROPERTIES.
%
% Start The starting points. B
% Stop The end points. /|\ ^
% Length Length of the arrowhead in pixels. /|||\ |
% BaseAngle Base angle in degrees (ADE). //|||\\ L|
% TipAngle Tip angle in degrees (ABC). ///|||\\\ e|
% Width Width of the base in pixels. ////|||\\\\ n|
% Page Use hardcopy proportions. /////|D|\\\\\ g|
% CrossDir Vector || to arrowhead plane. //// ||| \\\\ t|
% NormalDir Vector out of arrowhead plane. /// ||| \\\ h|
% Ends Which end has an arrowhead. //<----->|| \\ |
% ObjectHandles Vector of handles to update. / base ||| \ V
% E angle||<-------->C
% ARROW(H,'Prop1',PropVal1,...), where H is a |||tipangle
% vector of handles to previously-created arrows |||
% and/or line objects, will update the previously- |||
% created arrows according to the current view -->|A|<-- width
% and any specified properties, and will convert
% two-point line objects to corresponding arrows. ARROW(H) will update
% the arrows if the current view has changed. Root, figure, or axes
% handles included in H are replaced by all descendant Arrow objects.
%
% A property list can follow any specified normal argument list, e.g.,
% ARROW([1 2 3],[0 0 0],36,'BaseAngle',60) creates an arrow from (1,2,3) to
% the origin, with an arrowhead of length 36 pixels and 60-degree base angle.
%
% The basic arguments or properties can generally be vectorized to create
% multiple arrows with the same call. This is done by passing a property
% with one row per arrow, or, if all arrows are to have the same property
% value, just one row may be specified.
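% For example (added illustration), ARROW([0 0;1 0],[1 1;2 1],'Width',2)
% would draw two arrows, one per row of Start/Stop, each with a 2-pixel
% base width.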
%
% You may want to execute AXIS(AXIS) before calling ARROW so it doesn't change
% the axes on you; ARROW determines the sizes of arrow components BEFORE the
% arrow is plotted, so if ARROW changes axis limits, arrows may be malformed.
%
% This version of ARROW uses features of MATLAB 5 and is incompatible with
% earlier MATLAB versions (ARROW for MATLAB 4.2c is available separately);
% some problems with perspective plots still exist.
% Copyright (c)1995-2002, Dr. Erik A. Johnson <[email protected]>, 11/15/02
% Revision history:
% 11/15/02 EAJ Accommodate how MATLAB 6.5 handles NaN and logicals
% 7/28/02 EAJ Tried (but failed) work-around for MATLAB 6.x / OpenGL bug
% if zero 'Width' or not double-ended
% 11/10/99 EAJ Add logical() to eliminate zero index problem in MATLAB 5.3.
% 11/10/99 EAJ Corrected warning if axis limits changed on multiple axes.
% 11/10/99 EAJ Update e-mail address.
% 2/10/99 EAJ Some documentation updating.
% 2/24/98 EAJ Fixed bug if Start~=Stop but both colinear with viewpoint.
% 8/14/97 EAJ Added workaround for MATLAB 5.1 scalar logical transpose bug.
% 7/21/97 EAJ Fixed a few misc bugs.
% 7/14/97 EAJ Make arrow([],'Prop',...) do nothing (no old handles)
% 6/23/97 EAJ MATLAB 5 compatible version, release.
% 5/27/97 EAJ Added Line Arrows back in. Corrected a few bugs.
% 5/26/97 EAJ Changed missing Start/Stop to mouse-selected arrows.
% 5/19/97 EAJ MATLAB 5 compatible version, beta.
% 4/13/97 EAJ MATLAB 5 compatible version, alpha.
% 1/31/97 EAJ Fixed bug with multiple arrows and unspecified Z coords.
% 12/05/96 EAJ Fixed one more bug with log plots and NormalDir specified
% 10/24/96 EAJ Fixed bug with log plots and NormalDir specified
% 11/13/95 EAJ Corrected handling for 'reverse' axis directions
% 10/06/95 EAJ Corrected occasional conflict with SUBPLOT
% 4/24/95 EAJ A major rewrite.
% Fall 94 EAJ Original code.
% Things to be done:
% - segment parsing, computing, and plotting into separate subfunctions
% - change computing from Xform to Camera paradigms
% + this will help especially with 3-D perspective plots
% + if the WarpToFill section works right, remove warning code
% + when perspective works properly, remove perspective warning code
% - add cell property values and struct property name/values (like get/set)
% - get rid of NaN as the "default" data label
% + perhaps change userdata to a struct and don't include (or leave
% empty) the values specified as default; or use a cell containing
% an empty matrix for a default value
% - add functionality of GET to retrieve current values of ARROW properties
% Many thanks to Keith Rogers <[email protected]> for his many excellent
% suggestions and beta testing. Check out his shareware package MATDRAW
% (at ftp://ftp.mathworks.com/pub/contrib/v5/graphics/matdraw/) -- he has
% permission to distribute ARROW with MATDRAW.
% Permission is granted to distribute ARROW with the toolboxes for the book
% "Solving Solid Mechanics Problems with MATLAB 5", by F. Golnaraghi et al.
% (Prentice Hall, 1999).
% global variable initialization
global ARROW_PERSP_WARN ARROW_STRETCH_WARN ARROW_AXLIMITS
if isempty(ARROW_PERSP_WARN ), ARROW_PERSP_WARN =1; end;
if isempty(ARROW_STRETCH_WARN), ARROW_STRETCH_WARN=1; end;
% Handle callbacks
if (nargin>0 & ischar(varargin{1}) & strcmp(lower(varargin{1}),'callback')),
arrow_callback(varargin{2:end}); return;
end;
% Are we doing the demo?
c = sprintf('\n');
if (nargin==1 & ischar(varargin{1})),
arg1 = lower(varargin{1});
if strncmp(arg1,'prop',4), arrow_props;
elseif strncmp(arg1,'demo',4)
clf reset
demo_info = arrow_demo;
if ~strncmp(arg1,'demo2',5),
hh=arrow_demo3(demo_info);
else,
hh=arrow_demo2(demo_info);
end;
if (nargout>=1), h=hh; end;
elseif strncmp(arg1,'fixlimits',3),
arrow_fixlimits(ARROW_AXLIMITS);
ARROW_AXLIMITS=[];
elseif strncmp(arg1,'help',4),
disp(help(mfilename));
else,
error([upper(mfilename) ' got an unknown single-argument string ''' deblank(arg1) '''.']);
end;
return;
end;
% Check # of arguments
if (nargout>3), error([upper(mfilename) ' produces at most 3 output arguments.']); end;
% find first property number
firstprop = nargin+1;
for k=1:length(varargin), if ~isnumeric(varargin{k}), firstprop=k; break; end; end;
lastnumeric = firstprop-1;
% check property list
if (firstprop<=nargin),
for k=firstprop:2:nargin,
curarg = varargin{k};
if ~ischar(curarg) | sum(size(curarg)>1)>1,
error([upper(mfilename) ' requires that a property name be a single string.']);
end;
end;
if (rem(nargin-firstprop,2)~=1),
error([upper(mfilename) ' requires that the property ''' ...
varargin{nargin} ''' be paired with a property value.']);
end;
end;
% default output
if (nargout>0), h=[]; end;
if (nargout>1), yy=[]; end;
if (nargout>2), zz=[]; end;
% set values to empty matrices
start = [];
stop = [];
len = [];
baseangle = [];
tipangle = [];
wid = [];
page = [];
crossdir = [];
ends = [];
ax = [];
oldh = [];
ispatch = [];
defstart = [NaN NaN NaN];
defstop = [NaN NaN NaN];
deflen = 16;
defbaseangle = 90;
deftipangle = 16;
defwid = 0;
defpage = 0;
defcrossdir = [NaN NaN NaN];
defends = 1;
defoldh = [];
defispatch = 1;
% The 'Tag' we'll put on our arrows
ArrowTag = 'Arrow';
% check for oldstyle arguments
if (firstprop==2),
% assume arg1 is a set of handles
oldh = varargin{1}(:);
if isempty(oldh), return; end;
elseif (firstprop>9),
error([upper(mfilename) ' takes at most 8 non-property arguments.']);
elseif (firstprop>2),
s = str2mat('start','stop','len','baseangle','tipangle','wid','page','crossdir');
for k=1:firstprop-1, eval([deblank(s(k,:)) '=varargin{k};']); end;
end;
% parse property pairs
extraprops={};
for k=firstprop:2:nargin,
prop = varargin{k};
val = varargin{k+1};
prop = [lower(prop(:)') ' '];
if strncmp(prop,'start' ,5), start = val;
elseif strncmp(prop,'stop' ,4), stop = val;
elseif strncmp(prop,'len' ,3), len = val(:);
elseif strncmp(prop,'base' ,4), baseangle = val(:);
elseif strncmp(prop,'tip' ,3), tipangle = val(:);
elseif strncmp(prop,'wid' ,3), wid = val(:);
elseif strncmp(prop,'page' ,4), page = val;
elseif strncmp(prop,'cross' ,5), crossdir = val;
elseif strncmp(prop,'norm' ,4), if (ischar(val)), crossdir=val; else, crossdir=val*sqrt(-1); end;
elseif strncmp(prop,'end' ,3), ends = val;
elseif strncmp(prop,'object',6), oldh = val(:);
elseif strncmp(prop,'handle',6), oldh = val(:);
elseif strncmp(prop,'type' ,4), ispatch = val;
elseif strncmp(prop,'userd' ,5), %ignore it
else,
% make sure it is a valid patch or line property
eval('get(0,[''DefaultPatch'' varargin{k}]);err=0;','err=1;'); errstr=lasterr;
if (err), eval('get(0,[''DefaultLine'' varargin{k}]);err=0;','err=1;'); end;
if (err),
errstr(1:max(find(errstr==setstr(13)|errstr==setstr(10)))) = '';
error([upper(mfilename) ' got ' errstr]);
end;
extraprops={extraprops{:},varargin{k},val};
end;
end;
% Check if we got 'default' values
start = arrow_defcheck(start ,defstart ,'Start' );
stop = arrow_defcheck(stop ,defstop ,'Stop' );
len = arrow_defcheck(len ,deflen ,'Length' );
baseangle = arrow_defcheck(baseangle,defbaseangle,'BaseAngle' );
tipangle = arrow_defcheck(tipangle ,deftipangle ,'TipAngle' );
wid = arrow_defcheck(wid ,defwid ,'Width' );
crossdir = arrow_defcheck(crossdir ,defcrossdir ,'CrossDir' );
page = arrow_defcheck(page ,defpage ,'Page' );
ends = arrow_defcheck(ends ,defends ,'' );
oldh = arrow_defcheck(oldh ,[] ,'ObjectHandles');
ispatch = arrow_defcheck(ispatch ,defispatch ,'' );
% check transpose on arguments
[m,n]=size(start ); if any(m==[2 3])&(n==1|n>3), start = start'; end;
[m,n]=size(stop ); if any(m==[2 3])&(n==1|n>3), stop = stop'; end;
[m,n]=size(crossdir); if any(m==[2 3])&(n==1|n>3), crossdir = crossdir'; end;
% convert strings to numbers
if ~isempty(ends) & ischar(ends),
endsorig = ends;
[m,n] = size(ends);
col = lower([ends(:,1:min(3,n)) ones(m,max(0,3-n))*' ']);
ends = nan(m,1);
oo = ones(1,m);
ii=find(all(col'==['non']'*oo)'); if ~isempty(ii), ends(ii)=ones(length(ii),1)*0; end;
ii=find(all(col'==['sto']'*oo)'); if ~isempty(ii), ends(ii)=ones(length(ii),1)*1; end;
ii=find(all(col'==['sta']'*oo)'); if ~isempty(ii), ends(ii)=ones(length(ii),1)*2; end;
ii=find(all(col'==['bot']'*oo)'); if ~isempty(ii), ends(ii)=ones(length(ii),1)*3; end;
if any(isnan(ends)),
ii = min(find(isnan(ends)));
error([upper(mfilename) ' does not recognize ''' deblank(endsorig(ii,:)) ''' as a valid ''Ends'' value.']);
end;
else,
ends = ends(:);
end;
if ~isempty(ispatch) & ischar(ispatch),
col = lower(ispatch(:,1));
patchchar='p'; linechar='l'; defchar=' ';
mask = col~=patchchar & col~=linechar & col~=defchar;
if any(mask),
error([upper(mfilename) ' does not recognize ''' deblank(ispatch(min(find(mask)),:)) ''' as a valid ''Type'' value.']);
end;
ispatch = (col==patchchar)*1 + (col==linechar)*0 + (col==defchar)*defispatch;
else,
ispatch = ispatch(:);
end;
oldh = oldh(:);
% check object handles
if ~all(ishandle(oldh)), error([upper(mfilename) ' got invalid object handles.']); end;
% expand root, figure, and axes handles
if ~isempty(oldh),
ohtype = get(oldh,'Type');
mask = strcmp(ohtype,'root') | strcmp(ohtype,'figure') | strcmp(ohtype,'axes');
if any(mask),
oldh = num2cell(oldh);
for ii=find(mask)',
oldh(ii) = {findobj(oldh{ii},'Tag',ArrowTag)};
end;
oldh = cat(1,oldh{:});
if isempty(oldh), return; end; % no arrows to modify, so just leave
end;
end;
% largest argument length
[mstart,junk]=size(start); [mstop,junk]=size(stop); [mcrossdir,junk]=size(crossdir);
argsizes = [length(oldh) mstart mstop ...
length(len) length(baseangle) length(tipangle) ...
length(wid) length(page) mcrossdir length(ends) ];
args=['length(ObjectHandle) '; ...
'#rows(Start) '; ...
'#rows(Stop) '; ...
'length(Length) '; ...
'length(BaseAngle) '; ...
'length(TipAngle) '; ...
'length(Width) '; ...
'length(Page) '; ...
'#rows(CrossDir) '; ...
'#rows(Ends) '];
if (any(imag(crossdir(:))~=0)),
args(9,:) = '#rows(NormalDir) ';
end;
if isempty(oldh),
narrows = max(argsizes);
else,
narrows = length(oldh);
end;
if (narrows<=0), narrows=1; end;
% Check size of arguments
ii = find((argsizes~=0)&(argsizes~=1)&(argsizes~=narrows));
if ~isempty(ii),
s = args(ii',:);
while ((size(s,2)>1)&((abs(s(:,size(s,2)))==0)|(abs(s(:,size(s,2)))==abs(' ')))),
s = s(:,1:size(s,2)-1);
end;
s = [ones(length(ii),1)*[upper(mfilename) ' requires that '] s ...
ones(length(ii),1)*[' equal the # of arrows (' num2str(narrows) ').' c]];
s = s';
s = s(:)';
s = s(1:length(s)-1);
error(setstr(s));
end;
% check element length in Start, Stop, and CrossDir
if ~isempty(start),
[m,n] = size(start);
if (n==2),
start = [start nan(m,1)];
elseif (n~=3),
error([upper(mfilename) ' requires 2- or 3-element Start points.']);
end;
end;
if ~isempty(stop),
[m,n] = size(stop);
if (n==2),
stop = [stop nan(m,1)];
elseif (n~=3),
error([upper(mfilename) ' requires 2- or 3-element Stop points.']);
end;
end;
if ~isempty(crossdir),
[m,n] = size(crossdir);
if (n<3),
crossdir = [crossdir nan(m,3-n)];
elseif (n~=3),
if (all(imag(crossdir(:))==0)),
error([upper(mfilename) ' requires 2- or 3-element CrossDir vectors.']);
else,
error([upper(mfilename) ' requires 2- or 3-element NormalDir vectors.']);
end;
end;
end;
% fill empty arguments
if isempty(start ), start = [Inf Inf Inf]; end;
if isempty(stop ), stop = [Inf Inf Inf]; end;
if isempty(len ), len = Inf; end;
if isempty(baseangle ), baseangle = Inf; end;
if isempty(tipangle ), tipangle = Inf; end;
if isempty(wid ), wid = Inf; end;
if isempty(page ), page = Inf; end;
if isempty(crossdir ), crossdir = [Inf Inf Inf]; end;
if isempty(ends ), ends = Inf; end;
if isempty(ispatch ), ispatch = Inf; end;
% expand single-column arguments
o = ones(narrows,1);
if (size(start ,1)==1), start = repmat(start , narrows, 1); end;
if (size(stop ,1)==1), stop = repmat(stop , narrows, 1); end;
if (length(len )==1), len = repmat(len , narrows, 1); end;
if (length(baseangle )==1), baseangle = repmat(baseangle, narrows, 1); end;
if (length(tipangle )==1), tipangle = repmat(tipangle , narrows, 1); end;
if (length(wid )==1), wid = repmat(wid , narrows, 1); end;
if (length(page )==1), page = repmat(page , narrows, 1); end;
if (size(crossdir ,1)==1), crossdir = repmat(crossdir , narrows, 1); end;
if (length(ends )==1), ends = repmat(ends , narrows, 1); end;
if (length(ispatch )==1), ispatch = repmat(ispatch , narrows, 1); end;
ax =repmat(gca, narrows, 1);
% if we've got handles, get the defaults from the handles
if ~isempty(oldh),
for k=1:narrows,
oh = oldh(k);
ud = get(oh,'UserData');
ax(k) = get(oh,'Parent');
ohtype = get(oh,'Type');
if strcmp(get(oh,'Tag'),ArrowTag), % if it's an arrow already
if isinf(ispatch(k)), ispatch(k)=strcmp(ohtype,'patch'); end;
% arrow UserData format: [start' stop' len base tip wid page crossdir' ends]
start0 = ud(1:3);
stop0 = ud(4:6);
if (isinf(len(k))), len(k) = ud( 7); end;
if (isinf(baseangle(k))), baseangle(k) = ud( 8); end;
if (isinf(tipangle(k))), tipangle(k) = ud( 9); end;
if (isinf(wid(k))), wid(k) = ud(10); end;
if (isinf(page(k))), page(k) = ud(11); end;
if (isinf(crossdir(k,1))), crossdir(k,1) = ud(12); end;
if (isinf(crossdir(k,2))), crossdir(k,2) = ud(13); end;
if (isinf(crossdir(k,3))), crossdir(k,3) = ud(14); end;
if (isinf(ends(k))), ends(k) = ud(15); end;
elseif strcmp(ohtype,'line')|strcmp(ohtype,'patch'), % it's a non-arrow line or patch
convLineToPatch = 1; %set to make arrow patches when converting from lines.
if isinf(ispatch(k)), ispatch(k)=convLineToPatch|strcmp(ohtype,'patch'); end;
x=get(oh,'XData'); x=x(~isnan(x(:))); if isempty(x), x=NaN; end;
y=get(oh,'YData'); y=y(~isnan(y(:))); if isempty(y), y=NaN; end;
z=get(oh,'ZData'); z=z(~isnan(z(:))); if isempty(z), z=NaN; end;
start0 = [x(1) y(1) z(1) ];
stop0 = [x(end) y(end) z(end)];
else,
error([upper(mfilename) ' cannot convert ' ohtype ' objects.']);
end;
ii=find(isinf(start(k,:))); if ~isempty(ii), start(k,ii)=start0(ii); end;
ii=find(isinf(stop( k,:))); if ~isempty(ii), stop( k,ii)=stop0( ii); end;
end;
end;
% convert Inf's to NaN's
start( isinf(start )) = NaN;
stop( isinf(stop )) = NaN;
len( isinf(len )) = NaN;
baseangle( isinf(baseangle)) = NaN;
tipangle( isinf(tipangle )) = NaN;
wid( isinf(wid )) = NaN;
page( isinf(page )) = NaN;
crossdir( isinf(crossdir )) = NaN;
ends( isinf(ends )) = NaN;
ispatch( isinf(ispatch )) = NaN;
% set up the UserData data (here so not corrupted by log10's and such)
ud = [start stop len baseangle tipangle wid page crossdir ends];
% Set Page defaults
page = ~isnan(page) & trueornan(page);
% Get axes limits, range, min; correct for aspect ratio and log scale
axm = zeros(3,narrows);
axr = zeros(3,narrows);
axrev = zeros(3,narrows);
ap = zeros(2,narrows);
xyzlog = zeros(3,narrows);
limmin = zeros(2,narrows);
limrange = zeros(2,narrows);
oldaxlims = zeros(narrows,7);
oneax = all(ax==ax(1));
if (oneax),
T = zeros(4,4);
invT = zeros(4,4);
else,
T = zeros(16,narrows);
invT = zeros(16,narrows);
end;
axnotdone = logical(ones(size(ax)));
while (any(axnotdone)),
ii = min(find(axnotdone));
curax = ax(ii);
curpage = page(ii);
% get axes limits and aspect ratio
axl = [get(curax,'XLim'); get(curax,'YLim'); get(curax,'ZLim')];
oldaxlims(min(find(oldaxlims(:,1)==0)),:) = [double(curax) reshape(axl',1,6)];
% get axes size in pixels (points)
u = get(curax,'Units');
axposoldunits = get(curax,'Position');
really_curpage = curpage & strcmp(u,'normalized');
if (really_curpage),
curfig = get(curax,'Parent');
pu = get(curfig,'PaperUnits');
set(curfig,'PaperUnits','points');
pp = get(curfig,'PaperPosition');
set(curfig,'PaperUnits',pu);
set(curax,'Units','pixels');
curapscreen = get(curax,'Position');
set(curax,'Units','normalized');
curap = pp.*get(curax,'Position');
else,
set(curax,'Units','pixels');
curapscreen = get(curax,'Position');
curap = curapscreen;
end;
set(curax,'Units',u);
set(curax,'Position',axposoldunits);
% handle non-stretched axes position
str_stretch = { 'DataAspectRatioMode' ; ...
'PlotBoxAspectRatioMode' ; ...
'CameraViewAngleMode' };
str_camera = { 'CameraPositionMode' ; ...
'CameraTargetMode' ; ...
'CameraViewAngleMode' ; ...
'CameraUpVectorMode' };
notstretched = strcmp(get(curax,str_stretch),'manual');
manualcamera = strcmp(get(curax,str_camera),'manual');
if ~arrow_WarpToFill(notstretched,manualcamera,curax),
% give a warning that this has not been thoroughly tested
if 0 & ARROW_STRETCH_WARN,
ARROW_STRETCH_WARN = 0;
strs = {str_stretch{1:2},str_camera{:}};
strs = [char(ones(length(strs),1)*sprintf('\n ')) char(strs)]';
warning([upper(mfilename) ' may not yet work quite right ' ...
'if any of the following are ''manual'':' strs(:).']);
end;
% find the true pixel size of the actual axes
texttmp = text(axl(1,[1 2 2 1 1 2 2 1]), ...
axl(2,[1 1 2 2 1 1 2 2]), ...
axl(3,[1 1 1 1 2 2 2 2]),'');
set(texttmp,'Units','points');
textpos = get(texttmp,'Position');
delete(texttmp);
textpos = cat(1,textpos{:});
textpos = max(textpos(:,1:2)) - min(textpos(:,1:2));
% adjust the axes position
if (really_curpage),
% adjust to printed size
textpos = textpos * min(curap(3:4)./textpos);
curap = [curap(1:2)+(curap(3:4)-textpos)/2 textpos];
else,
% adjust for pixel roundoff
textpos = textpos * min(curapscreen(3:4)./textpos);
curap = [curap(1:2)+(curap(3:4)-textpos)/2 textpos];
end;
end;
if ARROW_PERSP_WARN & ~strcmp(get(curax,'Projection'),'orthographic'),
ARROW_PERSP_WARN = 0;
warning([upper(mfilename) ' does not yet work right for 3-D perspective projection.']);
end;
% adjust limits for log scale on axes
curxyzlog = [strcmp(get(curax,'XScale'),'log'); ...
strcmp(get(curax,'YScale'),'log'); ...
strcmp(get(curax,'ZScale'),'log')];
if (any(curxyzlog)),
ii = find([curxyzlog;curxyzlog]);
if (any(axl(ii)<=0)),
error([upper(mfilename) ' does not support non-positive limits on log-scaled axes.']);
else,
axl(ii) = log10(axl(ii));
end;
end;
% correct for 'reverse' direction on axes;
curreverse = [strcmp(get(curax,'XDir'),'reverse'); ...
strcmp(get(curax,'YDir'),'reverse'); ...
strcmp(get(curax,'ZDir'),'reverse')];
ii = find(curreverse);
if ~isempty(ii),
axl(ii,[1 2])=-axl(ii,[2 1]);
end;
% compute the range of 2-D values
curT = view(curax);
lim = curT*[0 1 0 1 0 1 0 1;0 0 1 1 0 0 1 1;0 0 0 0 1 1 1 1;1 1 1 1 1 1 1 1];
lim = lim(1:2,:)./([1;1]*lim(4,:));
curlimmin = min(lim')';
curlimrange = max(lim')' - curlimmin;
curinvT = inv(curT);
if (~oneax),
curT = curT.';
curinvT = curinvT.';
curT = curT(:);
curinvT = curinvT(:);
end;
% check which arrows to which cur corresponds
ii = find((ax==curax)&(page==curpage));
oo = ones(1,length(ii));
axr(:,ii) = diff(axl')' * oo;
axm(:,ii) = axl(:,1) * oo;
axrev(:,ii) = curreverse * oo;
ap(:,ii) = curap(3:4)' * oo;
xyzlog(:,ii) = curxyzlog * oo;
limmin(:,ii) = curlimmin * oo;
limrange(:,ii) = curlimrange * oo;
if (oneax),
T = curT;
invT = curinvT;
else,
T(:,ii) = curT * oo;
invT(:,ii) = curinvT * oo;
end;
axnotdone(ii) = zeros(1,length(ii));
end;
oldaxlims(oldaxlims(:,1)==0,:)=[];
% correct for log scales
curxyzlog = xyzlog.';
ii = find(curxyzlog(:));
if ~isempty(ii),
start( ii) = real(log10(start( ii)));
stop( ii) = real(log10(stop( ii)));
if (all(imag(crossdir)==0)), % pulled (ii) subscript on crossdir, 12/5/96 eaj
crossdir(ii) = real(log10(crossdir(ii)));
end;
end;
% correct for reverse directions
ii = find(axrev.');
if ~isempty(ii),
start( ii) = -start( ii);
stop( ii) = -stop( ii);
crossdir(ii) = -crossdir(ii);
end;
% transpose start/stop values
start = start.';
stop = stop.';
% take care of defaults, page was done above
ii=find(isnan(start(:) )); if ~isempty(ii), start(ii) = axm(ii)+axr(ii)/2; end;
ii=find(isnan(stop(:) )); if ~isempty(ii), stop(ii) = axm(ii)+axr(ii)/2; end;
ii=find(isnan(crossdir(:) )); if ~isempty(ii), crossdir(ii) = zeros(length(ii),1); end;
ii=find(isnan(len )); if ~isempty(ii), len(ii) = ones(length(ii),1)*deflen; end;
ii=find(isnan(baseangle )); if ~isempty(ii), baseangle(ii) = ones(length(ii),1)*defbaseangle; end;
ii=find(isnan(tipangle )); if ~isempty(ii), tipangle(ii) = ones(length(ii),1)*deftipangle; end;
ii=find(isnan(wid )); if ~isempty(ii), wid(ii) = ones(length(ii),1)*defwid; end;
ii=find(isnan(ends )); if ~isempty(ii), ends(ii) = ones(length(ii),1)*defends; end;
% transpose rest of values
len = len.';
baseangle = baseangle.';
tipangle = tipangle.';
wid = wid.';
page = page.';
crossdir = crossdir.';
ends = ends.';
ax = ax.';
% given x, a 3xN matrix of points in 3-space;
% want to convert to X, the corresponding 4xN 2-space matrix
%
% tmp1=[(x-axm)./axr; ones(1,size(x,1))];
% if (oneax), X=T*tmp1;
% else, tmp1=[tmp1;tmp1;tmp1;tmp1]; tmp1=T.*tmp1;
% tmp2=zeros(4,4*N); tmp2(:)=tmp1(:);
% X=zeros(4,N); X(:)=sum(tmp2)'; end;
% X = X ./ (ones(4,1)*X(4,:));
% for all points with start==stop, start=stop-(verysmallvalue)*(up-direction);
ii = find(all(start==stop));
if ~isempty(ii),
% find an arrowdir vertical on screen and perpendicular to viewer
% transform to 2-D
tmp1 = [(stop(:,ii)-axm(:,ii))./axr(:,ii);ones(1,length(ii))];
if (oneax), twoD=T*tmp1;
else, tmp1=[tmp1;tmp1;tmp1;tmp1]; tmp1=T(:,ii).*tmp1;
tmp2=zeros(4,4*length(ii)); tmp2(:)=tmp1(:);
twoD=zeros(4,length(ii)); twoD(:)=sum(tmp2)'; end;
twoD=twoD./(ones(4,1)*twoD(4,:));
% move the start point down just slightly
tmp1 = twoD + [0;-1/1000;0;0]*(limrange(2,ii)./ap(2,ii));
% transform back to 3-D
if (oneax), threeD=invT*tmp1;
else, tmp1=[tmp1;tmp1;tmp1;tmp1]; tmp1=invT(:,ii).*tmp1;
tmp2=zeros(4,4*length(ii)); tmp2(:)=tmp1(:);
threeD=zeros(4,length(ii)); threeD(:)=sum(tmp2)'; end;
start(:,ii) = (threeD(1:3,:)./(ones(3,1)*threeD(4,:))).*axr(:,ii)+axm(:,ii);
end;
% compute along-arrow points
% transform Start points
tmp1=[(start-axm)./axr;ones(1,narrows)];
if (oneax), X0=T*tmp1;
else, tmp1=[tmp1;tmp1;tmp1;tmp1]; tmp1=T.*tmp1;
tmp2=zeros(4,4*narrows); tmp2(:)=tmp1(:);
X0=zeros(4,narrows); X0(:)=sum(tmp2)'; end;
X0=X0./(ones(4,1)*X0(4,:));
% transform Stop points
tmp1=[(stop-axm)./axr;ones(1,narrows)];
if (oneax), Xf=T*tmp1;
else, tmp1=[tmp1;tmp1;tmp1;tmp1]; tmp1=T.*tmp1;
tmp2=zeros(4,4*narrows); tmp2(:)=tmp1(:);
Xf=zeros(4,narrows); Xf(:)=sum(tmp2)'; end;
Xf=Xf./(ones(4,1)*Xf(4,:));
% compute pixel distance between points
D = sqrt(sum(((Xf(1:2,:)-X0(1:2,:)).*(ap./limrange)).^2));
D = D + (D==0); %eaj new 2/24/98
% compute and modify along-arrow distances
len1 = len;
len2 = len - (len.*tan(tipangle/180*pi)-wid/2).*tan((90-baseangle)/180*pi);
slen0 = zeros(1,narrows);
slen1 = len1 .* ((ends==2)|(ends==3));
slen2 = len2 .* ((ends==2)|(ends==3));
len0 = zeros(1,narrows);
len1 = len1 .* ((ends==1)|(ends==3));
len2 = len2 .* ((ends==1)|(ends==3));
% for no start arrowhead
ii=find((ends==1)&(D<len2));
if ~isempty(ii),
slen0(ii) = D(ii)-len2(ii);
end;
% for no end arrowhead
ii=find((ends==2)&(D<slen2));
if ~isempty(ii),
len0(ii) = D(ii)-slen2(ii);
end;
len1 = len1 + len0;
len2 = len2 + len0;
slen1 = slen1 + slen0;
slen2 = slen2 + slen0;
% note: the division by D below will probably not be accurate if both
% of the following are true:
% 1. the ratio of the line length to the arrowhead
% length is large
% 2. the view is highly perspective.
% compute stoppoints
tmp1=X0.*(ones(4,1)*(len0./D))+Xf.*(ones(4,1)*(1-len0./D));
if (oneax), tmp3=invT*tmp1;
else, tmp1=[tmp1;tmp1;tmp1;tmp1]; tmp1=invT.*tmp1;
tmp2=zeros(4,4*narrows); tmp2(:)=tmp1(:);
tmp3=zeros(4,narrows); tmp3(:)=sum(tmp2)'; end;
stoppoint = tmp3(1:3,:)./(ones(3,1)*tmp3(4,:)).*axr+axm;
% compute tippoints
tmp1=X0.*(ones(4,1)*(len1./D))+Xf.*(ones(4,1)*(1-len1./D));
if (oneax), tmp3=invT*tmp1;
else, tmp1=[tmp1;tmp1;tmp1;tmp1]; tmp1=invT.*tmp1;
tmp2=zeros(4,4*narrows); tmp2(:)=tmp1(:);
tmp3=zeros(4,narrows); tmp3(:)=sum(tmp2)'; end;
tippoint = tmp3(1:3,:)./(ones(3,1)*tmp3(4,:)).*axr+axm;
% compute basepoints
tmp1=X0.*(ones(4,1)*(len2./D))+Xf.*(ones(4,1)*(1-len2./D));
if (oneax), tmp3=invT*tmp1;
else, tmp1=[tmp1;tmp1;tmp1;tmp1]; tmp1=invT.*tmp1;
tmp2=zeros(4,4*narrows); tmp2(:)=tmp1(:);
tmp3=zeros(4,narrows); tmp3(:)=sum(tmp2)'; end;
basepoint = tmp3(1:3,:)./(ones(3,1)*tmp3(4,:)).*axr+axm;
% compute startpoints
tmp1=X0.*(ones(4,1)*(1-slen0./D))+Xf.*(ones(4,1)*(slen0./D));
if (oneax), tmp3=invT*tmp1;
else, tmp1=[tmp1;tmp1;tmp1;tmp1]; tmp1=invT.*tmp1;
tmp2=zeros(4,4*narrows); tmp2(:)=tmp1(:);
tmp3=zeros(4,narrows); tmp3(:)=sum(tmp2)'; end;
startpoint = tmp3(1:3,:)./(ones(3,1)*tmp3(4,:)).*axr+axm;
% compute stippoints
tmp1=X0.*(ones(4,1)*(1-slen1./D))+Xf.*(ones(4,1)*(slen1./D));
if (oneax), tmp3=invT*tmp1;
else, tmp1=[tmp1;tmp1;tmp1;tmp1]; tmp1=invT.*tmp1;
tmp2=zeros(4,4*narrows); tmp2(:)=tmp1(:);
tmp3=zeros(4,narrows); tmp3(:)=sum(tmp2)'; end;
stippoint = tmp3(1:3,:)./(ones(3,1)*tmp3(4,:)).*axr+axm;
% compute sbasepoints
tmp1=X0.*(ones(4,1)*(1-slen2./D))+Xf.*(ones(4,1)*(slen2./D));
if (oneax), tmp3=invT*tmp1;
else, tmp1=[tmp1;tmp1;tmp1;tmp1]; tmp1=invT.*tmp1;
tmp2=zeros(4,4*narrows); tmp2(:)=tmp1(:);
tmp3=zeros(4,narrows); tmp3(:)=sum(tmp2)'; end;
sbasepoint = tmp3(1:3,:)./(ones(3,1)*tmp3(4,:)).*axr+axm;
% compute cross-arrow directions for arrows with NormalDir specified
if (any(imag(crossdir(:))~=0)),
ii = find(any(imag(crossdir)~=0));
crossdir(:,ii) = cross((stop(:,ii)-start(:,ii))./axr(:,ii), ...
imag(crossdir(:,ii))).*axr(:,ii);
end;
% compute cross-arrow directions
basecross = crossdir + basepoint;
tipcross = crossdir + tippoint;
sbasecross = crossdir + sbasepoint;
stipcross = crossdir + stippoint;
ii = find(all(crossdir==0)|any(isnan(crossdir)));
if ~isempty(ii),
numii = length(ii);
% transform start points
tmp1 = [basepoint(:,ii) tippoint(:,ii) sbasepoint(:,ii) stippoint(:,ii)];
tmp1 = (tmp1-axm(:,[ii ii ii ii])) ./ axr(:,[ii ii ii ii]);
tmp1 = [tmp1; ones(1,4*numii)];
if (oneax), X0=T*tmp1;
else, tmp1=[tmp1;tmp1;tmp1;tmp1]; tmp1=T(:,[ii ii ii ii]).*tmp1;
tmp2=zeros(4,16*numii); tmp2(:)=tmp1(:);
X0=zeros(4,4*numii); X0(:)=sum(tmp2)'; end;
X0=X0./(ones(4,1)*X0(4,:));
% transform stop points
tmp1 = [(2*stop(:,ii)-start(:,ii)-axm(:,ii))./axr(:,ii);ones(1,numii)];
tmp1 = [tmp1 tmp1 tmp1 tmp1];
if (oneax), Xf=T*tmp1;
else, tmp1=[tmp1;tmp1;tmp1;tmp1]; tmp1=T(:,[ii ii ii ii]).*tmp1;
tmp2=zeros(4,16*numii); tmp2(:)=tmp1(:);
Xf=zeros(4,4*numii); Xf(:)=sum(tmp2)'; end;
Xf=Xf./(ones(4,1)*Xf(4,:));
% compute perpendicular directions
pixfact = ((limrange(1,ii)./limrange(2,ii)).*(ap(2,ii)./ap(1,ii))).^2;
pixfact = [pixfact pixfact pixfact pixfact];
pixfact = [pixfact;1./pixfact];
[dummyval,jj] = max(abs(Xf(1:2,:)-X0(1:2,:)));
jj1 = ((1:4)'*ones(1,length(jj))==ones(4,1)*jj);
jj2 = ((1:4)'*ones(1,length(jj))==ones(4,1)*(3-jj));
jj3 = jj1(1:2,:);
Xf(jj1)=Xf(jj1)+(Xf(jj1)-X0(jj1)==0); %eaj new 2/24/98
Xp = X0;
Xp(jj2) = X0(jj2) + ones(sum(jj2(:)),1);
Xp(jj1) = X0(jj1) - (Xf(jj2)-X0(jj2))./(Xf(jj1)-X0(jj1)) .* pixfact(jj3);
% inverse transform the cross points
if (oneax), Xp=invT*Xp;
else, tmp1=[Xp;Xp;Xp;Xp]; tmp1=invT(:,[ii ii ii ii]).*tmp1;
tmp2=zeros(4,16*numii); tmp2(:)=tmp1(:);
Xp=zeros(4,4*numii); Xp(:)=sum(tmp2)'; end;
Xp=(Xp(1:3,:)./(ones(3,1)*Xp(4,:))).*axr(:,[ii ii ii ii])+axm(:,[ii ii ii ii]);
basecross(:,ii) = Xp(:,0*numii+(1:numii));
tipcross(:,ii) = Xp(:,1*numii+(1:numii));
sbasecross(:,ii) = Xp(:,2*numii+(1:numii));
stipcross(:,ii) = Xp(:,3*numii+(1:numii));
end;
% compute all points
% compute start points
axm11 = [axm axm axm axm axm axm axm axm axm axm axm];
axr11 = [axr axr axr axr axr axr axr axr axr axr axr];
st = [stoppoint tippoint basepoint sbasepoint stippoint startpoint stippoint sbasepoint basepoint tippoint stoppoint];
tmp1 = (st - axm11) ./ axr11;
tmp1 = [tmp1; ones(1,size(tmp1,2))];
if (oneax), X0=T*tmp1;
else, tmp1=[tmp1;tmp1;tmp1;tmp1]; tmp1=[T T T T T T T T T T T].*tmp1;
tmp2=zeros(4,44*narrows); tmp2(:)=tmp1(:);
X0=zeros(4,11*narrows); X0(:)=sum(tmp2)'; end;
X0=X0./(ones(4,1)*X0(4,:));
% compute stop points
tmp1 = ([start tipcross basecross sbasecross stipcross stop stipcross sbasecross basecross tipcross start] ...
- axm11) ./ axr11;
tmp1 = [tmp1; ones(1,size(tmp1,2))];
if (oneax), Xf=T*tmp1;
else, tmp1=[tmp1;tmp1;tmp1;tmp1]; tmp1=[T T T T T T T T T T T].*tmp1;
tmp2=zeros(4,44*narrows); tmp2(:)=tmp1(:);
Xf=zeros(4,11*narrows); Xf(:)=sum(tmp2)'; end;
Xf=Xf./(ones(4,1)*Xf(4,:));
% compute lengths
len0 = len.*((ends==1)|(ends==3)).*tan(tipangle/180*pi);
slen0 = len.*((ends==2)|(ends==3)).*tan(tipangle/180*pi);
le = [zeros(1,narrows) len0 wid/2 wid/2 slen0 zeros(1,narrows) -slen0 -wid/2 -wid/2 -len0 zeros(1,narrows)];
aprange = ap./limrange;
aprange = [aprange aprange aprange aprange aprange aprange aprange aprange aprange aprange aprange];
D = sqrt(sum(((Xf(1:2,:)-X0(1:2,:)).*aprange).^2));
Dii=find(D==0); if ~isempty(Dii), D=D+(D==0); le(Dii)=zeros(1,length(Dii)); end; %should fix DivideByZero warnings
tmp1 = X0.*(ones(4,1)*(1-le./D)) + Xf.*(ones(4,1)*(le./D));
% inverse transform
if (oneax), tmp3=invT*tmp1;
else, tmp1=[tmp1;tmp1;tmp1;tmp1]; tmp1=[invT invT invT invT invT invT invT invT invT invT invT].*tmp1;
tmp2=zeros(4,44*narrows); tmp2(:)=tmp1(:);
tmp3=zeros(4,11*narrows); tmp3(:)=sum(tmp2)'; end;
pts = tmp3(1:3,:)./(ones(3,1)*tmp3(4,:)) .* axr11 + axm11;
% correct for ones where the crossdir was specified
ii = find(~(all(crossdir==0)|any(isnan(crossdir))));
if ~isempty(ii),
D1 = [pts(:,1*narrows+ii)-pts(:,9*narrows+ii) ...
pts(:,2*narrows+ii)-pts(:,8*narrows+ii) ...
pts(:,3*narrows+ii)-pts(:,7*narrows+ii) ...
pts(:,4*narrows+ii)-pts(:,6*narrows+ii) ...
pts(:,6*narrows+ii)-pts(:,4*narrows+ii) ...
pts(:,7*narrows+ii)-pts(:,3*narrows+ii) ...
pts(:,8*narrows+ii)-pts(:,2*narrows+ii) ...
pts(:,9*narrows+ii)-pts(:,1*narrows+ii)]/2;
ii = ii'*ones(1,8) + ones(length(ii),1)*[1:4 6:9]*narrows;
ii = ii(:)';
pts(:,ii) = st(:,ii) + D1;
end;
% readjust for reverse directions
iicols=(1:narrows)'; iicols=iicols(:,ones(1,11)); iicols=iicols(:).';
tmp1=axrev(:,iicols);
ii = find(tmp1(:)); if ~isempty(ii), pts(ii)=-pts(ii); end;
% readjust for log scale on axes
tmp1=xyzlog(:,iicols);
ii = find(tmp1(:)); if ~isempty(ii), pts(ii)=10.^pts(ii); end;
% compute the x,y,z coordinates of the patches;
ii = narrows*(0:10)'*ones(1,narrows) + ones(11,1)*(1:narrows);
ii = ii(:)';
x = zeros(11,narrows);
y = zeros(11,narrows);
z = zeros(11,narrows);
x(:) = pts(1,ii)';
y(:) = pts(2,ii)';
z(:) = pts(3,ii)';
% do the output
if (nargout<=1),
% % create or modify the patches
newpatch = trueornan(ispatch) & (isempty(oldh)|~strcmp(get(oldh,'Type'),'patch'));
newline = ~trueornan(ispatch) & (isempty(oldh)|~strcmp(get(oldh,'Type'),'line'));
if isempty(oldh), H=zeros(narrows,1); else, H=oldh; end;
% % make or modify the arrows
for k=1:narrows,
if all(isnan(ud(k,[3 6])))&arrow_is2DXY(ax(k)), zz=[]; else, zz=z(:,k); end;
% work around a MATLAB 6.x OpenGL bug -- 7/28/02
xx=x(:,k); yy=y(:,k);
mask=any([ones(1,2+size(zz,2));diff([xx yy zz],[],1)],2);
xx=xx(mask); yy=yy(mask); if ~isempty(zz), zz=zz(mask); end;
% plot the patch or line
xyz = {'XData',xx,'YData',yy,'ZData',zz,'Tag',ArrowTag};
if newpatch(k)|newline(k),
if newpatch(k),
H(k) = patch(xyz{:});
else,
H(k) = line(xyz{:});
end;
if ~isempty(oldh), arrow_copyprops(oldh(k),H(k)); end;
else,
if ispatch(k), xyz={xyz{:},'CData',[]}; end;
set(H(k),xyz{:});
end;
end;
if ~isempty(oldh), delete(oldh(oldh~=H)); end;
% % additional properties
set(H,'Clipping','off');
set(H,{'UserData'},num2cell(ud,2));
if (length(extraprops)>0), set(H,extraprops{:}); end;
% handle choosing arrow Start and/or Stop locations if unspecified
[H,oldaxlims,errstr] = arrow_clicks(H,ud,x,y,z,ax,oldaxlims);
if ~isempty(errstr), error([upper(mfilename) ' got ' errstr]); end;
% set the output
if (nargout>0), h=H; end;
% make sure the axis limits did not change
if isempty(oldaxlims),
ARROW_AXLIMITS = [];
else,
lims = get(oldaxlims(:,1),{'XLim','YLim','ZLim'})';
lims = reshape(cat(2,lims{:}),6,size(lims,2));
mask = arrow_is2DXY(oldaxlims(:,1));
oldaxlims(mask,6:7) = lims(5:6,mask)';
ARROW_AXLIMITS = oldaxlims(find(any(oldaxlims(:,2:7)'~=lims)),:);
if ~isempty(ARROW_AXLIMITS),
warning(arrow_warnlimits(ARROW_AXLIMITS,narrows));
end;
end;
else,
% don't create the patch, just return the data
h=x;
yy=y;
zz=z;
end;
function out = arrow_defcheck(in,def,prop)
% check if we got 'default' values
out = in;
if ~ischar(in), return; end;
if size(in,1)==1 & strncmp(lower(in),'def',3),
out = def;
elseif ~isempty(prop),
error([upper(mfilename) ' does not recognize ''' in(:)' ''' as a valid ''' prop ''' string.']);
end;
function [H,oldaxlims,errstr] = arrow_clicks(H,ud,x,y,z,ax,oldaxlims)
% handle choosing arrow Start and/or Stop locations if necessary
errstr = '';
if isempty(H)|isempty(ud)|isempty(x), return; end;
% determine which (if any) need Start and/or Stop
needStart = all(isnan(ud(:,1:3)'))';
needStop = all(isnan(ud(:,4:6)'))';
mask = any(needStart|needStop);
if ~any(mask), return; end;
ud(~mask,:)=[]; ax(:,~mask)=[];
x(:,~mask)=[]; y(:,~mask)=[]; z(:,~mask)=[];
% make them invisible for the time being
set(H,'Visible','off');
% save the current axes and limits modes; set to manual for the time being
oldAx = gca;
limModes=get(ax(:),{'XLimMode','YLimMode','ZLimMode'});
set(ax(:),{'XLimMode','YLimMode','ZLimMode'},{'manual','manual','manual'});
% loop over each arrow that requires attention
jj = find(mask);
for ii=1:length(jj),
h = H(jj(ii));
axes(ax(ii));
% figure out correct call
if needStart(ii), prop='Start'; else, prop='Stop'; end;
[wasInterrupted,errstr] = arrow_click(needStart(ii)&needStop(ii),h,prop,ax(ii));
% handle errors and control-C
if wasInterrupted,
delete(H(jj(ii:end)));
H(jj(ii:end))=[];
oldaxlims(jj(ii:end),:)=[];
break;
end;
end;
% restore the axes and limit modes
axes(oldAx);
set(ax(:),{'XLimMode','YLimMode','ZLimMode'},limModes);
function [wasInterrupted,errstr] = arrow_click(lockStart,H,prop,ax)
% handle the clicks for one arrow
fig = get(ax,'Parent');
% save some things
oldFigProps = {'Pointer','WindowButtonMotionFcn','WindowButtonUpFcn'};
oldFigValue = get(fig,oldFigProps);
oldArrowProps = {'EraseMode'};
oldArrowValue = get(H,oldArrowProps);
set(H,'EraseMode','background'); %because 'xor' makes shaft invisible unless Width>1
global ARROW_CLICK_H ARROW_CLICK_PROP ARROW_CLICK_AX ARROW_CLICK_USE_Z
ARROW_CLICK_H=H; ARROW_CLICK_PROP=prop; ARROW_CLICK_AX=ax;
ARROW_CLICK_USE_Z=~arrow_is2DXY(ax)|~arrow_planarkids(ax);
set(fig,'Pointer','crosshair');
% set up the WindowButtonMotion so we can see the arrow while moving around
set(fig,'WindowButtonUpFcn','set(gcf,''WindowButtonUpFcn'','''')', ...
'WindowButtonMotionFcn','');
if ~lockStart,
set(H,'Visible','on');
set(fig,'WindowButtonMotionFcn',[mfilename '(''callback'',''motion'');']);
end;
% wait for the button to be pressed
[wasKeyPress,wasInterrupted,errstr] = arrow_wfbdown(fig);
% if we wanted to click-drag, set the Start point
if lockStart & ~wasInterrupted,
pt = arrow_point(ARROW_CLICK_AX,ARROW_CLICK_USE_Z);
feval(mfilename,H,'Start',pt,'Stop',pt);
set(H,'Visible','on');
ARROW_CLICK_PROP='Stop';
set(fig,'WindowButtonMotionFcn',[mfilename '(''callback'',''motion'');']);
% wait for the mouse button to be released
eval('waitfor(fig,''WindowButtonUpFcn'','''');','wasInterrupted=1;');
if wasInterrupted, errstr=lasterr; end;
end;
if ~wasInterrupted, feval(mfilename,'callback','motion'); end;
% restore some things
set(gcf,oldFigProps,oldFigValue);
set(H,oldArrowProps,oldArrowValue);
function arrow_callback(varargin)
% handle redrawing callbacks
if nargin==0, return; end;
str = varargin{1};
if ~ischar(str), error([upper(mfilename) ' got an invalid Callback command.']); end;
s = lower(str);
if strcmp(s,'motion'),
% motion callback
global ARROW_CLICK_H ARROW_CLICK_PROP ARROW_CLICK_AX ARROW_CLICK_USE_Z
feval(mfilename,ARROW_CLICK_H,ARROW_CLICK_PROP,arrow_point(ARROW_CLICK_AX,ARROW_CLICK_USE_Z));
drawnow;
else,
error([upper(mfilename) ' does not recognize ''' str(:).' ''' as a valid Callback option.']);
end;
function out = arrow_point(ax,use_z)
% return the point on the given axes
if nargin==0, ax=gca; end;
if nargin<2, use_z=~arrow_is2DXY(ax)|~arrow_planarkids(ax); end;
out = get(ax,'CurrentPoint');
out = out(1,:);
if ~use_z, out=out(1:2); end;
function [wasKeyPress,wasInterrupted,errstr] = arrow_wfbdown(fig)
% wait for button down ignoring object ButtonDownFcn's
if nargin==0, fig=gcf; end;
errstr = '';
% save ButtonDownFcn values
objs = findobj(fig);
buttonDownFcns = get(objs,'ButtonDownFcn');
mask=~strcmp(buttonDownFcns,''); objs=objs(mask); buttonDownFcns=buttonDownFcns(mask);
set(objs,'ButtonDownFcn','');
% save other figure values
figProps = {'KeyPressFcn','WindowButtonDownFcn'};
figValue = get(fig,figProps);
% do the real work
set(fig,'KeyPressFcn','set(gcf,''KeyPressFcn'','''',''WindowButtonDownFcn'','''');', ...
'WindowButtonDownFcn','set(gcf,''WindowButtonDownFcn'','''')');
lasterr('');
wasInterrupted=0; eval('waitfor(fig,''WindowButtonDownFcn'','''');','wasInterrupted=1;');
wasKeyPress = ~wasInterrupted & strcmp(get(fig,'KeyPressFcn'),'');
if wasInterrupted, errstr=lasterr; end;
% restore ButtonDownFcn and other figure values
set(objs,'ButtonDownFcn',buttonDownFcns);
set(fig,figProps,figValue);
function [out,is2D] = arrow_is2DXY(ax)
% check if axes are 2-D X-Y plots
% may not work for modified camera angles, etc.
out = logical(zeros(size(ax))); % 2-D X-Y plots
is2D = out; % any 2-D plots
views = get(ax(:),{'View'});
views = cat(1,views{:});
out(:) = abs(views(:,2))==90;
is2D(:) = out(:) | all(rem(views',90)==0)';
function out = arrow_planarkids(ax)
% check if axes descendents all have empty ZData (lines,patches,surfaces)
out = logical(ones(size(ax)));
allkids = get(ax(:),{'Children'});
for k=1:length(allkids),
kids = get([findobj(allkids{k},'flat','Type','line')
findobj(allkids{k},'flat','Type','patch')
findobj(allkids{k},'flat','Type','surface')],{'ZData'});
for j=1:length(kids),
if ~isempty(kids{j}), out(k)=logical(0); break; end;
end;
end;
function arrow_fixlimits(axlimits)
% reset the axis limits as necessary
if isempty(axlimits), disp([upper(mfilename) ' does not remember any axis limits to reset.']); end;
for k=1:size(axlimits,1),
if any(get(axlimits(k,1),'XLim')~=axlimits(k,2:3)), set(axlimits(k,1),'XLim',axlimits(k,2:3)); end;
if any(get(axlimits(k,1),'YLim')~=axlimits(k,4:5)), set(axlimits(k,1),'YLim',axlimits(k,4:5)); end;
if any(get(axlimits(k,1),'ZLim')~=axlimits(k,6:7)), set(axlimits(k,1),'ZLim',axlimits(k,6:7)); end;
end;
function out = arrow_WarpToFill(notstretched,manualcamera,curax)
% check if we are in "WarpToFill" mode.
out = strcmp(get(curax,'WarpToFill'),'on');
% 'WarpToFill' is undocumented, so may need to replace this by
% out = ~( any(notstretched) & any(manualcamera) );
function out = arrow_warnlimits(axlimits,narrows)
% create a warning message if we've changed the axis limits
msg = '';
switch (size(axlimits,1))
case 1, msg='';
case 2, msg='on two axes ';
otherwise, msg='on several axes ';
end;
msg = [upper(mfilename) ' changed the axis limits ' msg ...
'when adding the arrow'];
if (narrows>1), msg=[msg 's']; end;
out = [msg '.' sprintf('\n') ' Call ' upper(mfilename) ...
' FIXLIMITS to reset them now.'];
function arrow_copyprops(fm,to)
% copy line properties to patches
props = {'EraseMode','LineStyle','LineWidth','Marker','MarkerSize',...
'MarkerEdgeColor','MarkerFaceColor','ButtonDownFcn', ...
'Clipping','DeleteFcn','BusyAction','HandleVisibility', ...
'Selected','SelectionHighlight','Visible'};
lineprops = {'Color', props{:}};
patchprops = {'EdgeColor',props{:}};
patch2props = {'FaceColor',patchprops{:}};
fmpatch = strcmp(get(fm,'Type'),'patch');
topatch = strcmp(get(to,'Type'),'patch');
set(to( fmpatch& topatch),patch2props,get(fm( fmpatch& topatch),patch2props)); %p->p
set(to(~fmpatch&~topatch),lineprops, get(fm(~fmpatch&~topatch),lineprops )); %l->l
set(to( fmpatch&~topatch),lineprops, get(fm( fmpatch&~topatch),patchprops )); %p->l
set(to(~fmpatch& topatch),patchprops, get(fm(~fmpatch& topatch),lineprops) ,'FaceColor','none'); %l->p
function arrow_props
% display further help info about ARROW properties
c = sprintf('\n');
disp([c ...
'ARROW Properties: Default values are given in [square brackets], and other' c ...
' acceptable equivalent property names are in (parenthesis).' c c ...
' Start The starting points. For N arrows, B' c ...
' this should be a Nx2 or Nx3 matrix. /|\ ^' c ...
' Stop The end points. For N arrows, this /|||\ |' c ...
' should be a Nx2 or Nx3 matrix. //|||\\ L|' c ...
' Length Length of the arrowhead (in pixels on ///|||\\\ e|' c ...
' screen, points on a page). [16] (Len) ////|||\\\\ n|' c ...
' BaseAngle Angle (degrees) of the base angle /////|D|\\\\\ g|' c ...
' ADE. For a simple stick arrow, use //// ||| \\\\ t|' c ...
' BaseAngle=TipAngle. [90] (Base) /// ||| \\\ h|' c ...
' TipAngle Angle (degrees) of tip angle ABC. //<----->|| \\ |' c ...
' [16] (Tip) / base ||| \ V' c ...
' Width Width of the base in pixels. Not E angle ||<-------->C' c ...
' the ''LineWidth'' prop. [0] (Wid) |||tipangle' c ...
' Page If provided, non-empty, and not NaN, |||' c ...
' this causes ARROW to use hardcopy |||' c ...
' rather than onscreen proportions. A' c ...
' This is important if screen aspect --> <-- width' c ...
' ratio and hardcopy aspect ratio are ----CrossDir---->' c ...
' vastly different. []' c...
' CrossDir A vector giving the direction towards which the fletches' c ...
' on the arrow should go. [computed such that it is perpen-' c ...
' dicular to both the arrow direction and the view direction' c ...
' (i.e., as if it was pasted on a normal 2-D graph)] (Note' c ...
' that CrossDir is a vector. Also note that if an axis is' c ...
' plotted on a log scale, then the corresponding component' c ...
' of CrossDir must also be set appropriately, i.e., to 1 for' c ...
' no change in that direction, >1 for a positive change, >0' c ...
' and <1 for negative change.)' c ...
' NormalDir A vector normal to the fletch direction (CrossDir is then' c ...
' computed by the vector cross product [Line]x[NormalDir]). []' c ...
' (Note that NormalDir is a vector. Unlike CrossDir,' c ...
' NormalDir is used as is regardless of log-scaled axes.)' c ...
' Ends Set which end has an arrowhead. Valid values are ''none'',' c ...
' ''stop'', ''start'', and ''both''. [''stop''] (End)' c...
' ObjectHandles Vector of handles to previously-created arrows to be' c ...
' updated or line objects to be converted to arrows.' c ...
' [] (Object,Handle)' c ]);
function out = arrow_demo
% demo
% create the data
[x,y,z] = peaks;
[ddd,out.iii]=max(z(:));
out.axlim = [min(x(:)) max(x(:)) min(y(:)) max(y(:)) min(z(:)) max(z(:))];
% modify it by inserting some NaN's
[m,n] = size(z);
m = floor(m/2);
n = floor(n/2);
z(1:m,1:n) = nan(m,n);
% graph it
clf('reset');
out.hs=surf(x,y,z);
out.x=x; out.y=y; out.z=z;
xlabel('x'); ylabel('y');
function h = arrow_demo3(in)
% set the view
axlim = in.axlim;
axis(axlim);
zlabel('z');
%set(in.hs,'FaceColor','interp');
view(viewmtx(-37.5,30,20));
title(['Demo of the capabilities of the ARROW function in 3-D']);
% Normal blue arrow
h1 = feval(mfilename,[axlim(1) axlim(4) 4],[-.8 1.2 4], ...
'EdgeColor','b','FaceColor','b');
% Normal white arrow, clipped by the surface
h2 = feval(mfilename,axlim([1 4 6]),[0 2 4]);
t=text(-2.4,2.7,7.7,'arrow clipped by surf');
% Baseangle<90
h3 = feval(mfilename,[3 .125 3.5],[1.375 0.125 3.5],30,50);
t2=text(3.1,.125,3.5,'local maximum');
% Baseangle<90, fill and edge colors different
h4 = feval(mfilename,axlim(1:2:5)*.5,[0 0 0],36,60,25, ...
'EdgeColor','b','FaceColor','c');
t3=text(axlim(1)*.5,axlim(3)*.5,axlim(5)*.5-.75,'origin');
set(t3,'HorizontalAlignment','center');
% Baseangle>90, black fill
h5 = feval(mfilename,[-2.9 2.9 3],[-1.3 .4 3.2],30,120,[],6, ...
'EdgeColor','r','FaceColor','k','LineWidth',2);
% Baseangle>90, no fill
h6 = feval(mfilename,[-2.9 2.9 1.3],[-1.3 .4 1.5],30,120,[],6, ...
'EdgeColor','r','FaceColor','none','LineWidth',2);
% Stick arrow
h7 = feval(mfilename,[-1.6 -1.65 -6.5],[0 -1.65 -6.5],[],16,16);
t4=text(-1.5,-1.65,-7.25,'global minimum');
set(t4,'HorizontalAlignment','center');
% Normal, black fill
h8 = feval(mfilename,[-1.4 0 -7.2],[-1.4 0 -3],'FaceColor','k');
t5=text(-1.5,0,-7.75,'local minimum');
set(t5,'HorizontalAlignment','center');
% Gray fill, crossdir specified, 'LineStyle' --
h9 = feval(mfilename,[-3 2.2 -6],[-3 2.2 -.05],36,[],27,6,[],[0 -1 0], ...
'EdgeColor','k','FaceColor',.75*[1 1 1],'LineStyle','--');
% a series of normal arrows, linearly spaced, crossdir specified
h10y=(0:4)'/3;
h10 = feval(mfilename,[-3*ones(size(h10y)) h10y -6.5*ones(size(h10y))], ...
[-3*ones(size(h10y)) h10y -.05*ones(size(h10y))], ...
12,[],[],[],[],[0 -1 0]);
% a series of normal arrows, linearly spaced
h11x=(1:.33:2.8)';
h11 = feval(mfilename,[h11x -3*ones(size(h11x)) 6.5*ones(size(h11x))], ...
[h11x -3*ones(size(h11x)) -.05*ones(size(h11x))]);
% series of magenta arrows, radially oriented, crossdir specified
h12x=2; h12y=-3; h12z=axlim(5)/2; h12xr=1; h12zr=h12z; ir=.15;or=.81;
h12t=(0:11)'/6*pi;
h12 = feval(mfilename, ...
[h12x+h12xr*cos(h12t)*ir h12y*ones(size(h12t)) ...
h12z+h12zr*sin(h12t)*ir],[h12x+h12xr*cos(h12t)*or ...
h12y*ones(size(h12t)) h12z+h12zr*sin(h12t)*or], ...
10,[],[],[],[], ...
[-h12xr*sin(h12t) zeros(size(h12t)) h12zr*cos(h12t)],...
'FaceColor','none','EdgeColor','m');
% series of normal arrows, tangentially oriented, crossdir specified
or13=.91; h13t=(0:.5:12)'/6*pi;
locs = [h12x+h12xr*cos(h13t)*or13 h12y*ones(size(h13t)) h12z+h12zr*sin(h13t)*or13];
h13 = feval(mfilename,locs(1:end-1,:),locs(2:end,:),6);
% arrow with no line ==> oriented downwards
h14 = feval(mfilename,[3 3 .100001],[3 3 .1],30);
t6=text(3,3,3.6,'no line'); set(t6,'HorizontalAlignment','center');
% arrow with arrowheads at both ends
h15 = feval(mfilename,[-.5 -3 -3],[1 -3 -3],'Ends','both','FaceColor','g', ...
'Length',20,'Width',3,'CrossDir',[0 0 1],'TipAngle',25);
h=[h1;h2;h3;h4;h5;h6;h7;h8;h9;h10;h11;h12;h13;h14;h15];
function h = arrow_demo2(in)
axlim = in.axlim;
dolog = 1;
if (dolog), set(in.hs,'YData',10.^get(in.hs,'YData')); end;
shading('interp');
view(2);
title(['Demo of the capabilities of the ARROW function in 2-D']);
hold on; [C,H]=contour(in.x,in.y,in.z,20,'-'); hold off;
for k=H',
set(k,'ZData',(axlim(6)+1)*ones(size(get(k,'XData'))),'Color','k');
if (dolog), set(k,'YData',10.^get(k,'YData')); end;
end;
if (dolog), axis([axlim(1:2) 10.^axlim(3:4)]); set(gca,'YScale','log');
else, axis(axlim(1:4)); end;
% Normal blue arrow
start = [axlim(1) axlim(4) axlim(6)+2];
stop = [in.x(in.iii) in.y(in.iii) axlim(6)+2];
if (dolog), start(:,2)=10.^start(:,2); stop(:,2)=10.^stop(:,2); end;
h1 = feval(mfilename,start,stop,'EdgeColor','b','FaceColor','b');
% three arrows with varying fill, width, and baseangle
start = [-3 -3 10; -3 -1.5 10; -1.5 -3 10];
stop = [-.03 -.03 10; -.03 -1.5 10; -1.5 -.03 10];
if (dolog), start(:,2)=10.^start(:,2); stop(:,2)=10.^stop(:,2); end;
h2 = feval(mfilename,start,stop,24,[90;60;120],[],[0;0;4],'Ends',str2mat('both','stop','stop'));
set(h2(2),'EdgeColor',[0 .35 0],'FaceColor',[0 .85 .85]);
set(h2(3),'EdgeColor','r','FaceColor',[1 .5 1]);
h=[h1;h2];
function out = trueornan(x)
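% treat NaN entries as "true" and pass all other entries through unchanged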
if isempty(x),
out=x;
else,
out = isnan(x);
out(~out) = x(~out);
end;
|
github
|
lcnbeapp/beapp-master
|
icp.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/fileexchange/icp.m
| 19,293 |
utf_8
|
344fc5e77eb0c82e12ab2f075f996060
|
function [TR, TT, ER, t, info] = icp(q,p,varargin)
% Perform the Iterative Closest Point algorithm on three dimensional point
% clouds.
%
% [TR, TT] = icp(q,p) returns the rotation matrix TR and translation
% vector TT that minimizes the distances from (TR * p + TT) to q.
% p is a 3xm matrix and q is a 3xn matrix.
%
% [TR, TT] = icp(q,p,k) forces the algorithm to make k iterations
% exactly. The default is 10 iterations.
%
% [TR, TT, ER] = icp(q,p,k) also returns the RMS of errors for k
% iterations in a (k+1)x1 vector. ER(0) is the initial error.
%
% [TR, TT, ER, t] = icp(q,p,k) also returns the calculation times per
% iteration in a (k+1)x1 vector. t(0) is the time consumed for preprocessing.
%
% [TR, TT, ER, t, info] = icp(q,p,k) also returns a structure info, that
% contains some diagnostic information. This functionality has been added
% by Jan-Mathijs Schoffelen, October 2013, containing the indices to the
% matching points of the point clouds, as well as the initial positions
% and the positions after the coregistration.
%
% Additional settings may be provided in a parameter list:
%
% Boundary
% {[]} | 1x? vector
% If EdgeRejection is set, a vector can be provided that indexes into
% q and specifies which points of q are on the boundary.
%
% EdgeRejection
% {false} | true
% If EdgeRejection is true, point matches to edge vertices of q are
% ignored. Requires that boundary points of q are specified using
% Boundary or that a triangulation matrix for q is provided.
%
% Extrapolation
% {false} | true
% If Extrapolation is true, the iteration direction will be evaluated
% and extrapolated if possible using the method outlined by
% Besl and McKay 1992.
%
% Matching
% {bruteForce} | Delaunay | kDtree
% Specifies how point matching should be done.
% bruteForce is usually the slowest and kDtree is the fastest.
% Note that the kDtree option depends on the Statistics Toolbox
% v. 7.3 or higher.
%
% Minimize
% {point} | plane | lmaPoint
% Defines whether point to point or point to plane minimization
% should be performed. point is based on the SVD approach and is
% usually the fastest. plane will often yield higher accuracy. It
% uses linearized angles and requires surface normals for all points
% in q. Calculation of surface normals requires substantial
% preprocessing.
% The option lmaPoint does point to point minimization using the
% nonlinear least squares Levenberg-Marquardt algorithm. Results are
% generally the same as with point, but computation time may differ.
%
% Normals
% {[]} | n x 3 matrix
% A matrix of normals for the n points in q might be provided.
% Normals of q are used for point to plane minimization.
% Else normals will be found through a PCA of the 4 nearest
% neighbors.
%
% ReturnAll
% {false} | true
% Determines whether R and T should be returned for all iterations
% or only for the last one. If this option is set to true, R will be
% a 3x3x(k+1) matrix and T will be a 3x1x(k+1) matrix.
%
% Triangulation
% {[]} | ? x 3 matrix
% A triangulation matrix for the points in q can be provided,
% enabling EdgeRejection. The elements should index into q, defining
% point triples that act together as triangles.
%
% Verbose
% {false} | true
% Enables extrapolation output in the Command Window.
%
% Weight
% {@(match)ones(1,m)} | Function handle
% For point or plane minimization, a function handle to a weighting
% function can be provided. The weighting function will be called
% with one argument, a 1xm vector that specifies point pairs by
% indexing into q. The weighting function should return a 1xm vector
% of weights for every point pair.
%
% WorstRejection
% {0} | scalar in ]0; 1[
% Reject a given percentage of the worst point pairs, based on their
% Euclidean distance.
%
% Martin Kjer and Jakob Wilm, Technical University of Denmark, 2012
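%
% Illustrative usage sketch (not part of the original help; the variables
% q0, p0 and Rz below are hypothetical):
%   q0 = rand(3,200);                                 % fixed model cloud
%   Rz = [cosd(15) -sind(15) 0; sind(15) cosd(15) 0; 0 0 1];
%   p0 = Rz*q0 + repmat([0.05; -0.02; 0.1], 1, 200);  % displaced copy
%   [TR, TT, ER] = icp(q0, p0, 25, 'Matching', 'kDtree', 'WorstRejection', 0.1);
%   p_aligned = TR*p0 + repmat(TT, 1, size(p0,2));    % should come out close to q0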
% Use the inputParser class to validate input arguments.
inp = inputParser;
inp.addRequired('q', @(x)isreal(x) && size(x,1) == 3);
inp.addRequired('p', @(x)isreal(x) && size(x,1) == 3);
inp.addOptional('iter', 10, @(x)x > 0 && x < 10^5);
inp.addParamValue('Boundary', [], @(x)size(x,1) == 1);
inp.addParamValue('EdgeRejection', false, @(x)islogical(x));
inp.addParamValue('Extrapolation', false, @(x)islogical(x));
validMatching = {'bruteForce','Delaunay','kDtree'};
inp.addParamValue('Matching', 'bruteForce', @(x)any(strcmpi(x,validMatching)));
validMinimize = {'point','plane','lmapoint'};
inp.addParamValue('Minimize', 'point', @(x)any(strcmpi(x,validMinimize)));
inp.addParamValue('Normals', [], @(x)isreal(x) && size(x,1) == 3);
inp.addParamValue('NormalsData', [], @(x)isreal(x) && size(x,1) == 3);
inp.addParamValue('ReturnAll', false, @(x)islogical(x));
inp.addParamValue('Triangulation', [], @(x)isreal(x) && size(x,2) == 3);
inp.addParamValue('Verbose', false, @(x)islogical(x));
inp.addParamValue('Weight', @(x)ones(1,length(x)), @(x)isa(x,'function_handle'));
inp.addParamValue('WorstRejection', 0, @(x)isscalar(x) && x > 0 && x < 1);
inp.parse(q,p,varargin{:});
arg = inp.Results;
clear('inp');
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Actual implementation
% Allocate vector for RMS of errors in every iteration.
t = zeros(arg.iter+1,1);
% Start timer
tic;
Np = size(p,2);
% Transformed data point cloud
pt = p;
% Allocate vector for RMS of errors in every iteration.
ER = zeros(arg.iter+1,1);
% Initialize temporary transform vector and matrix.
T = zeros(3,1);
R = eye(3,3);
% Initialize total transform vector(s) and rotation matric(es).
TT = zeros(3,1, arg.iter+1);
TR = repmat(eye(3,3), [1,1, arg.iter+1]);
% If Minimize == 'plane', normals are needed
if (strcmp(arg.Minimize, 'plane') && isempty(arg.Normals))
arg.Normals = lsqnormest(q,4);
end
% If Matching == 'Delaunay', a triangulation is needed
if strcmp(arg.Matching, 'Delaunay')
DT = DelaunayTri(transpose(q));
end
% If Matching == 'kDtree', a kD tree should be built (req. Stat. TB >= 7.3)
if strcmp(arg.Matching, 'kDtree')
kdOBJ = KDTreeSearcher(transpose(q));
end
% If edge vertices should be rejected, find edge vertices
if arg.EdgeRejection
if isempty(arg.Boundary)
bdr = find_bound(q, arg.Triangulation);
else
bdr = arg.Boundary;
end
end
if arg.Extrapolation
% Initialize total transform vector (quaternion ; translation vec.)
qq = [ones(1,arg.iter+1);zeros(6,arg.iter+1)];
% Allocate vector for direction change and change angle.
dq = zeros(7,arg.iter+1);
theta = zeros(1,arg.iter+1);
end
t(1) = toc;
% Go into main iteration loop
for k=1:arg.iter
% Do matching
switch arg.Matching
case 'bruteForce'
[match mindist] = match_bruteForce(q,pt);
case 'Delaunay'
[match mindist] = match_Delaunay(q,pt,DT);
case 'kDtree'
[match mindist] = match_kDtree(q,pt,kdOBJ);
end
% If matches to edge vertices should be rejected
if arg.EdgeRejection
p_idx = not(ismember(match, bdr));
q_idx = match(p_idx);
mindist = mindist(p_idx);
else
p_idx = true(1, Np);
q_idx = match;
end
% If worst matches should be rejected
if arg.WorstRejection
edge = round((1-arg.WorstRejection)*sum(p_idx));
pairs = find(p_idx);
[~, idx] = sort(mindist);
p_idx(pairs(idx(edge:end))) = false;
q_idx = match(p_idx);
mindist = mindist(p_idx);
end
if k == 1
ER(k) = sqrt(sum(mindist.^2)/length(mindist));
end
switch arg.Minimize
case 'point'
% Determine weight vector
weights = arg.Weight(match);
[R,T] = eq_point(q(:,q_idx),pt(:,p_idx), weights(p_idx));
case 'plane'
weights = arg.Weight(match);
[R,T] = eq_plane(q(:,q_idx),pt(:,p_idx),arg.Normals(:,q_idx),weights(p_idx));
case 'lmaPoint'
[R,T] = eq_lmaPoint(q(:,q_idx),pt(:,p_idx));
end
% Add to the total transformation
TR(:,:,k+1) = R*TR(:,:,k);
TT(:,:,k+1) = R*TT(:,:,k)+T;
% Apply last transformation
pt = TR(:,:,k+1) * p + repmat(TT(:,:,k+1), 1, Np);
% Root mean of objective function
ER(k+1) = rms_error(q(:,q_idx), pt(:,p_idx));
% If Extrapolation, we might be able to move quicker
if arg.Extrapolation
qq(:,k+1) = [rmat2quat(TR(:,:,k+1));TT(:,:,k+1)];
dq(:,k+1) = qq(:,k+1) - qq(:,k);
theta(k+1) = (180/pi)*acos(dot(dq(:,k),dq(:,k+1))/(norm(dq(:,k))*norm(dq(:,k+1))));
if arg.Verbose
disp(['Direction change ' num2str(theta(k+1)) ' degree in iteration ' num2str(k)]);
end
if k>2 && theta(k+1) < 10 && theta(k) < 10
d = [ER(k+1), ER(k), ER(k-1)];
v = [0, -norm(dq(:,k+1)), -norm(dq(:,k))-norm(dq(:,k+1))];
vmax = 25 * norm(dq(:,k+1));
dv = extrapolate(v,d,vmax);
if dv ~= 0
q_mark = qq(:,k+1) + dv * dq(:,k+1)/norm(dq(:,k+1));
q_mark(1:4) = q_mark(1:4)/norm(q_mark(1:4));
qq(:,k+1) = q_mark;
TR(:,:,k+1) = quat2rmat(qq(1:4,k+1));
TT(:,:,k+1) = qq(5:7,k+1);
% Reapply total transformation
pt = TR(:,:,k+1) * p + repmat(TT(:,:,k+1), 1, Np);
% Recalculate root mean of objective function
% Note this is costly and only for fun!
switch arg.Matching
case 'bruteForce'
[~, mindist] = match_bruteForce(q,pt);
case 'Delaunay'
[~, mindist] = match_Delaunay(q,pt,DT);
case 'kDtree'
[~, mindist] = match_kDtree(q,pt,kdOBJ);
end
ER(k+1) = sqrt(sum(mindist.^2)/length(mindist));
end
end
end
t(k+1) = toc;
end
if not(arg.ReturnAll)
TR = TR(:,:,end);
TT = TT(:,:,end);
end
% keep track of some information (added by JMS)
info.q_idx = q_idx;
info.p_idx = find(p_idx);
info.qout = q(:,q_idx);
info.pout = pt(:,p_idx);
info.pin = p(:,p_idx);
% run some diagnostics
npnt = size(info.pout,2);
q_origin = mean(info.qout,2); q_radius = sqrt(sum((info.qout-q_origin*ones(1,npnt)).^2));
p_origin = mean(info.pout,2); p_radius = sqrt(sum((info.pout-q_origin*ones(1,npnt)).^2)); %q_origin is on purpose here
info.distanceout = sqrt(sum((info.qout-info.pout).^2)).*sign(p_radius-q_radius);
info.distancein = sqrt(sum((info.qout-info.pin ).^2)).*sign(p_radius-q_radius);
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function [match mindist] = match_bruteForce(q, p)
m = size(p,2);
n = size(q,2);
match = zeros(1,m);
mindist = zeros(1,m);
for ki=1:m
d=zeros(1,n);
for ti=1:3
d=d+(q(ti,:)-p(ti,ki)).^2;
end
[mindist(ki),match(ki)]=min(d);
end
mindist = sqrt(mindist);
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function [match mindist] = match_Delaunay(q, p, DT)
match = transpose(nearestNeighbor(DT, transpose(p)));
mindist = sqrt(sum((p-q(:,match)).^2,1));
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function [match mindist] = match_kDtree(~, p, kdOBJ)
[match mindist] = knnsearch(kdOBJ,transpose(p));
match = transpose(match);
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function [R,T] = eq_point(q,p,weights)
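% Weighted point-to-point step (descriptive note): the classic SVD/Kabsch
% solution -- weighted centroids, cross-covariance matrix N, then R from the
% SVD of N with a determinant correction to avoid reflections, and
% T = q_bar - R*p_bar.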
m = size(p,2);
n = size(q,2);
% normalize weights
weights = weights ./ sum(weights);
% find data centroid and deviations from centroid
q_bar = q * transpose(weights);
q_mark = q - repmat(q_bar, 1, n);
% Apply weights
q_mark = q_mark .* repmat(weights, 3, 1);
% find data centroid and deviations from centroid
p_bar = p * transpose(weights);
p_mark = p - repmat(p_bar, 1, m);
% Apply weights
%p_mark = p_mark .* repmat(weights, 3, 1);
N = p_mark*transpose(q_mark); % taking points of q in matched order
[U,~,V] = svd(N); % singular value decomposition
R = V*diag([1 1 det(U*V')])*transpose(U);
T = q_bar - R*p_bar;
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function [R,T] = eq_plane(q,p,n,weights)
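% Point-to-plane step with linearized rotation angles (descriptive note):
% stack [cross(p,n); n] into cn, form the 6x6 normal equations C*X = b for
% three small rotation angles and a translation, then rebuild R from the
% solved angles and take T = X(4:6).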
n = n .* repmat(weights,3,1);
c = cross(p,n);
cn = vertcat(c,n);
C = cn*transpose(cn);
b = - [sum(sum((p-q).*repmat(cn(1,:),3,1).*n));
sum(sum((p-q).*repmat(cn(2,:),3,1).*n));
sum(sum((p-q).*repmat(cn(3,:),3,1).*n));
sum(sum((p-q).*repmat(cn(4,:),3,1).*n));
sum(sum((p-q).*repmat(cn(5,:),3,1).*n));
sum(sum((p-q).*repmat(cn(6,:),3,1).*n))];
X = C\b;
cx = cos(X(1)); cy = cos(X(2)); cz = cos(X(3));
sx = sin(X(1)); sy = sin(X(2)); sz = sin(X(3));
R = [cy*cz cz*sx*sy-cx*sz cx*cz*sy+sx*sz;
cy*sz cx*cz+sx*sy*sz cx*sy*sz-cz*sx;
-sy cy*sx cx*cy];
T = X(4:6);
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function [R,T] = eq_lmaPoint(q,p)
Rx = @(a)[1 0 0;
0 cos(a) -sin(a);
0 sin(a) cos(a)];
Ry = @(b)[cos(b) 0 sin(b);
0 1 0;
-sin(b) 0 cos(b)];
Rz = @(g)[cos(g) -sin(g) 0;
sin(g) cos(g) 0;
0 0 1];
Rot = @(x)Rx(x(1))*Ry(x(2))*Rz(x(3));
myfun = @(x,xdata)Rot(x(1:3))*xdata+repmat(x(4:6),1,length(xdata));
options = optimset('Algorithm', 'levenberg-marquardt');
x = lsqcurvefit(myfun, zeros(6,1), p, q, [], [], options);
R = Rot(x(1:3));
T = x(4:6);
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Extrapolation in quaternion space. Details are found in:
%
% Besl, P., & McKay, N. (1992). A method for registration of 3-D shapes.
% IEEE Transactions on Pattern Analysis and Machine Intelligence, 239-256.
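% Descriptive note: fit a line and a parabola to the last three error values d
% at step positions v; use the parabola vertex or the line's zero crossing when
% it lies in a valid range below vmax, cap the step at vmax when both exceed
% it, and return 0 (no extrapolation) otherwise.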
function [dv] = extrapolate(v,d,vmax)
p1 = polyfit(v,d,1); % linear fit
p2 = polyfit(v,d,2); % parabolic fit
v1 = -p1(2)/p1(1); % linear zero crossing
v2 = -p2(2)/(2*p2(1)); % polynomial top point
if issorted([0 v2 v1 vmax]) || issorted([0 v2 vmax v1])
disp('Parabolic update!');
dv = v2;
elseif issorted([0 v1 v2 vmax]) || issorted([0 v1 vmax v2])...
|| (v2 < 0 && issorted([0 v1 vmax]))
disp('Line based update!');
dv = v1;
elseif v1 > vmax && v2 > vmax
disp('Maximum update!');
dv = vmax;
else
disp('No extrapolation!');
dv = 0;
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Determine the RMS error between two point equally sized point clouds with
% point correspondance.
% ER = rms_error(p1,p2) where p1 and p2 are 3xn matrices.
function ER = rms_error(p1,p2)
dsq = sum(power(p1 - p2, 2),1);
ER = sqrt(mean(dsq));
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Converts (orthogonal) rotation matrices R to (unit) quaternion
% representations
%
% Input: A 3x3xn matrix of rotation matrices
% Output: A 4xn matrix of n corresponding quaternions
%
% http://en.wikipedia.org/wiki/Rotation_matrix#Quaternion
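% Quick sanity check (illustrative): rmat2quat(eye(3)) gives [1;0;0;0], the
% identity quaternion, and quat2rmat([1;0;0;0]) gives back eye(3).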
function quaternion = rmat2quat(R)
Qxx = R(1,1,:);
Qxy = R(1,2,:);
Qxz = R(1,3,:);
Qyx = R(2,1,:);
Qyy = R(2,2,:);
Qyz = R(2,3,:);
Qzx = R(3,1,:);
Qzy = R(3,2,:);
Qzz = R(3,3,:);
w = 0.5 * sqrt(1+Qxx+Qyy+Qzz);
x = 0.5 * sign(Qzy-Qyz) .* sqrt(1+Qxx-Qyy-Qzz);
y = 0.5 * sign(Qxz-Qzx) .* sqrt(1-Qxx+Qyy-Qzz);
z = 0.5 * sign(Qyx-Qxy) .* sqrt(1-Qxx-Qyy+Qzz);
quaternion = reshape([w;x;y;z],4,[]);
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Converts (unit) quaternion representations to (orthogonal) rotation matrices R
%
% Input: A 4xn matrix of n quaternions
% Output: A 3x3xn matrix of corresponding rotation matrices
%
% http://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#From_a_quaternion_to_an_orthogonal_matrix
function R = quat2rmat(quaternion)
q0(1,1,:) = quaternion(1,:);
qx(1,1,:) = quaternion(2,:);
qy(1,1,:) = quaternion(3,:);
qz(1,1,:) = quaternion(4,:);
R = [q0.^2+qx.^2-qy.^2-qz.^2 2*qx.*qy-2*q0.*qz 2*qx.*qz+2*q0.*qy;
2*qx.*qy+2*q0.*qz q0.^2-qx.^2+qy.^2-qz.^2 2*qy.*qz-2*q0.*qx;
2*qx.*qz-2*q0.*qy 2*qy.*qz+2*q0.*qx q0.^2-qx.^2-qy.^2+qz.^2];
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Least squares normal estimation from point clouds using PCA
%
% H. Hoppe, T. DeRose, T. Duchamp, J. McDonald, and W. Stuetzle.
% Surface reconstruction from unorganized points.
% In Proceedings of ACM SIGGRAPH, pages 71-78, 1992.
%
% p should be a matrix containing the horizontally concatenated column
% vectors with points. k is a scalar indicating how many neighbors the
% normal estimation is based upon.
%
% Note that for large point sets, the function performs significantly
% faster if Statistics Toolbox >= v. 7.3 is installed.
%
% Jakob Wilm 2010
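% Illustrative call (hypothetical variable names): for a 3xm point matrix pc,
%   nrm = lsqnormest(pc, 4);
% returns a 3xm matrix of unit normals, one per column, each being the
% eigenvector of the local covariance with the smallest eigenvalue (the sign
% of each normal is arbitrary).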
function n = lsqnormest(p, k)
m = size(p,2);
n = zeros(3,m);
v = ver('stats');
if str2double(v.Version) >= 7.5
neighbors = transpose(knnsearch(transpose(p), transpose(p), 'k', k+1));
else
neighbors = k_nearest_neighbors(p, p, k+1);
end
for i = 1:m
x = p(:,neighbors(2:end, i));
p_bar = 1/k * sum(x,2);
P = (x - repmat(p_bar,1,k)) * transpose(x - repmat(p_bar,1,k)); %spd matrix P
%P = 2*cov(x);
[V,D] = eig(P);
[~, idx] = min(diag(D)); % chooses the smallest eigenvalue
n(:,i) = V(:,idx); % returns the corresponding eigenvector
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Program to find the k - nearest neighbors (kNN) within a set of points.
% Distance metric used: Euclidean distance
%
% Note that this function makes repetitive use of min(), which seems to be
% more efficient than sort() for k < 30.
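% Illustrative call (hypothetical names): for a 3xN dataMatrix and a 3xM
% queryMatrix,
%   [ids, dists] = k_nearest_neighbors(dataMatrix, queryMatrix, 3);
% returns kxM index and distance matrices with the nearest neighbor first in
% each column.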
function [neighborIds neighborDistances] = k_nearest_neighbors(dataMatrix, queryMatrix, k)
numDataPoints = size(dataMatrix,2);
numQueryPoints = size(queryMatrix,2);
neighborIds = zeros(k,numQueryPoints);
neighborDistances = zeros(k,numQueryPoints);
D = size(dataMatrix, 1); %dimensionality of points
for i=1:numQueryPoints
d=zeros(1,numDataPoints);
for t=1:D % this is to avoid slow repmat()
d=d+(dataMatrix(t,:)-queryMatrix(t,i)).^2;
end
for j=1:k
[s,t] = min(d);
neighborIds(j,i)=t;
neighborDistances(j,i)=sqrt(s);
d(t) = NaN; % remove found number from d
end
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Boundary point determination. Given a set of 3D points and a
% corresponding triangle representation, returns those point indices that
% define the border/edge of the surface.
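% Illustrative call (hypothetical names): with pts a 3xn point matrix and tri
% a ?x3 triangle list indexing into pts, bound = find_bound(pts, tri) returns
% the indices of the vertices lying on the free boundary of the surface.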
function bound = find_bound(pts, poly)
%Correcting polygon indices and converting datatype
poly = double(poly);
pts = double(pts);
%Calculating freeboundary points:
TR = TriRep(poly, pts(1,:)', pts(2,:)', pts(3,:)');
FF = freeBoundary(TR);
%Output
bound = FF(:,1);
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
github
|
lcnbeapp/beapp-master
|
DataHash.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/fileexchange/DataHash.m
| 18,999 |
utf_8
|
8e6737cdb54095fc9656428c35490821
|
function Hash = DataHash(Data, Opt)
% DATAHASH - Checksum for Matlab array of any type
% This function creates a hash value for an input of any type. The type and
% dimensions of the input are considered as default, such that UINT8([0,0]) and
% UINT16(0) have different hash values. Nested STRUCTs and CELLs are parsed
% recursively.
%
% Hash = DataHash(Data, Opt)
% INPUT:
% Data: Array of these built-in types:
% (U)INT8/16/32/64, SINGLE, DOUBLE, (real/complex, full/sparse)
% CHAR, LOGICAL, CELL (nested), STRUCT (scalar or array, nested),
% function_handle.
% Opt: Struct to specify the hashing algorithm and the output format.
% Opt and all its fields are optional.
% Opt.Method: String, known methods for Java 1.6 (Matlab 2011b):
% 'SHA-1', 'SHA-256', 'SHA-384', 'SHA-512', 'MD2', 'MD5'.
% Call DataHash without inputs to get a list of available methods.
% Default: 'MD5'.
% Opt.Format: String specifying the output format:
% 'hex', 'HEX': Lower/uppercase hexadecimal string.
% 'double', 'uint8': Numerical vector.
% 'base64': Base64 encoded string, only printable ASCII
% characters, shorter than 'hex', no padding.
% Default: 'hex'.
% Opt.Input: Type of the input as string, not case-sensitive:
% 'array': The contents, type and size of the input [Data] are
% considered for the creation of the hash. Nested CELLs
% and STRUCT arrays are parsed recursively. Empty arrays of
% different type reply different hashes.
% 'file': [Data] is treated as file name and the hash is calculated
% for the files contents.
% 'bin': [Data] is a numerical, LOGICAL or CHAR array. Only the
% binary contents of the array is considered, such that
% e.g. empty arrays of different type reply the same hash.
% 'ascii': Same as 'bin', but only the 8-bit ASCII part of the 16-bit
% Matlab CHARs is considered.
% Default: 'array'.
%
% OUTPUT:
% Hash: String, DOUBLE or UINT8 vector. The length depends on the hashing
% method.
%
% EXAMPLES:
% % Default: MD5, hex:
% DataHash([]) % 5b302b7b2099a97ba2a276640a192485
% % MD5, Base64:
% Opt = struct('Format', 'base64', 'Method', 'MD5');
% DataHash(int32(1:10), Opt) % +tJN9yeF89h3jOFNN55XLg
% % SHA-1, Base64:
% S.a = uint8([]);
% S.b = {{1:10}, struct('q', uint64(415))};
% Opt.Method = 'SHA-1';
% Opt.Format = 'HEX';
% DataHash(S, Opt) % 18672BE876463B25214CA9241B3C79CC926F3093
% % SHA-1 of binary values:
% Opt = struct('Method', 'SHA-1', 'Input', 'bin');
% DataHash(1:8, Opt) % 826cf9d3a5d74bbe415e97d4cecf03f445f69225
% % SHA-256, consider ASCII part only (Matlab's CHAR has 16 bits!):
% Opt.Method = 'SHA-256';
% Opt.Input = 'ascii';
% DataHash('abc', Opt)
% % ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad
% % Or equivalently:
% Opt.Input = 'bin';
% DataHash(uint8('abc'), Opt)
%
% NOTES:
% Function handles and user-defined objects cannot be converted uniquely:
% - The subfunction ConvertFuncHandle uses the built-in function FUNCTIONS,
% but the replied struct can depend on the Matlab version.
% - It is tried to convert objects to UINT8 streams in the subfunction
% ConvertObject. A conversion by STRUCT() might be more appropriate.
% Adjust these subfunctions on demand.
%
% MATLAB CHARs have 16 bits! Use Opt.Input='ascii' for comparisons with e.g.
% online hash generators.
%
% Matt Raum suggested this for e.g. user-defined objects:
% DataHash(getByteStreamFromArray(Data))
% This works very well, but unfortunately getByteStreamFromArray is
% undocumented, such that it might vanish in the future or reply different
% output.
%
% For arrays the calculated hash value might be changed in new versions.
% Calling this function without inputs replies the version of the hash.
%
% The C-Mex function GetMD5 is 2 to 100 times faster, but obtains MD5 only:
% http://www.mathworks.com/matlabcentral/fileexchange/25921
%
% Tested: Matlab 7.7, 7.8, 7.13, 8.6, WinXP/32, Win7/64
% Author: Jan Simon, Heidelberg, (C) 2011-2016 matlab.2010(a)n(MINUS)simon.de
%
% See also: TYPECAST, CAST.
%
% Michael Kleder, "Compute Hash", no structs and cells:
% http://www.mathworks.com/matlabcentral/fileexchange/8944
% Tim, "Serialize/Deserialize", converts structs and cells to a byte stream:
% http://www.mathworks.com/matlabcentral/fileexchange/29457
% $JRev: R-F V:031 Sum:NOmLtPnVFcz3 Date:28-Feb-2016 15:51:08 $
% $License: BSD (use/copy/change/redistribute on own risk, mention the author) $
% $File: Tools\GLFile\DataHash.m $
% History:
% 001: 01-May-2011 21:52, First version.
% 007: 10-Jun-2011 10:38, [Opt.Input], binary data, complex values considered.
% 011: 26-May-2012 15:57, Fixed: Failed for binary input and empty data.
% 014: 04-Nov-2012 11:37, Consider Mex-, MDL- and P-files also.
% Thanks to David (author 243360), who found this bug.
% Jan Achterhold (author 267816) suggested to consider Java objects.
% 016: 01-Feb-2015 20:53, Java heap space exhausted for large files.
% Now files are processed in chunks to save memory.
% 017: 15-Feb-2015 19:40, Collisions: Same hash for different data.
% Examples: zeros(1,1) and zeros(1,1,0)
% complex(0) and zeros(1,1,0,0)
% Now the number of dimensions is included, to avoid this.
% 022: 30-Mar-2015 00:04, Bugfix: Failed for strings and [] without TYPECASTX.
% Ross found these 2 bugs, which occur when TYPECASTX is not installed.
% If you need the base64 format padded with '=' characters, adjust
% fBase64_enc as you like.
% 026: 29-Jun-2015 00:13, Changed hash for STRUCTs.
% Struct arrays are analysed field by field now, which is much faster.
% 027: 13-Sep-2015 19:03, 'ascii' input as abbrev. for Input='bin' and UINT8().
% 028: 15-Oct-2015 23:11, Example values in help section updated to v022.
% 029: 16-Oct-2015 22:32, Use default options for empty input.
% 031: 28-Feb-2016 15:10, New hash value to get same reply as GetMD5.
% New Matlab versions (at least 2015b) use a fast method for TYPECAST, such
% that calling James Tursa's TYPECASTX is not needed anymore.
% Matlab 6.5 not supported anymore: MException for CATCH.
% OPEN BUGS:
% Nath wrote:
% function handle referring to struct containing the function will create
% infinite loop. Is there any workaround?
% Example:
% d= dynamicprops();
% addprop(d,'f');
% d.f= @(varargin) struct2cell(d);
% DataHash(d.f) % infinite loop
% This is caught with an error message concerning the recursion limit now.
% Main function: ===============================================================
% Default options: -------------------------------------------------------------
Method = 'MD5';
OutFormat = 'hex';
isFile = false;
isBin = false;
% Check number and type of inputs: ---------------------------------------------
nArg = nargin;
if nArg == 2
if isa(Opt, 'struct') == 0 % Bad type of 2nd input:
Error_L('BadInput2', '2nd input [Opt] must be a struct.');
end
% Specify hash algorithm:
if isfield(Opt, 'Method') && ~isempty(Opt.Method) % Short-circuiting
Method = upper(Opt.Method);
end
% Specify output format:
if isfield(Opt, 'Format') && ~isempty(Opt.Format) % Short-circuiting
OutFormat = Opt.Format;
end
% Check if the Input type is specified - default: 'array':
if isfield(Opt, 'Input') && ~isempty(Opt.Input) % Short-circuiting
if strcmpi(Opt.Input, 'File')
if ischar(Data) == 0
Error_L('CannotOpen', '1st input FileName must be a string');
end
isFile = true;
elseif strncmpi(Opt.Input, 'bin', 3) % Accept 'binary' also
if (isnumeric(Data) || ischar(Data) || islogical(Data)) == 0 || ...
issparse(Data)
Error_L('BadDataType', ...
'1st input must be numeric, CHAR or LOGICAL for binary input.');
end
isBin = true;
elseif strncmpi(Opt.Input, 'asc', 3) % 8-bit ASCII characters
if ~ischar(Data)
Error_L('BadDataType', ...
'1st input must be a CHAR for the input type ASCII.');
end
isBin = true;
Data = uint8(Data);
end
end
elseif nArg == 0 % Reply version of this function:
R = Version_L;
if nargout == 0
disp(R);
else
Hash = R;
end
return;
elseif nArg ~= 1 % Bad number of arguments:
Error_L('BadNInput', '1 or 2 inputs required.');
end
% Create the engine: -----------------------------------------------------------
try
Engine = java.security.MessageDigest.getInstance(Method);
catch
Error_L('BadInput2', 'Invalid algorithm: [%s].', Method);
end
% Create the hash value: -------------------------------------------------------
if isFile
% Open the file:
FID = fopen(Data, 'r');
if FID < 0
% Check existence of file:
Found = FileExist_L(Data);
if Found
Error_L('CantOpenFile', 'Cannot open file: %s.', Data);
else
Error_L('FileNotFound', 'File not found: %s.', Data);
end
end
% Read file in chunks to save memory and Java heap space:
Chunk = 1e6;
[Data, Count] = fread(FID, Chunk, '*uint8');
Engine.update(Data);
while Count == Chunk
[Data, Count] = fread(FID, Chunk, '*uint8');
Engine.update(Data);
end
fclose(FID);
% Calculate the hash:
Hash = typecast(Engine.digest, 'uint8');
elseif isBin % Contents of an elementary array, type tested already:
if isempty(Data) % Nothing to do, Engine.update fails for empty input!
Hash = typecast(Engine.digest, 'uint8');
else % Matlab's TYPECAST is less elegant:
if isnumeric(Data)
if isreal(Data)
Engine.update(typecast(Data(:), 'uint8'));
else
Engine.update(typecast(real(Data(:)), 'uint8'));
Engine.update(typecast(imag(Data(:)), 'uint8'));
end
elseif islogical(Data) % TYPECAST cannot handle LOGICAL
Engine.update(typecast(uint8(Data(:)), 'uint8'));
elseif ischar(Data) % TYPECAST cannot handle CHAR
Engine.update(typecast(uint16(Data(:)), 'uint8'));
% Bugfix: Line removed
end
Hash = typecast(Engine.digest, 'uint8');
end
else % Array with type:
Engine = CoreHash(Data, Engine);
Hash = typecast(Engine.digest, 'uint8');
end
% Convert hash specific output format: -----------------------------------------
switch OutFormat
case 'hex'
Hash = sprintf('%.2x', double(Hash));
case 'HEX'
Hash = sprintf('%.2X', double(Hash));
case 'double'
Hash = double(reshape(Hash, 1, []));
case 'uint8'
Hash = reshape(Hash, 1, []);
case 'base64'
Hash = fBase64_enc(double(Hash));
otherwise
Error_L('BadOutFormat', ...
'[Opt.Format] must be: HEX, hex, uint8, double, base64.');
end
% return;
% ******************************************************************************
function Engine = CoreHash(Data, Engine)
% This methods uses the slower TYPECAST of Matlab
% Consider the type and dimensions of the array to distinguish arrays with the
% same data, but different shape: [0 x 0] and [0 x 1], [1,2] and [1;2],
% DOUBLE(0) and SINGLE([0,0]):
% < v016: [class, size, data]. BUG! 0 and zeros(1,1,0) had the same hash!
% >= v016: [class, ndims, size, data]
Engine.update([uint8(class(Data)), ...
typecast(uint64([ndims(Data), size(Data)]), 'uint8')]);
if issparse(Data) % Sparse arrays to struct:
[S.Index1, S.Index2, S.Value] = find(Data);
Engine = CoreHash(S, Engine);
elseif isstruct(Data) % Hash for all array elements and fields:
F = sort(fieldnames(Data)); % Ignore order of fields
for iField = 1:length(F) % Loop over fields
aField = F{iField};
Engine.update(uint8(aField));
for iS = 1:numel(Data) % Loop over elements of struct array
Engine = CoreHash(Data(iS).(aField), Engine);
end
end
elseif iscell(Data) % Get hash for all cell elements:
for iS = 1:numel(Data)
Engine = CoreHash(Data{iS}, Engine);
end
elseif isempty(Data) % Nothing to do
elseif isnumeric(Data)
if isreal(Data)
Engine.update(typecast(Data(:), 'uint8'));
else
Engine.update(typecast(real(Data(:)), 'uint8'));
Engine.update(typecast(imag(Data(:)), 'uint8'));
end
elseif islogical(Data) % TYPECAST cannot handle LOGICAL
Engine.update(typecast(uint8(Data(:)), 'uint8'));
elseif ischar(Data) % TYPECAST cannot handle CHAR
Engine.update(typecast(uint16(Data(:)), 'uint8'));
elseif isa(Data, 'function_handle')
Engine = CoreHash(ConvertFuncHandle(Data), Engine);
elseif (isobject(Data) || isjava(Data)) && ismethod(Data, 'hashCode')
Engine = CoreHash(char(Data.hashCode), Engine);
else % Most likely a user-defined object:
try
BasicData = ConvertObject(Data);
catch ME
error(['JSimon:', mfilename, ':BadDataType'], ...
'%s: Cannot create elementary array for type: %s\n %s', ...
mfilename, class(Data), ME.message);
end
try
Engine = CoreHash(BasicData, Engine);
catch ME
if strcmpi(ME.identifier, 'MATLAB:recursionLimit')
ME = MException(['JSimon:', mfilename, ':RecursiveType'], ...
'%s: Cannot create hash for recursive data type: %s', ...
mfilename, class(Data));
end
throw(ME);
end
end
% return;
% ******************************************************************************
function FuncKey = ConvertFuncHandle(FuncH)
% The subfunction ConvertFuncHandle converts function_handles to a struct
% using the Matlab function FUNCTIONS. The output of this function changes
% with the Matlab version, such that DataHash(@sin) replies different hashes
% under Matlab 6.5 and 2009a.
% An alternative is using the function name and name of the file for
% function_handles, but this is not unique for nested or anonymous functions.
% If the MATLABROOT is removed from the file's path, at least the hash of
% Matlab's toolbox functions is (usually!) not influenced by the version.
% Finally I'm in doubt if there is a unique method to hash function handles.
% Please adjust the subfunction ConvertFuncHandle to your needs.
% The Matlab version influences the conversion by FUNCTIONS:
% 1. The format of the struct replied FUNCTIONS is not fixed,
% 2. The full paths of toolbox function e.g. for @mean differ.
FuncKey = functions(FuncH);
% Include modification file time and file size. Suggested by Aslak Grinsted:
if ~isempty(FuncKey.file)
d = dir(FuncKey.file);
if ~isempty(d)
FuncKey.filebytes = d.bytes;
FuncKey.filedate = d.datenum;
end
end
% ALTERNATIVE: Use name and path. The <matlabroot> part of the toolbox functions
% is replaced such that the hash for @mean does not depend on the Matlab
% version.
% Drawbacks: Anonymous functions, nested functions...
% funcStruct = functions(FuncH);
% funcfile = strrep(funcStruct.file, matlabroot, '<MATLAB>');
% FuncKey = uint8([funcStruct.function, ' ', funcfile]);
% Finally I'm afraid there is no unique method to get a hash for a function
% handle. Please adjust this conversion to your needs.
% return;
% ******************************************************************************
function DataBin = ConvertObject(DataObj)
% Convert a user-defined object to a binary stream. There cannot be a unique
% solution, so this part is left for the user...
try % Perhaps a direct conversion is implemented:
DataBin = uint8(DataObj);
% Matt Raum had this excellent idea - unfortunately this function is
% undocumented and might not be supported in the future:
% DataBin = getByteStreamFromArray(DataObj);
catch % Or perhaps this is better:
WarnS = warning('off', 'MATLAB:structOnObject');
DataBin = struct(DataObj);
warning(WarnS);
end
% return;
% ******************************************************************************
function Out = fBase64_enc(In)
% Encode numeric vector of UINT8 values to base64 string.
% The intention of this is to create a shorter hash than the HEX format.
% Therefore a padding with '=' characters is omitted on purpose.
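% For example (illustrative), fBase64_enc(double('Hi')) returns 'SGk', i.e. the
% standard Base64 encoding of "Hi" with the trailing '=' padding dropped.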
Pool = [65:90, 97:122, 48:57, 43, 47]; % [A:Z, a:z, 0:9, +, /]
v8 = [128; 64; 32; 16; 8; 4; 2; 1];
v6 = [32, 16, 8, 4, 2, 1];
In = reshape(In, 1, []);
X = rem(floor(In(ones(8, 1), :) ./ v8(:, ones(length(In), 1))), 2);
Y = reshape([X(:); zeros(6 - rem(numel(X), 6), 1)], 6, []);
Out = char(Pool(1 + v6 * Y));
% return;
% ******************************************************************************
function Ex = FileExist_L(FileName)
% A more reliable version of EXIST(FileName, 'file'):
dirFile = dir(FileName);
if length(dirFile) == 1
Ex = ~(dirFile.isdir);
else
Ex = false;
end
% return;
% ******************************************************************************
function R = Version_L()
% The output differs between versions of this function. So give the user a
% chance to recognize the version:
% 1: 01-May-2011, Initial version
% 2: 15-Feb-2015, The number of dimensions is considered in addition.
% In version 1 these variables had the same hash:
% zeros(1,1) and zeros(1,1,0), complex(0) and zeros(1,1,0,0)
% 3: 29-Jun-2015, Struct arrays are processed field by field and not element
% by element, because this is much faster. In consequence the hash value
% differs, if the input contains a struct.
% 4: 28-Feb-2016 15:20, same output as GetMD5 for MD5 sums. Therefore the
% dimensions are casted to UINT64 at first.
R.HashVersion = 4;
R.Date = [2016, 2, 28];
R.HashMethod = {};
try
Provider = java.security.Security.getProviders;
for iProvider = 1:numel(Provider)
S = char(Provider(iProvider).getServices);
Index = strfind(S, 'MessageDigest.');
for iDigest = 1:length(Index)
Digest = strtok(S(Index(iDigest):end));
Digest = strrep(Digest, 'MessageDigest.', '');
R.HashMethod = cat(2, R.HashMethod, {Digest});
end
end
catch ME
fprintf(2, '%s\n', ME.message);
R.HashMethod = 'error';
end
% return;
% ******************************************************************************
function Error_L(ID, varargin)
error(['JSimon:', mfilename, ':', ID], ['*** %s: ', varargin{1}], ...
mfilename, varargin{2:nargin - 1});
% return;
|
github
|
lcnbeapp/beapp-master
|
meshresample.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/iso2mesh/meshresample.m
| 1,665 |
utf_8
|
a84fc4c4bfcb7894e079f34b8a9c6f9d
|
function [node,elem]=meshresample(v,f,keepratio)
%
% [node,elem]=meshresample(v,f,keepratio)
%
% resample mesh using CGAL mesh simplification utility
%
% author: Qianqian Fang (fangq<at> nmr.mgh.harvard.edu)
% date: 2007/11/12
%
% input:
% v: list of nodes
% f: list of surface elements (each row for each triangle)
% keepratio: decimation rate, a number less than 1, as the percentage
% of the elements after the sampling
%
% output:
% node: the node coordinates of the sampled surface mesh
% elem: the element list of the sampled surface mesh
%
% -- this function is part of iso2mesh toolbox (http://iso2mesh.sf.net)
%
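% example (illustrative; assumes the bundled cgalsimp2 binary is available):
%   [newnode,newface]=meshresample(node,face,0.2); % keep roughly 20% of the triangles
%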
[node,elem]=domeshsimplify(v,f,keepratio);
if(length(node)==0)
warning(['Your input mesh contains topological defects, and the ',...
'mesh resampling utility aborted during processing. Now iso2mesh ',...
'is trying to repair your mesh with meshcheckrepair. ',...
'You can also call this manually before passing your mesh to meshresample.'] );
[vnew,fnew]=meshcheckrepair(v,f);
[node,elem]=domeshsimplify(vnew,fnew,keepratio);
end
[node,I,J]=unique(node,'rows');
elem=J(elem);
saveoff(node,elem,mwpath('post_remesh.off'));
end
% function to perform the actual resampling
function [node,elem]=domeshsimplify(v,f,keepratio)
exesuff=getexeext;
exesuff=fallbackexeext(exesuff,'cgalsimp2');
saveoff(v,f,mwpath('pre_remesh.off'));
deletemeshfile(mwpath('post_remesh.off'));
system([' "' mcpath('cgalsimp2') exesuff '" "' mwpath('pre_remesh.off') '" ' num2str(keepratio) ' "' mwpath('post_remesh.off') '"']);
[node,elem]=readoff(mwpath('post_remesh.off'));
end
|
github
|
lcnbeapp/beapp-master
|
savejson.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/iso2mesh/savejson.m
| 14,306 |
utf_8
|
efe098b229009f38be24329e7d8af75d
|
function json=savejson(rootname,obj,varargin)
%
% json=savejson(rootname,obj,filename)
% or
% json=savejson(rootname,obj,opt)
% json=savejson(rootname,obj,'param1',value1,'param2',value2,...)
%
% convert a MATLAB object (cell, struct or array) into a JSON (JavaScript
% Object Notation) string
%
% author: Qianqian Fang (fangq<at> nmr.mgh.harvard.edu)
% created on 2011/09/09
%
% $Id$
%
% input:
% rootname: name of the root-object, if set to '', will use variable name
% obj: a MATLAB object (array, cell, cell array, struct, struct array)
% filename: a string for the file name to save the output JSON data
% opt: a struct for additional options, use [] if all use default
% opt can have the following fields (first in [.|.] is the default)
%
% opt.FileName [''|string]: a file name to save the output JSON data
% opt.FloatFormat ['%.10g'|string]: format to show each numeric element
% of a 1D/2D array;
% opt.ArrayIndent [1|0]: if 1, output explicit data array with
% precedent indentation; if 0, no indentation
% opt.ArrayToStruct[0|1]: when set to 0, savejson outputs 1D/2D
% array in JSON array format; if sets to 1, an
% array will be shown as a struct with fields
% "_ArrayType_", "_ArraySize_" and "_ArrayData_"; for
% sparse arrays, the non-zero elements will be
% saved to _ArrayData_ field in triplet-format i.e.
% (ix,iy,val) and "_ArrayIsSparse_" will be added
% with a value of 1; for a complex array, the
% _ArrayData_ array will include two columns
% (4 for sparse) to record the real and imaginary
% parts, and also "_ArrayIsComplex_":1 is added.
% opt.ParseLogical [0|1]: if this is set to 1, logical array elem
% will use true/false rather than 1/0.
% opt.NoRowBracket [1|0]: if this is set to 1, arrays with a single
% numerical element will be shown without a square
% bracket, unless it is the root object; if 0, square
% brackets are forced for any numerical arrays.
% opt.ForceRootName [0|1]: when set to 1 and rootname is empty, savejson
% will use the name of the passed obj variable as the
% root object name; if obj is an expression and
% does not have a name, 'root' will be used; if this
% is set to 0 and rootname is empty, the root level
% will be merged down to the lower level.
% opt.Inf ['"$1_Inf_"'|string]: a customized regular expression pattern
% to represent +/-Inf. The matched pattern is '([-+]*)Inf'
% and $1 represents the sign. For those who want to use
% 1e999 to represent Inf, they can set opt.Inf to '$11e999'
% opt.NaN ['"_NaN_"'|string]: a customized regular expression pattern
% to represent NaN
% opt.JSONP [''|string]: to generate a JSONP output (JSON with padding),
% for example, if opt.JSONP='foo', the JSON data is
% wrapped inside a function call as 'foo(...);'
% opt.UnpackHex [1|0]: convert the 0x[hex code] output by loadjson
% back to the string form
% opt can be replaced by a list of ('param',value) pairs. The param
% string is equivalent to a field in opt.
% output:
% json: a string in the JSON format (see http://json.org)
%
% examples:
% a=struct('node',[1 9 10; 2 1 1.2], 'elem',[9 1;1 2;2 3],...
% 'face',[9 01 2; 1 2 3; NaN,Inf,-Inf], 'author','FangQ');
% savejson('mesh',a)
% savejson('',a,'ArrayIndent',0,'FloatFormat','\t%.5g')
%
% license:
% BSD or GPL version 3, see LICENSE_{BSD,GPLv3}.txt files for details
%
% -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)
%
if(nargin==1)
varname=inputname(1);
obj=rootname;
if(isempty(varname))
varname='root';
end
rootname=varname;
else
varname=inputname(2);
end
if(length(varargin)==1 && ischar(varargin{1}))
opt=struct('FileName',varargin{1});
else
opt=varargin2struct(varargin{:});
end
opt.IsOctave=exist('OCTAVE_VERSION');
rootisarray=0;
rootlevel=1;
forceroot=jsonopt('ForceRootName',0,opt);
if((isnumeric(obj) || islogical(obj) || ischar(obj) || isstruct(obj) || iscell(obj)) && isempty(rootname) && forceroot==0)
rootisarray=1;
rootlevel=0;
else
if(isempty(rootname))
rootname=varname;
end
end
if((isstruct(obj) || iscell(obj))&& isempty(rootname) && forceroot)
rootname='root';
end
json=obj2json(rootname,obj,rootlevel,opt);
if(rootisarray)
json=sprintf('%s\n',json);
else
json=sprintf('{\n%s\n}\n',json);
end
jsonp=jsonopt('JSONP','',opt);
if(~isempty(jsonp))
json=sprintf('%s(%s);\n',jsonp,json);
end
% save to a file if FileName is set, suggested by Patrick Rapin
if(~isempty(jsonopt('FileName','',opt)))
fid = fopen(opt.FileName, 'wt');
fwrite(fid,json,'char');
fclose(fid);
end
%%-------------------------------------------------------------------------
function txt=obj2json(name,item,level,varargin)
if(iscell(item))
txt=cell2json(name,item,level,varargin{:});
elseif(isstruct(item))
txt=struct2json(name,item,level,varargin{:});
elseif(ischar(item))
txt=str2json(name,item,level,varargin{:});
else
txt=mat2json(name,item,level,varargin{:});
end
%%-------------------------------------------------------------------------
function txt=cell2json(name,item,level,varargin)
txt='';
if(~iscell(item))
error('input is not a cell');
end
dim=size(item);
len=numel(item); % let's handle 1D cell first
padding1=repmat(sprintf('\t'),1,level-1);
padding0=repmat(sprintf('\t'),1,level);
if(len>1)
if(~isempty(name))
txt=sprintf('%s"%s": [\n',padding0, checkname(name,varargin{:})); name='';
else
txt=sprintf('%s[\n',padding0);
end
elseif(len==0)
if(~isempty(name))
txt=sprintf('%s"%s": null',padding0, checkname(name,varargin{:})); name='';
else
txt=sprintf('%snull',padding0);
end
end
for i=1:len
txt=sprintf('%s%s%s',txt,padding1,obj2json(name,item{i},level+(len>1),varargin{:}));
if(i<len) txt=sprintf('%s%s',txt,sprintf(',\n')); end
end
if(len>1) txt=sprintf('%s\n%s]',txt,padding0); end
%%-------------------------------------------------------------------------
function txt=struct2json(name,item,level,varargin)
txt='';
if(~isstruct(item))
error('input is not a struct');
end
len=numel(item);
padding1=repmat(sprintf('\t'),1,level-1);
padding0=repmat(sprintf('\t'),1,level);
sep=',';
if(~isempty(name))
if(len>1) txt=sprintf('%s"%s": [\n',padding0,checkname(name,varargin{:})); end
else
if(len>1) txt=sprintf('%s[\n',padding0); end
end
for e=1:len
names = fieldnames(item(e));
if(~isempty(name) && len==1)
txt=sprintf('%s%s"%s": {\n',txt,repmat(sprintf('\t'),1,level+(len>1)), checkname(name,varargin{:}));
else
txt=sprintf('%s%s{\n',txt,repmat(sprintf('\t'),1,level+(len>1)));
end
if(~isempty(names))
for i=1:length(names)
txt=sprintf('%s%s',txt,obj2json(names{i},getfield(item(e),...
names{i}),level+1+(len>1),varargin{:}));
if(i<length(names)) txt=sprintf('%s%s',txt,','); end
txt=sprintf('%s%s',txt,sprintf('\n'));
end
end
txt=sprintf('%s%s}',txt,repmat(sprintf('\t'),1,level+(len>1)));
if(e==len) sep=''; end
if(e<len) txt=sprintf('%s%s',txt,sprintf(',\n')); end
end
if(len>1) txt=sprintf('%s\n%s]',txt,padding0); end
%%-------------------------------------------------------------------------
function txt=str2json(name,item,level,varargin)
txt='';
if(~ischar(item))
error('input is not a string');
end
item=reshape(item, max(size(item),[1 0]));
len=size(item,1);
sep=sprintf(',\n');
padding1=repmat(sprintf('\t'),1,level);
padding0=repmat(sprintf('\t'),1,level+1);
if(~isempty(name))
if(len>1) txt=sprintf('%s"%s": [\n',padding1,checkname(name,varargin{:})); end
else
if(len>1) txt=sprintf('%s[\n',padding1); end
end
isoct=jsonopt('IsOctave',0,varargin{:});
for e=1:len
if(isoct)
val=regexprep(item(e,:),'\\','\\');
val=regexprep(val,'"','\"');
val=regexprep(val,'^"','\"');
else
val=regexprep(item(e,:),'\\','\\\\');
val=regexprep(val,'"','\\"');
val=regexprep(val,'^"','\\"');
end
if(len==1)
obj=['"' checkname(name,varargin{:}) '": ' '"',val,'"'];
if(isempty(name)) obj=['"',val,'"']; end
txt=sprintf('%s%s%s%s',txt,repmat(sprintf('\t'),1,level),obj);
else
txt=sprintf('%s%s%s%s',txt,repmat(sprintf('\t'),1,level+1),['"',val,'"']);
end
if(e==len) sep=''; end
txt=sprintf('%s%s',txt,sep);
end
if(len>1) txt=sprintf('%s\n%s%s',txt,padding1,']'); end
%%-------------------------------------------------------------------------
function txt=mat2json(name,item,level,varargin)
if(~isnumeric(item) && ~islogical(item))
error('input is not an array');
end
padding1=repmat(sprintf('\t'),1,level);
padding0=repmat(sprintf('\t'),1,level+1);
if(length(size(item))>2 || issparse(item) || ~isreal(item) || ...
isempty(item) ||jsonopt('ArrayToStruct',0,varargin{:}))
if(isempty(name))
txt=sprintf('%s{\n%s"_ArrayType_": "%s",\n%s"_ArraySize_": %s,\n',...
padding1,padding0,class(item),padding0,regexprep(mat2str(size(item)),'\s+',',') );
else
txt=sprintf('%s"%s": {\n%s"_ArrayType_": "%s",\n%s"_ArraySize_": %s,\n',...
padding1,checkname(name,varargin{:}),padding0,class(item),padding0,regexprep(mat2str(size(item)),'\s+',',') );
end
else
if(isempty(name))
txt=sprintf('%s%s',padding1,matdata2json(item,level+1,varargin{:}));
else
if(numel(item)==1 && jsonopt('NoRowBracket',1,varargin{:})==1)
numtxt=regexprep(regexprep(matdata2json(item,level+1,varargin{:}),'^\[',''),']','');
txt=sprintf('%s"%s": %s',padding1,checkname(name,varargin{:}),numtxt);
else
txt=sprintf('%s"%s": %s',padding1,checkname(name,varargin{:}),matdata2json(item,level+1,varargin{:}));
end
end
return;
end
dataformat='%s%s%s%s%s';
if(issparse(item))
[ix,iy]=find(item);
data=full(item(find(item)));
if(~isreal(item))
data=[real(data(:)),imag(data(:))];
if(size(item,1)==1)
% Kludge to have data's 'transposedness' match item's.
% (Necessary for complex row vector handling below.)
data=data';
end
txt=sprintf(dataformat,txt,padding0,'"_ArrayIsComplex_": ','1', sprintf(',\n'));
end
txt=sprintf(dataformat,txt,padding0,'"_ArrayIsSparse_": ','1', sprintf(',\n'));
if(size(item,1)==1)
% Row vector, store only column indices.
txt=sprintf(dataformat,txt,padding0,'"_ArrayData_": ',...
matdata2json([iy(:),data'],level+2,varargin{:}), sprintf('\n'));
elseif(size(item,2)==1)
% Column vector, store only row indices.
txt=sprintf(dataformat,txt,padding0,'"_ArrayData_": ',...
matdata2json([ix,data],level+2,varargin{:}), sprintf('\n'));
else
% General case, store row and column indices.
txt=sprintf(dataformat,txt,padding0,'"_ArrayData_": ',...
matdata2json([ix,iy,data],level+2,varargin{:}), sprintf('\n'));
end
else
if(isreal(item))
txt=sprintf(dataformat,txt,padding0,'"_ArrayData_": ',...
matdata2json(item(:)',level+2,varargin{:}), sprintf('\n'));
else
txt=sprintf(dataformat,txt,padding0,'"_ArrayIsComplex_": ','1', sprintf(',\n'));
txt=sprintf(dataformat,txt,padding0,'"_ArrayData_": ',...
matdata2json([real(item(:)) imag(item(:))],level+2,varargin{:}), sprintf('\n'));
end
end
txt=sprintf('%s%s%s',txt,padding1,'}');
%%-------------------------------------------------------------------------
function txt=matdata2json(mat,level,varargin)
if(size(mat,1)==1)
pre='';
post='';
level=level-1;
else
pre=sprintf('[\n');
post=sprintf('\n%s]',repmat(sprintf('\t'),1,level-1));
end
if(isempty(mat))
txt='null';
return;
end
floatformat=jsonopt('FloatFormat','%.10g',varargin{:});
formatstr=['[' repmat([floatformat ','],1,size(mat,2)-1) [floatformat sprintf('],\n')]];
if(nargin>=2 && size(mat,1)>1 && jsonopt('ArrayIndent',1,varargin{:})==1)
formatstr=[repmat(sprintf('\t'),1,level) formatstr];
end
txt=sprintf(formatstr,mat');
txt(end-1:end)=[];
if(islogical(mat) && jsonopt('ParseLogical',0,varargin{:})==1)
txt=regexprep(txt,'1','true');
txt=regexprep(txt,'0','false');
end
%txt=regexprep(mat2str(mat),'\s+',',');
%txt=regexprep(txt,';',sprintf('],\n['));
% if(nargin>=2 && size(mat,1)>1)
% txt=regexprep(txt,'\[',[repmat(sprintf('\t'),1,level) '[']);
% end
txt=[pre txt post];
if(any(isinf(mat(:))))
txt=regexprep(txt,'([-+]*)Inf',jsonopt('Inf','"$1_Inf_"',varargin{:}));
end
if(any(isnan(mat(:))))
txt=regexprep(txt,'NaN',jsonopt('NaN','"_NaN_"',varargin{:}));
end
%%-------------------------------------------------------------------------
function newname=checkname(name,varargin)
isunpack=jsonopt('UnpackHex',1,varargin{:});
newname=name;
if(isempty(regexp(name,'0x([0-9a-fA-F]+)_','once')))
return
end
if(isunpack)
isoct=jsonopt('IsOctave',0,varargin{:});
if(~isoct)
newname=regexprep(name,'(^x|_){1}0x([0-9a-fA-F]+)_','${native2unicode(hex2dec($2))}');
else
pos=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','start');
pend=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','end');
if(isempty(pos)) return; end
str0=name;
pos0=[0 pend(:)' length(name)];
newname='';
for i=1:length(pos)
newname=[newname str0(pos0(i)+1:pos(i)-1) char(hex2dec(str0(pos(i)+3:pend(i)-1)))];
end
if(pos(end)~=length(name))
newname=[newname str0(pos0(end-1)+1:pos0(end))];
end
end
end
|
github
|
lcnbeapp/beapp-master
|
loadjson.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/iso2mesh/loadjson.m
| 16,225 |
ibm852
|
f1745f9ac3fd57bd26f86b59f45cfb39
|
function data = loadjson(fname,varargin)
%
% data=loadjson(fname,opt)
% or
% data=loadjson(fname,'param1',value1,'param2',value2,...)
%
% parse a JSON (JavaScript Object Notation) file or string
%
% authors: Qianqian Fang (fangq<at> nmr.mgh.harvard.edu)
% date: 2011/09/09
% Nedialko Krouchev: http://www.mathworks.com/matlabcentral/fileexchange/25713
% date: 2009/11/02
% François Glineur: http://www.mathworks.com/matlabcentral/fileexchange/23393
% date: 2009/03/22
% Joel Feenstra:
% http://www.mathworks.com/matlabcentral/fileexchange/20565
% date: 2008/07/03
%
% $Id$
%
% input:
% fname: input file name, if fname contains "{}" or "[]", fname
% will be interpreted as a JSON string
% opt: a struct to store parsing options, opt can be replaced by
%           a list of ('param',value) pairs. The param string is equivalent
% to a field in opt.
%
% output:
%      data: a cell array, where {...} blocks are converted into cell arrays,
% and [...] are converted to arrays
%
% license:
% BSD or GPL version 3, see LICENSE_{BSD,GPLv3}.txt files for details
%
% -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)
%
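% examples (illustrative sketch, not part of the original help; the JSON
% string and file name below are placeholders):
%    dat=loadjson('{"node":[1,9,10],"flag":true}');
%    dat=loadjson('mydata.json','SimplifyCell',1);
%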
global pos inStr len esc index_esc len_esc isoct arraytoken
if(regexp(fname,'[\{\}\]\[]','once'))
string=fname;
elseif(exist(fname,'file'))
fid = fopen(fname,'rt');
string = fscanf(fid,'%c');
fclose(fid);
else
error('input file does not exist');
end
pos = 1; len = length(string); inStr = string;
isoct=exist('OCTAVE_VERSION');
arraytoken=find(inStr=='[' | inStr==']' | inStr=='"');
jstr=regexprep(inStr,'\\\\',' ');
escquote=regexp(jstr,'\\"');
arraytoken=sort([arraytoken escquote]);
% String delimiters and escape chars identified to improve speed:
esc = find(inStr=='"' | inStr=='\' ); % comparable to: regexp(inStr, '["\\]');
index_esc = 1; len_esc = length(esc);
opt=varargin2struct(varargin{:});
jsoncount=1;
while pos <= len
switch(next_char)
case '{'
data{jsoncount} = parse_object(opt);
case '['
data{jsoncount} = parse_array(opt);
otherwise
error_pos('Outer level structure must be an object or an array');
end
jsoncount=jsoncount+1;
end % while
jsoncount=length(data);
if(jsoncount==1 && iscell(data))
data=data{1};
end
if(~isempty(data))
if(isstruct(data)) % data can be a struct array
data=jstruct2array(data);
elseif(iscell(data))
data=jcell2array(data);
end
end
%%
function newdata=parse_collection(id,data,obj)
if(jsoncount>0 && exist('data','var'))
if(~iscell(data))
newdata=cell(1);
newdata{1}=data;
data=newdata;
end
end
%%
function newdata=jcell2array(data)
len=length(data);
newdata=data;
for i=1:len
if(isstruct(data{i}))
newdata{i}=jstruct2array(data{i});
elseif(iscell(data{i}))
newdata{i}=jcell2array(data{i});
end
end
%%-------------------------------------------------------------------------
function newdata=jstruct2array(data)
fn=fieldnames(data);
newdata=data;
len=length(data);
for i=1:length(fn) % depth-first
for j=1:len
if(isstruct(getfield(data(j),fn{i})))
newdata(j)=setfield(newdata(j),fn{i},jstruct2array(getfield(data(j),fn{i})));
end
end
end
if(~isempty(strmatch('x0x5F_ArrayType_',fn)) && ~isempty(strmatch('x0x5F_ArrayData_',fn)))
newdata=cell(len,1);
for j=1:len
ndata=cast(data(j).x0x5F_ArrayData_,data(j).x0x5F_ArrayType_);
iscpx=0;
if(~isempty(strmatch('x0x5F_ArrayIsComplex_',fn)))
if(data(j).x0x5F_ArrayIsComplex_)
iscpx=1;
end
end
if(~isempty(strmatch('x0x5F_ArrayIsSparse_',fn)))
if(data(j).x0x5F_ArrayIsSparse_)
if(~isempty(strmatch('x0x5F_ArraySize_',fn)))
dim=data(j).x0x5F_ArraySize_;
if(iscpx && size(ndata,2)==4-any(dim==1))
ndata(:,end-1)=complex(ndata(:,end-1),ndata(:,end));
end
if isempty(ndata)
% All-zeros sparse
ndata=sparse(dim(1),prod(dim(2:end)));
elseif dim(1)==1
% Sparse row vector
ndata=sparse(1,ndata(:,1),ndata(:,2),dim(1),prod(dim(2:end)));
elseif dim(2)==1
% Sparse column vector
ndata=sparse(ndata(:,1),1,ndata(:,2),dim(1),prod(dim(2:end)));
else
% Generic sparse array.
ndata=sparse(ndata(:,1),ndata(:,2),ndata(:,3),dim(1),prod(dim(2:end)));
end
else
if(iscpx && size(ndata,2)==4)
ndata(:,3)=complex(ndata(:,3),ndata(:,4));
end
ndata=sparse(ndata(:,1),ndata(:,2),ndata(:,3));
end
end
elseif(~isempty(strmatch('x0x5F_ArraySize_',fn)))
if(iscpx && size(ndata,2)==2)
ndata=complex(ndata(:,1),ndata(:,2));
end
ndata=reshape(ndata(:),data(j).x0x5F_ArraySize_);
end
newdata{j}=ndata;
end
if(len==1)
newdata=newdata{1};
end
end
%%-------------------------------------------------------------------------
function object = parse_object(varargin)
parse_char('{');
object = [];
if next_char ~= '}'
while 1
str = parseStr(varargin{:});
if isempty(str)
error_pos('Name of value at position %d cannot be empty');
end
parse_char(':');
val = parse_value(varargin{:});
eval( sprintf( 'object.%s = val;', valid_field(str) ) );
if next_char == '}'
break;
end
parse_char(',');
end
end
parse_char('}');
%%-------------------------------------------------------------------------
function object = parse_array(varargin) % JSON array is written in row-major order
global pos inStr isoct
parse_char('[');
object = cell(0, 1);
dim2=[];
if next_char ~= ']'
[endpos e1l e1r maxlevel]=matching_bracket(inStr,pos);
arraystr=['[' inStr(pos:endpos)];
arraystr=regexprep(arraystr,'"_NaN_"','NaN');
arraystr=regexprep(arraystr,'"([-+]*)_Inf_"','$1Inf');
arraystr(find(arraystr==sprintf('\n')))=[];
arraystr(find(arraystr==sprintf('\r')))=[];
%arraystr=regexprep(arraystr,'\s*,',','); % this is slow,sometimes needed
if(~isempty(e1l) && ~isempty(e1r)) % the array is in 2D or higher D
astr=inStr((e1l+1):(e1r-1));
astr=regexprep(astr,'"_NaN_"','NaN');
astr=regexprep(astr,'"([-+]*)_Inf_"','$1Inf');
astr(find(astr==sprintf('\n')))=[];
astr(find(astr==sprintf('\r')))=[];
astr(find(astr==' '))='';
if(isempty(find(astr=='[', 1))) % array is 2D
dim2=length(sscanf(astr,'%f,',[1 inf]));
end
else % array is 1D
astr=arraystr(2:end-1);
astr(find(astr==' '))='';
[obj count errmsg nextidx]=sscanf(astr,'%f,',[1,inf]);
if(nextidx>=length(astr)-1)
object=obj;
pos=endpos;
parse_char(']');
return;
end
end
if(~isempty(dim2))
astr=arraystr;
astr(find(astr=='['))='';
astr(find(astr==']'))='';
astr(find(astr==' '))='';
[obj count errmsg nextidx]=sscanf(astr,'%f,',inf);
if(nextidx>=length(astr)-1)
object=reshape(obj,dim2,numel(obj)/dim2)';
pos=endpos;
parse_char(']');
return;
end
end
arraystr=regexprep(arraystr,'\]\s*,','];');
try
if(isoct && regexp(arraystr,'"','once'))
error('Octave eval can produce empty cells for JSON-like input');
end
object=eval(arraystr);
pos=endpos;
catch
while 1
val = parse_value(varargin{:});
object{end+1} = val;
if next_char == ']'
break;
end
parse_char(',');
end
end
end
if(jsonopt('SimplifyCell',0,varargin{:})==1)
try
oldobj=object;
object=cell2mat(object')';
if(iscell(oldobj) && isstruct(object) && numel(object)>1 && jsonopt('SimplifyCellArray',1,varargin{:})==0)
object=oldobj;
elseif(size(object,1)>1 && ndims(object)==2)
object=object';
end
catch
end
end
parse_char(']');
%%-------------------------------------------------------------------------
function parse_char(c)
global pos inStr len
skip_whitespace;
if pos > len || inStr(pos) ~= c
error_pos(sprintf('Expected %c at position %%d', c));
else
pos = pos + 1;
skip_whitespace;
end
%%-------------------------------------------------------------------------
function c = next_char
global pos inStr len
skip_whitespace;
if pos > len
c = [];
else
c = inStr(pos);
end
%%-------------------------------------------------------------------------
function skip_whitespace
global pos inStr len
while pos <= len && isspace(inStr(pos))
pos = pos + 1;
end
%%-------------------------------------------------------------------------
function str = parseStr(varargin)
global pos inStr len esc index_esc len_esc
% len, ns = length(inStr), keyboard
if inStr(pos) ~= '"'
error_pos('String starting with " expected at position %d');
else
pos = pos + 1;
end
str = '';
while pos <= len
while index_esc <= len_esc && esc(index_esc) < pos
index_esc = index_esc + 1;
end
if index_esc > len_esc
str = [str inStr(pos:len)];
pos = len + 1;
break;
else
str = [str inStr(pos:esc(index_esc)-1)];
pos = esc(index_esc);
end
nstr = length(str); switch inStr(pos)
case '"'
pos = pos + 1;
if(~isempty(str))
if(strcmp(str,'_Inf_'))
str=Inf;
elseif(strcmp(str,'-_Inf_'))
str=-Inf;
elseif(strcmp(str,'_NaN_'))
str=NaN;
end
end
return;
case '\'
if pos+1 > len
error_pos('End of file reached right after escape character');
end
pos = pos + 1;
switch inStr(pos)
case {'"' '\' '/'}
str(nstr+1) = inStr(pos);
pos = pos + 1;
case {'b' 'f' 'n' 'r' 't'}
str(nstr+1) = sprintf(['\' inStr(pos)]);
pos = pos + 1;
case 'u'
if pos+4 > len
error_pos('End of file reached in escaped unicode character');
end
str(nstr+(1:6)) = inStr(pos-1:pos+4);
pos = pos + 5;
end
otherwise % should never happen
str(nstr+1) = inStr(pos), keyboard
pos = pos + 1;
end
end
error_pos('End of file while expecting end of inStr');
%%-------------------------------------------------------------------------
function num = parse_number(varargin)
global pos inStr len isoct
currstr=inStr(pos:end);
numstr=0;
if(isoct~=0)
numstr=regexp(currstr,'^\s*-?(?:0|[1-9]\d*)(?:\.\d+)?(?:[eE][+\-]?\d+)?','end');
[num, one] = sscanf(currstr, '%f', 1);
delta=numstr+1;
else
[num, one, err, delta] = sscanf(currstr, '%f', 1);
if ~isempty(err)
error_pos('Error reading number at position %d');
end
end
pos = pos + delta-1;
%%-------------------------------------------------------------------------
function val = parse_value(varargin)
global pos inStr len
true = 1; false = 0;
switch(inStr(pos))
case '"'
val = parseStr(varargin{:});
return;
case '['
val = parse_array(varargin{:});
return;
case '{'
val = parse_object(varargin{:});
return;
case {'-','0','1','2','3','4','5','6','7','8','9'}
val = parse_number(varargin{:});
return;
case 't'
if pos+3 <= len && strcmpi(inStr(pos:pos+3), 'true')
val = true;
pos = pos + 4;
return;
end
case 'f'
if pos+4 <= len && strcmpi(inStr(pos:pos+4), 'false')
val = false;
pos = pos + 5;
return;
end
case 'n'
if pos+3 <= len && strcmpi(inStr(pos:pos+3), 'null')
val = [];
pos = pos + 4;
return;
end
end
error_pos('Value expected at position %d');
%%-------------------------------------------------------------------------
function error_pos(msg)
global pos inStr len
poShow = max(min([pos-15 pos-1 pos pos+20],len),1);
if poShow(3) == poShow(2)
poShow(3:4) = poShow(2)+[0 -1]; % display nothing after
end
msg = [sprintf(msg, pos) ': ' ...
inStr(poShow(1):poShow(2)) '<error>' inStr(poShow(3):poShow(4)) ];
error( ['JSONparser:invalidFormat: ' msg] );
%%-------------------------------------------------------------------------
function str = valid_field(str)
global isoct
% From MATLAB doc: field names must begin with a letter, which may be
% followed by any combination of letters, digits, and underscores.
% Invalid characters will be converted to underscores, and the prefix
% "x0x[Hex code]_" will be added if the first character is not a letter.
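% For illustration (assumed behaviour, based on the code below): a JSON key
% such as "_ArrayType_" is converted to the struct field name
% "x0x5F_ArrayType_", which jstruct2array above looks for when rebuilding
% arrays.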
pos=regexp(str,'^[^A-Za-z]','once');
if(~isempty(pos))
if(~isoct)
str=regexprep(str,'^([^A-Za-z])','x0x${sprintf(''%X'',unicode2native($1))}_','once');
else
str=sprintf('x0x%X_%s',char(str(1)),str(2:end));
end
end
if(isempty(regexp(str,'[^0-9A-Za-z_]', 'once' ))) return; end
if(~isoct)
str=regexprep(str,'([^0-9A-Za-z_])','_0x${sprintf(''%X'',unicode2native($1))}_');
else
pos=regexp(str,'[^0-9A-Za-z_]');
if(isempty(pos)) return; end
str0=str;
pos0=[0 pos(:)' length(str)];
str='';
for i=1:length(pos)
str=[str str0(pos0(i)+1:pos(i)-1) sprintf('_0x%X_',str0(pos(i)))];
end
if(pos(end)~=length(str))
str=[str str0(pos0(end-1)+1:pos0(end))];
end
end
%str(~isletter(str) & ~('0' <= str & str <= '9')) = '_';
%%-------------------------------------------------------------------------
function endpos = matching_quote(str,pos)
len=length(str);
while(pos<len)
if(str(pos)=='"')
if(~(pos>1 && str(pos-1)=='\'))
endpos=pos;
return;
end
end
pos=pos+1;
end
error('unmatched quotation mark');
%%-------------------------------------------------------------------------
function [endpos e1l e1r maxlevel] = matching_bracket(str,pos)
global arraytoken
level=1;
maxlevel=level;
endpos=0;
bpos=arraytoken(arraytoken>=pos);
tokens=str(bpos);
len=length(tokens);
pos=1;
e1l=[];
e1r=[];
while(pos<=len)
c=tokens(pos);
if(c==']')
level=level-1;
if(isempty(e1r)) e1r=bpos(pos); end
if(level==0)
endpos=bpos(pos);
return
end
end
if(c=='[')
if(isempty(e1l)) e1l=bpos(pos); end
level=level+1;
maxlevel=max(maxlevel,level);
end
if(c=='"')
pos=matching_quote(tokens,pos+1);
end
pos=pos+1;
end
if(endpos==0)
error('unmatched "]"');
end
|
github
|
lcnbeapp/beapp-master
|
surfaceclean.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/iso2mesh/surfaceclean.m
| 1,015 |
utf_8
|
7e36bba6243ce6e4c9645e4599663dfe
|
function f=surfaceclean(f,v)
%
% f=surfaceclean(f,v)
%
% remove surface patches that are located inside
% the bounding box faces
%
% author: Qianqian Fang (fangq<at> nmr.mgh.harvard.edu)
% date: 2008/04/08
%
% input:
% v: surface node list, dimension (nn,3)
% f: surface face element list, dimension (be,3)
%
% output:
% f: faces free of those on the bounding box
%
% -- this function is part of iso2mesh toolbox (http://iso2mesh.sf.net)
%
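% example (illustrative sketch; node and face are placeholder variables
% assumed to hold an existing surface mesh):
%   face=surfaceclean(face,node);
%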
pos=v;
mi=min(pos);
ma=max(pos);
idx0=find(abs(pos(:,1)-mi(1))<1e-6);
idx1=find(abs(pos(:,1)-ma(1))<1e-6);
idy0=find(abs(pos(:,2)-mi(2))<1e-6);
idy1=find(abs(pos(:,2)-ma(2))<1e-6);
idz0=find(abs(pos(:,3)-mi(3))<1e-6);
idz1=find(abs(pos(:,3)-ma(3))<1e-6);
f=removeedgefaces(f,v,idx0);
f=removeedgefaces(f,v,idx1);
f=removeedgefaces(f,v,idy0);
f=removeedgefaces(f,v,idy1);
f=removeedgefaces(f,v,idz0);
f=removeedgefaces(f,v,idz1);
function f=removeedgefaces(f,v,idx1)
mask=zeros(length(v),1);
mask(idx1)=1;
f(find(sum(mask(f)')==3),:)=[];
|
github
|
lcnbeapp/beapp-master
|
plotsurf.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/iso2mesh/plotsurf.m
| 3,890 |
utf_8
|
cb1cd7f508119c87ac12155d80e72f2d
|
function hm=plotsurf(node,face,varargin)
%
% hm=plotsurf(node,face,opt)
%
% plot 3D surface meshes
%
% author: Qianqian Fang <fangq at nmr.mgh.harvard.edu>
%
% input:
% node: node coordinates, dimension (nn,3); if node has a
% 4th column, it will be used to set the color at each node.
% face: triangular surface face list; if face has a 4th column,
% it will be used to separate the surface into
% sub-surfaces and display them in different colors;
% face can be a cell array, each element of the array represents
% a polyhedral facet of the mesh, if an element is an array with
% two array subelements, the first one is the node index, the
% second one is a scalar as the group id of the facet.
% opt: additional options for the plotting, see plotmesh
%
% output:
% hm: handle or handles (vector) to the plotted surfaces
%
% example:
%
% h=plotsurf(node,face);
% h=plotsurf(node,face,'facecolor','r');
%
% -- this function is part of iso2mesh toolbox (http://iso2mesh.sf.net)
%
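% additional illustrative sketch (not from the original help): a 4th column
% in face is treated as a per-face label and every label is drawn in its own
% randomly chosen color
%   h=plotsurf(node,[face ones(size(face,1),1)],'facealpha',0.5);
%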
rngstate = rand ('state');
if(nargin>=2)
randseed=hex2dec('623F9A9E'); % "U+623F U+9A9E"
if(isoctavemesh) randseed=randseed+3; end
if(~isempty(getvarfrom({'caller','base'},'ISO2MESH_RANDSEED')))
randseed=getvarfrom({'caller','base'},'ISO2MESH_RANDSEED');
end
rand('state',randseed);
if(iscell(face))
sc=sparse(10,3); % face colormap
sc(1:10,:)=rand(3,10)';
len=length(face);
newsurf=cell(1);
% reorganizing each labeled surface into a new cell
for i=1:len
fc=face{i};
if(iscell(fc) && length(fc)>=2)
if(fc{2}+1>10)
sc(fc{2}+1,:)=rand(1,3);
end
if(fc{2}+1>length(newsurf))
newsurf{fc{2}+1}={};
end
newsurf{fc{2}+1}{end+1}=fc{1};
else % unlabeled facet is tagged by 0
if(iscell(fc))
newsurf{1}{end+1}=cell2mat(fc);
else
newsurf{1}{end+1}=fc;
end
end
end
hold on;
h=[];
newlen=length(newsurf);
for i=1:newlen
if(isempty(newsurf{i})); continue; end
try
subface=cell2mat(newsurf{i}')';
if(size(subface,1)>1 && ndims(subface)==2)
subface=subface';
end
h=[h patch('Vertices',node,'Faces',subface,'facecolor',sc(i,:),varargin{:})];
catch
for j=1:length(newsurf{i})
h=[h patch('Vertices',node,'Faces',newsurf{i}{j},'facecolor',sc(i,:),varargin{:})];
end
end
end
else
if(size(face,2)==4)
tag=face(:,4);
types=unique(tag);
hold on;
h=[];
for i=1:length(types)
if(size(node,2)==3)
h=[h plotasurf(node,face(find(tag==types(i)),1:3),'facecolor',rand(3,1),varargin{:})];
else
h=[h plotasurf(node,face(find(tag==types(i)),1:3),varargin{:})];
end
end
else
h=plotasurf(node,face,varargin{:});
end
end
end
if(~isempty(h))
axis equal;
if(all(get(gca,'view')==[0 90]))
view(3);
end
end
if(~isempty(h) && nargout>=1)
hm=h;
end
rand ('state',rngstate);
%-------------------------------------------------------------------------
function hh=plotasurf(node,face,varargin)
isoct=isoctavemesh;
if(size(node,2)==4)
if(isoct && ~exist('trisurf','file'))
h=trimesh(face(:,1:3),node(:,1),node(:,2),node(:,3),node(:,4),'edgecolor','k',varargin{:});
else
h=trisurf(face(:,1:3),node(:,1),node(:,2),node(:,3),node(:,4),varargin{:});
end
else
if(isoct && ~exist('trisurf','file'))
h=trimesh(face(:,1:3),node(:,1),node(:,2),node(:,3),'edgecolor','k',varargin{:});
else
h=trisurf(face(:,1:3),node(:,1),node(:,2),node(:,3),varargin{:});
end
end
if(exist('h','var')) hh=h; end
|
github
|
lcnbeapp/beapp-master
|
readoff.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/iso2mesh/readoff.m
| 1,446 |
utf_8
|
aece4a72343e6bece72b82ced7e8fbe6
|
function [node,elem]=readoff(fname)
%
% [node,elem]=readoff(fname)
%
% read Geomview Object File Format (OFF)
%
% author: Qianqian Fang (fangq<at> nmr.mgh.harvard.edu)
% date: 2008/03/28
%
% input:
% fname: name of the OFF data file
%
% output:
% node: node coordinates of the mesh
% elem: list of elements of the mesh
%
% -- this function is part of iso2mesh toolbox (http://iso2mesh.sf.net)
%
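% example (illustrative sketch; 'mesh.off' is a placeholder file name):
%   [node,elem]=readoff('mesh.off');
%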
node=[];
elem=[];
fid=fopen(fname,'rt');
line=fgetl(fid);
dim=sscanf(line,'OFF %d %d %d');
line=nonemptyline(fid);
if(size(dim,1)~=3)
dim=sscanf(line,'%d',3);
line=nonemptyline(fid);
end
nodalcount=3;
if(~isempty(line))
[val nodalcount]=sscanf(line,'%f',inf);
else
fclose(fid);
return;
end
node=fscanf(fid,'%f',[nodalcount,dim(1)-1])';
node=[val(:)';node];
line=nonemptyline(fid);
facetcount=4;
if(~isempty(line))
[val facetcount]=sscanf(line,'%f',inf);
else
fclose(fid);
return;
end
elem=fscanf(fid,'%f',[facetcount,dim(2)-1])';
elem=[val(:)';elem];
fclose(fid);
elem(:,1)=[];
if(size(elem,2)<=3)
elem(:,1:3)=round(elem(:,1:3))+1;
else
elem(:,1:4)=round(elem(:,1:4))+1;
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function str=nonemptyline(fid)
str='';
if(fid==0) error('invalid file'); end
while((isempty(regexp(str,'\S')) || ~isempty(regexp(str,'^#'))) && ~feof(fid))
str=fgetl(fid);
if(~ischar(str))
str='';
return;
end
end
|
github
|
lcnbeapp/beapp-master
|
om_check_vol.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/openmeeg/om_check_vol.m
| 1,911 |
utf_8
|
2ad2a2270818ab0b2323ee1513c5a541
|
function status = om_check_vol(vol)
% OM_CHECK_VOL Check meshes of volume conductor for BEM modeling
% [STATUS] = OM_CHECK_VOL(VOL)
%
% returns 1 if there is a problem with geometry
% else returns 0
%
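% Example (illustrative sketch; vol is assumed to be a FieldTrip BEM volume
% conductor with bnd, skin and source fields):
%   if om_check_vol(vol), error('the BEM geometry appears to be invalid'); end
%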
% Copyright (C) 2010, Alexandre Gramfort, INRIA
% $Id$
% $LastChangedBy: alegra $
% $LastChangedDate: 2010-09-06 13:58:49 +0200 (Mon, 06 Sep 2010) $
% $Revision$
openmeeg_license
om_checkombin;
% the first compartment should be the skin, the last the source
% flip the order of the compartments if necessary
if vol.skin==length(vol.bnd) && vol.source==1
vol.bnd = fliplr(vol.bnd(:)');
vol.skin = 1;
vol.source = length(vol.bnd);
end
assert(vol.skin == 1)
assert(vol.source == length(vol.bnd))
% Flip faces for openmeeg convention
for ii=1:length(vol.bnd)
vol.bnd(ii).tri = fliplr(vol.bnd(ii).tri);
end
try
% store the current path and change folder to the temporary one
tmpfolder = cd;
cd(tempdir)
% write the triangulations to file
bndfile = {};
for ii=1:length(vol.bnd)
[junk,tname] = fileparts(tempname);
bndfile{ii} = [tname '.tri'];
om_save_tri(bndfile{ii}, vol.bnd(ii).pos, vol.bnd(ii).tri);
end
% these will hold the shell script and the inverted system matrix
[junk,tname] = fileparts(tempname);
if ~ispc
exefile = [tname '.sh'];
else
exefile = [tname '.bat'];
end
[junk,tname] = fileparts(tempname);
geomfile = [tname '.geom'];
% write conductivity and geometry files
om_write_geom(geomfile,bndfile);
% Exe file
status = system(['om_check_geom -g ',geomfile]);
cleaner(vol,bndfile,geomfile)
cd(tmpfolder)
catch
cleaner(vol,bndfile,geomfile)
cd(tmpfolder)
rethrow(lasterror)
end
function cleaner(vol,bndfile,geomfile)
% delete the temporary files
for i=1:length(vol.bnd)
delete(bndfile{i})
end
delete(geomfile);
return
|
github
|
lcnbeapp/beapp-master
|
testOpenMEEGeeg.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/openmeeg/testOpenMEEGeeg.m
| 3,442 |
utf_8
|
da3bbd9c98535f6d189d29d80e72d0f0
|
function testOpenMEEGeeg
% TEST testOpenMEEGeeg
% Test the computation of an EEG leadfield with OpenMEEG
addpath(cd) % Make sure current folder is in the path
%% Set the position of the probe dipole
pos = [0 0 70];
%% Set the radius and conductivities of each of the compartments
% 4 Layers
r = [85 88 92 100];
c = [1 1/20 1/80 1];
[rdms,mags] = run_bem_computation(r,c,pos);
% the following would require the installation of xunit toolbox
% assertElementsAlmostEqual(rdms, [0.019963 0.019962 0.10754], 'absolute', 1e-3)
% assertElementsAlmostEqual(mags, [0.84467 0.84469 0.83887], 'absolute', 1e-3)
%use instead
thr = 2e-2;
assert(norm(rdms-[0.019963 0.019962 0.10754])<thr)
assert(norm(mags-[0.84467 0.84469 0.83887])<thr)
% 3 Layers
r = [88 92 100];
c = [1 1/80 1];
[rdms,mags] = run_bem_computation(r,c,pos);
% assertElementsAlmostEqual(rdms, [0.064093 0.064092 0.13532], 'absolute', 1e-3)
% assertElementsAlmostEqual(mags, [1.0498 1.0498 1.0207], 'absolute', 1e-3)
assert(norm(rdms-[0.064093 0.064092 0.13532])<thr)
assert(norm(mags-[1.0498 1.0498 1.0207])<thr)
% 2 Layers
r = [92 100];
c = [1 1/4];
[rdms,mags] = run_bem_computation(r,c,pos);
% assertElementsAlmostEqual(rdms, [0.15514 0.15514 0.1212], 'absolute', 1e-3)
% assertElementsAlmostEqual(mags, [1.8211 1.8211 1.3606], 'absolute', 1e-3)
assert(norm(rdms-[0.15514 0.15514 0.1212])<thr)
assert(norm(mags-[1.8211 1.8211 1.3606])<thr)
% 1 Layers
r = [100];
c = [1];
[rdms,mags] = run_bem_computation(r,c,pos);
% assertElementsAlmostEqual(rdms, [0.18934 0.18931 0.0778], 'absolute', 1e-3)
% assertElementsAlmostEqual(mags, [1.3584 1.3583 1.2138], 'absolute', 1e-3)
assert(norm(rdms-[0.18934 0.18931 0.0778])<thr)
assert(norm(mags-[1.3584 1.3583 1.2138])<thr)
function [rdms,mags] = run_bem_computation(r,c,pos)
%% Description of the spherical mesh
[pos, tri] = icosahedron42;
% [pos, tri] = icosahedron162;
% [pos, tri] = icosahedron642;
%% Create a set of electrodes on the outer surface
sens.elecpos = max(r) * pos;
sens.label = {};
nsens = size(sens.elecpos,1);
for ii=1:nsens
sens.label{ii} = sprintf('vertex%03d', ii);
end
%% Create a triangulated mesh, the first boundary is inside
vol = [];
for ii=1:length(r)
vol.bnd(ii).pos = pos * r(ii);
vol.bnd(ii).tri = tri;
end
%% Compute the BEM model
cfg.conductivity = c;
cfg.method = 'openmeeg';
vol1 = ft_prepare_bemmodel(cfg, vol);
vol2 = ft_prepare_headmodel(cfg, vol);
vol_bem = vol1;
cfg.vol = vol_bem;
cfg.grid.pos = pos;
cfg.elec = sens;
grid = ft_prepare_leadfield(cfg);
lf_openmeeg = grid.leadfield{1};
% Note: ft_compute_leadfield centers the forward fields by default
% (average reference)
% lf_openmeeg = lf_openmeeg - repmat(mean(lf_openmeeg),size(lf_openmeeg,1),1);
%% Compute the analytic leadfield
vol_sphere = [];
vol_sphere.r = r;
vol_sphere.cond = c;
lf_sphere = ft_compute_leadfield(pos, sens, vol_sphere);
%% Evaluate the quality of the result using RDM and MAG
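% (Clarifying note, assumed description: the RDM compares the column-wise
% normalised lead fields, so 0 means identical topographies; the MAG is the
% ratio of the lead field norms, so 1 means identical amplitudes.)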
rdms = zeros(1,size(lf_openmeeg,2));
for ii=1:size(lf_openmeeg,2)
rdms(ii) = norm(lf_openmeeg(:,ii)/norm(lf_openmeeg(:,ii)) - lf_sphere(:,ii) / norm(lf_sphere(:,ii)));
end
mags = sqrt(sum(lf_openmeeg.^2))./sqrt(sum(lf_sphere.^2));
disp(['RDMs: ',num2str(rdms)]);
disp(['MAGs: ',num2str(mags)]);
end % function
end
|
github
|
lcnbeapp/beapp-master
|
openmeeg.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/openmeeg/openmeeg.m
| 4,515 |
utf_8
|
0fd160989b8ea14353b3fae3fe6b2d9b
|
function [vol] = openmeeg(vol, isolated)
% OPENMEEG computes a symmetric BEM system matrix
%
% Use as
% [vol] = openmeeg(vol, isolated)
%
% Attention: the normals of the mesh describing the volume conductor are by
% FieldTrip convention pointing outwards (with respect to the mesh center),
% whereas OpenMEEG binaries expect them to be pointing inwards.
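%
% Example (illustrative sketch; vol is assumed to contain the boundary
% meshes in vol.bnd and the conductivities in vol.cond):
%   vol = openmeeg(vol, false);   % adds the inverted system matrix as vol.mat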
% Copyright (C) 2009, Alexandre Gramfort
% INRIA Odyssee Project Team
% $Id$
warning('OPENMEEG is deprecated, please use FT_PREPARE_HEADMODEL with cfg.method = ''bem_openmeeg'' instead.')
openmeeg_license
om_checkombin;
fprintf('%s\n','Calculating BEM model...please wait');
skin = find_outermost_boundary(vol.bnd);
source = find_innermost_boundary(vol.bnd);
% the first compartment should be the skin, the last the source
% flip the order of the compartments if necessary
if skin==length(vol.bnd) && source==1
% flip the order of the compartments
vol.bnd = fliplr(vol.bnd(:)');
vol.cond = fliplr(vol.cond(:)');
vol.skin_surface = 1;
vol.source = length(vol.bnd);
elseif skin==1 && source==length(vol.bnd)
vol.skin_surface = 1;
vol.source = length(vol.bnd);
else
error('the first compartment should be the skin, the last the source');
end
% store the current path and change folder to the temporary one
tmpfolder = cd;
try
cd(tempdir)
% initialize OM boundaries
bndfile = {};
bndom = vol.bnd;
% check if normals are outward oriented (as they should be)
ok = checknormals(bndom);
% Flip faces for openmeeg convention (inwards normals)
if ~ok
for ii=1:length(bndom)
bndom(ii).tri = fliplr(bndom(ii).tri);
end
end
% write triangulation files on disk
for ii=1:length(vol.bnd)
[junk,tname] = fileparts(tempname);
bndfile{ii} = [tname '.tri'];
om_save_tri(bndfile{ii}, bndom(ii).pos, bndom(ii).tri);
end
% these will hold the shell script and the inverted system matrix
[junk,tname] = fileparts(tempname);
if ~ispc
exefile = [tname '.sh'];
else
exefile = [tname '.bat'];
end
[junk,tname] = fileparts(tempname);
condfile = [tname '.cond'];
[junk,tname] = fileparts(tempname);
geomfile = [tname '.geom'];
[junk,tname] = fileparts(tempname);
hmfile = [tname '.bin'];
[junk,tname] = fileparts(tempname);
hminvfile = [tname '.bin'];
% write conductivity and geometry files
om_write_geom(geomfile,bndfile);
om_write_cond(condfile,vol.cond);
% Exe file
efid = fopen(exefile, 'w');
omp_num_threads = feature('numCores');
if ~ispc
fprintf(efid,'#!/usr/bin/env bash\n');
fprintf(efid,['export OMP_NUM_THREADS=',num2str(omp_num_threads),'\n']);
fprintf(efid,['om_assemble -HM ./' geomfile ' ./' condfile ' ./' hmfile ' 2>&1 > /dev/null\n']);
fprintf(efid,['om_minverser ./' hmfile ' ./' hminvfile ' 2>&1 > /dev/null\n']);
else
fprintf(efid,['om_assemble -HM ./' geomfile ' ./' condfile ' ./' hmfile '\n']);
fprintf(efid,['om_minverser ./' hmfile ' ./' hminvfile '\n']);
end
fclose(efid);
if ~ispc
dos(sprintf('chmod +x %s', exefile));
end
catch
cd(tmpfolder)
rethrow(lasterror)
end
try
% execute OpenMEEG and read the resulting file
if ispc
dos([exefile]);
elseif ismac
dos(['./' exefile]);
else % assumes linux by default
version = om_getgccversion;
if version > 3
dos(['./' exefile]);
else
      error('unsuitable GCC compiler version (must be newer than gcc 3)');
end
end
vol.mat = om_load_sym(hminvfile,'binary');
cleaner(vol,bndfile,condfile,geomfile,hmfile,hminvfile,exefile)
cd(tmpfolder)
catch
  warning('an error occurred while running OpenMEEG');
disp(lasterr);
cleaner(vol,bndfile,condfile,geomfile,hmfile,hminvfile,exefile)
cd(tmpfolder)
end
function cleaner(vol,bndfile,condfile,geomfile,hmfile,hminvfile,exefile)
% delete the temporary files
for ii=1:length(vol.bnd)
delete(bndfile{ii})
end
delete(condfile);
delete(geomfile);
delete(hmfile);
delete(hminvfile);
delete(exefile);
return
function ok = checknormals(bnd)
ok = 0;
pos = bnd.pos;
tri = bnd.tri;
% translate to the center
org = mean(pos,1);
pos(:,1) = pos(:,1) - org(1);
pos(:,2) = pos(:,2) - org(2);
pos(:,3) = pos(:,3) - org(3);
w = sum(solid_angle(pos, tri));
if w<0 && (abs(w)-4*pi)<1000*eps
% FIXME: this method is rigorous only for star shaped surfaces
warning('your normals are not oriented correctly')
ok = 0;
elseif w>0 && abs(w-4*pi)<1000*eps
ok = 1;
else
error('your surface probably is irregular')
ok = 0;
end
|
github
|
lcnbeapp/beapp-master
|
openmeeg_dsm.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/openmeeg/openmeeg_dsm.m
| 4,055 |
utf_8
|
b5927ebd6c389bc30f4183cb25c4499f
|
function [dsm] = openmeeg_dsm(pos, vol, flag)
% OPENMEEG_DSM computes the OpenMEEG DSM matrix
% i.e. Right hand side in the potential equation
%
% Use as
%   [dsm] = openmeeg_dsm(pos, vol, flag)
%
% flag = 1 selects the non-adaptive algorithm: it does not try to approximate the
% potential in the neighborhood of the leads by locally refining the BEM surface
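%
% Example (illustrative sketch; dippos is a placeholder Nx3 matrix of dipole
% positions and vol an OpenMEEG-ready volume conductor):
%   dsm = openmeeg_dsm(dippos, vol, 1);   % non-adaptive right-hand side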
% Copyright (C) 2009, Alexandre Gramfort
% INRIA Odyssee Project Team
% store the current path and change folder to the temporary one
tmpfolder = cd;
om_checkombin;
bndom = vol.bnd;
try
cd(tempdir)
% write the triangulations to file
bndfile = {};
for i=1:length(vol.bnd)
[junk,tname] = fileparts(tempname);
bndfile{i} = [tname '.tri'];
ok = checknormals(bndom(i));
if ~ok
bndom(i).tri = fliplr(bndom(i).tri);
end
om_save_tri(bndfile{i}, bndom(i).pos, bndom(i).tri);
end
% these will hold the shell script and the inverted system matrix
[junk,tname] = fileparts(tempname);
if ~ispc
exefile = [tname '.sh'];
else
exefile = [tname '.bat'];
end
[junk,tname] = fileparts(tempname);
condfile = [tname '.cond'];
[junk,tname] = fileparts(tempname);
geomfile = [tname '.geom'];
[junk,tname] = fileparts(tempname);
dipfile = [tname '.dip'];
[junk,tname] = fileparts(tempname);
dsmfile = [tname '.bin'];
% write conductivity and geometry files
om_write_geom(geomfile,bndfile);
om_write_cond(condfile,vol.cond);
% handle dipole file
ndip = size(pos,1);
pos = [kron(pos,ones(3,1)) , kron(ones(ndip,1),eye(3))]; % save pos with each 3D orientation
om_save_full(pos,dipfile,'ascii');
% Exe file
efid = fopen(exefile, 'w');
omp_num_threads = feature('numCores');
if flag
str = ' -DSMNA';
else
str = ' -DSM';
end
if ~ispc
fprintf(efid,'#!/usr/bin/env bash\n');
fprintf(efid,['export OMP_NUM_THREADS=',num2str(omp_num_threads),'\n']);
% the following implements Galerkin method and switch can be -DSM or -DSMNA
% (non adaptive), see OMtrunk/src/assembleSourceMat.cpp, operators.cpp
fprintf(efid,['om_assemble' str ' ./',geomfile,' ./',condfile,' ./',dipfile,' ./',dsmfile,' 2>&1 > /dev/null\n']);
else
fprintf(efid,['om_assemble' str ' ./',geomfile,' ./',condfile,' ./',dipfile,' ./',dsmfile,'\n']);
end
fclose(efid);
if ~ispc
dos(sprintf('chmod +x %s', exefile));
end
catch
cd(tmpfolder)
rethrow(lasterror)
end
try
% execute OpenMEEG and read the resulting file
disp(['Assembling OpenMEEG DSM matrix']);
stopwatch = tic;
if ispc
dos([exefile]);
else
dos(['./' exefile]);
end
dsm = om_load_full(dsmfile,'binary');
toc(stopwatch);
cleaner(vol,bndfile,condfile,geomfile,exefile,dipfile,dsmfile)
cd(tmpfolder)
catch
  warning('an error occurred while running OpenMEEG');
disp(lasterr);
cleaner(vol,bndfile,condfile,geomfile,exefile,dipfile,dsmfile)
cd(tmpfolder)
end
function cleaner(vol,bndfile,condfile,geomfile,exefile,dipfile,dsmfile)
% delete the temporary files
for i=1:length(vol.bnd)
if exist(bndfile{i},'file'),delete(bndfile{i}),end
end
if exist(condfile,'file'),delete(condfile);end
if exist(geomfile,'file'),delete(geomfile);end
if exist(exefile,'file'),delete(exefile);end
if exist(dipfile,'file'),delete(dipfile);end
if exist(dsmfile,'file'),delete(dsmfile);end
function ok = checknormals(bnd)
% FIXME: this method is rigorous only for star shaped surfaces
ok = 0;
pos = bnd.pos;
tri = bnd.tri;
% translate to the center
org = mean(pos,1);
pos(:,1) = pos(:,1) - org(1);
pos(:,2) = pos(:,2) - org(2);
pos(:,3) = pos(:,3) - org(3);
w = sum(solid_angle(pos, tri));
if w<0 && (abs(w)-4*pi)<1000*eps
ok = 0;
% warning('your normals are outwards oriented\n')
elseif w>0 && (abs(w)-4*pi)<1000*eps
ok = 1;
% warning('your normals are inwards oriented')
else
error('your surface probably is irregular\n')
ok = 0;
end
|
github
|
lcnbeapp/beapp-master
|
openmeeg_megm.m
|
.m
|
beapp-master/Packages/eeglab14_1_2b/plugins/fieldtrip-20160917/external/openmeeg/openmeeg_megm.m
| 3,786 |
utf_8
|
65f11e114798758fdc4c4b43287f55c1
|
function [h2mm, s2mm] = openmeeg_megm(pos, vol, sens)
% OPENMEEG_MEGM computes the OpenMEEG H2MM and S2MM matrices
% i.e. the contribution to MEG from the potential and from the source
%
% Use as
%   [h2mm,s2mm] = openmeeg_megm(pos, vol, sens)
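%
% Example (illustrative sketch; dippos is a placeholder Nx3 matrix of dipole
% positions and sens an MEG sensor structure with coilpos and coilori fields):
%   [h2mm, s2mm] = openmeeg_megm(dippos, vol, sens);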
% Copyright (C) 2010, Emmanuel Olivi
% INRIA Athena Project Team
% $Id$
% $LastChangedBy: alegra $
% $LastChangedDate: 2010-06-24 14:49:11 +0200 (Thu, 24 Jun 2010) $
% $Revision$
% store the current path and change folder to the temporary one
tmpfolder = cd;
om_checkombin;
try
cd(tempdir)
% write the triangulations to file
bndfile = {};
for i=1:length(vol.bnd)
[junk,tname] = fileparts(tempname);
bndfile{i} = [tname '.tri'];
om_save_tri(bndfile{i}, vol.bnd(i).pos, vol.bnd(i).tri);
end
% these will hold the shell script and the inverted system matrix
[junk,tname] = fileparts(tempname);
if ~ispc
exefile = [tname '.sh'];
else
exefile = [tname '.bat'];
end
[junk,tname] = fileparts(tempname);
condfile = [tname '.cond'];
[junk,tname] = fileparts(tempname);
geomfile = [tname '.geom'];
[junk,tname] = fileparts(tempname);
dipfile = [tname '.dip'];
[junk,tname] = fileparts(tempname);
sqdfile = [tname '.bin'];
[junk,tname] = fileparts(tempname);
h2mmfile = [tname '.bin'];
[junk,tname] = fileparts(tempname);
s2mmfile = [tname '.bin'];
% write conductivity and geometry files
om_write_geom(geomfile,bndfile);
om_write_cond(condfile,vol.cond);
% handle dipole file
ndip = size(pos,1);
pos = [kron(pos,ones(3,1)),kron(ones(ndip,1),eye(3))]; % save pos with each 3D orientation
om_save_full(pos,dipfile,'ascii');
% handle squids file
om_save_full([sens.coilpos,sens.coilori],sqdfile,'ascii');
% Exe file
efid = fopen(exefile, 'w');
omp_num_threads = feature('numCores');
if ~ispc
fprintf(efid,'#!/usr/bin/env bash\n');
fprintf(efid,['export OMP_NUM_THREADS=',num2str(omp_num_threads),'\n']);
fprintf(efid,['om_assemble -DS2MM ./',dipfile,' ./',sqdfile,' ./',s2mmfile,' 2>&1 > /dev/null\n']);
fprintf(efid,['om_assemble -H2MM ./',geomfile,' ./',condfile,' ./',sqdfile,' ./', h2mmfile,' 2>&1 > /dev/null\n']);
else
fprintf(efid,['om_assemble -DS2MM ./',dipfile,' ./',sqdfile,' ./',s2mmfile,'\n']);
fprintf(efid,['om_assemble -H2MM ./',geomfile,' ./',condfile,' ./',sqdfile,' ./', h2mmfile,'\n']);
end
fclose(efid);
if ~ispc
dos(sprintf('chmod +x %s', exefile));
end
catch
cd(tmpfolder)
rethrow(lasterror)
end
try
% execute OpenMEEG and read the resulting file
disp(['Assembling OpenMEEG H2MM and S2MM matrices']);
stopwatch = tic;
if ispc
dos([exefile]);
else
dos(['./' exefile]);
end
h2mm = om_load_full(h2mmfile,'binary');
s2mm = om_load_full(s2mmfile,'binary');
toc(stopwatch);
cleaner(vol,bndfile,condfile,geomfile,exefile,dipfile,h2mmfile,s2mmfile,sqdfile)
cd(tmpfolder)
catch
  warning('an error occurred while running OpenMEEG');
disp(lasterr);
cleaner(vol,bndfile,condfile,geomfile,exefile,dipfile,h2mmfile,s2mmfile,sqdfile)
cd(tmpfolder)
end
function cleaner(vol,bndfile,condfile,geomfile,exefile,dipfile,h2mmfile,s2mmfile,sqdfile)
% delete the temporary files
for i=1:length(vol.bnd)
if exist(bndfile{i},'file'),delete(bndfile{i}),end
end
if exist(condfile,'file'),delete(condfile);end
if exist(geomfile,'file'),delete(geomfile);end
if exist(exefile,'file'),delete(exefile);end
if exist(dipfile,'file'),delete(dipfile);end
if exist(h2mmfile,'file'),delete(h2mmfile);end
if exist(s2mmfile,'file'),delete(s2mmfile);end
if exist(sqdfile,'file'),delete(sqdfile);end
|